content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
#!/usr/bin/env Rscript
# description---------------------------------------------------------
# Inputs (not in argument order)
# 1 Path to an "BELD-data-less" input *.nc from EPIC
# (via, e.g., computeCropSum.sh) with
# dim(datavar) == [TSTEP, LAY, ROW, COL], where
# * [ROW, COL] is the gridspace
# * one layer per crop, 1 < i.layer <= n.layers.
# 2 Path to a *.rds BELD array (as output by beldToRDS.r) with
# dim(array) == [LAY, ROW, COL], where
# * [ROW, COL] is the same gridspace as the EPIC datavar
# * one layer per crop, hence ...
# * the "BELD vector" for a gridcell has length==n.layers.
# 3 Path to a "BELDed" output *.nc
# (via, e.g., computeCropSum.sh) very like the unsummed input, but
# + one new "BELD layer" per gridcell, i.layer == n.layers + 1,
# with values to be ignored.
# Outputs the BELDed output EPIC file, containing, in each gridcell,
# a value in [gridcell, i.layer] containing the sum of crop coverage
# (i.e., total share of the land cover on that gridcell) from BELD.
# code----------------------------------------------------------------
library(ncdf4)
source('./ioapi.r')
# constants-----------------------------------------------------------
datavar.name <- "DN2"
# path to input file
# TODO: get from environment
epic.input.fp <- sprintf('./5yravg.%sunsummed.nc', datavar.name)
# path to output file
# TODO: get from environment
# TODO: delete if exists
epic.output.fp <- sprintf('./5yravg.%sbelded.nc', datavar.name)
# path to serialized BELD array, as produced by beldToRDS
beld.fp <- "./BELD4crops_12km_6_13_11_LimeiRan.rds"
# plot-related vars. TODO: move me to a separate file!
plot.layers <- FALSE
image.fp <- "./compare.DN2.layers.pdf" # file to which to plot
map.table <- './map.CMAQkm.world.dat' # map to overlay on plot
l2d.fp <- "./layer2description.rds" # env mapping layer#s to crop descriptions
# package=grDevices
palette.vec <- c("grey","purple","deepskyblue2","green","yellow","orange","red","brown")
colors <- colorRampPalette(palette.vec)
# used for quantiling legend
probabilities.vec <- seq(0, 1, 1.0/(length(palette.vec) - 1))
# functions-----------------------------------------------------------
# Write sum in sum.layer (or NA if BELD layers are empty).
# function procedure (copied from above):
# 2 Determine whether gridcell is "empty," i.e., all its crop layers==NA.
# If so, write NAs to sum layer. If not:
# 3 Sum "BELD vector" and write to "BELD layer."
# Sum the positive BELD crop fractions for one gridcell and store the
# result in the designated "sum" position of that gridcell's EPIC vector.
#
# Args:
#   epic.vec: vector of EPIC layer values for one gridcell (modified copy
#             is returned; R passes by value).
#   beld.vec: vector of BELD crop fractions for the same gridcell, one per
#             crop layer. May contain NAs and zeros; both mean "no data".
#   i.col, i.row: gridcell indices, used only in the debugging message.
#   i.sum: index within epic.vec at which to write the sum.
# Returns:
#   epic.vec with epic.vec[i.sum] set to the sum of the positive BELD
#   values, or NA if the gridcell has no positive BELD data.
sum.beld.for.layers <- function(
epic.vec, beld.vec, i.col, i.row,
i.sum # index of cell in vector in which to write the sum
) {
# Select strictly positive, non-missing values by direct logical
# indexing: `subset()` on a vector relies on non-standard evaluation and
# is discouraged in programmatic code (see ?subset "Warning").
beld.sum <- sum(beld.vec[!is.na(beld.vec) & beld.vec > 0])
# Compare explicitly instead of using a numeric as an `if` condition;
# the sum of positive values is 0 exactly when no BELD data exists.
if (beld.sum > 0) {
epic.vec[i.sum] <- beld.sum
} else {
# Gridcell is "empty" in BELD: mark the sum layer as missing.
# cat(sprintf('No BELD data > 0 for gridcell==[%3i,%3i]\n', i.col, i.row))
epic.vec[i.sum] <- NA
}
# Return the updated vector (last expression; no explicit `return`).
epic.vec
} # end function sum.beld.for.layers
# main----------------------------------------------------------------
# 1 Setup: open connections and devices, load data
# See http://rosettacode.org/wiki/Command-line_arguments#R
# TODO: check arguments
# Read the commandline arguments
args <- (commandArgs(TRUE))
# args is now a list of character vectors
# First check to see if any arguments were passed,
# then evaluate each argument.
if (length(args)==0) {
cat("No arguments supplied\n")
# defaults supplied above
} else {
# Note: cannot have spaces in arguments!
for (i in 1:length(args)) {
eval(parse(text=args[[i]]))
}
}
if (plot.layers) {
cat('writeBELDlayer.r: plotting layers\n')
source('./plotLayersForTimestep.r')
} else {
cat('writeBELDlayer.r: not plotting layers\n')
}
beld.array <- readRDS(beld.fp)
epic.input.file <- nc_open(epic.input.fp, write=FALSE, readunlim=FALSE)
epic.output.file <- nc_open(epic.output.fp, write=TRUE, readunlim=FALSE)
epic.input.datavar <- ncvar_get(epic.input.file, varid=datavar.name)
epic.output.datavar <- ncvar_get(epic.output.file, varid=datavar.name)
if (plot.layers) {
map <- read.table(map.table, sep=",")
attrs.list <- get.plot.addrs.from.IOAPI(epic.input.file)
# creates PDF, starts graphic device
pdf(image.fp, height=3.5, width=5, pointsize=1, onefile=TRUE)
}
# NOTE: this assumes input and output file have SAME DIMENSIONS
# Discover the datavar's shape from the in-memory copy of the output file.
datavar.dims.n <- length(dim(epic.output.datavar))
datavar.cols.n <- dim(epic.output.datavar)[1]
datavar.rows.n <- dim(epic.output.datavar)[2]
datavar.cells.n <- datavar.rows.n * datavar.cols.n
datavar.layers.n <- dim(epic.output.datavar)[3]
# ASSERT: files have already had layers augmented by 2:
# crops layers, then "BELD layer" then "sum layer"
# NOTE(review): if the "sum layer" is the LAST layer (index n), then
# n - 1 addresses the "BELD layer", not the sum layer — confirm which
# layer sum.beld.for.layers is meant to write into.
i.sum <- datavar.layers.n - 1
# used in reading one timestep at a time
# (Pierce-style read: see help(ncvar_get)#Examples)
start <- rep(1,datavar.dims.n) # start=(1,1,1,...)
# Remember timelike dim is always the LAST dimension!
# but if val=1, it is omitted from dim(epic.output.datavar), breaking Pierce-style read (below)
# Normalize to 4 dims: a 3-D array means TSTEP==1 was dropped by R,
# so re-append a singleton time dimension to start/count.
if (datavar.dims.n < 3) {
# TODO: throw
# TODO: print(paste()) -> cat(sprintf())
cat(sprintf('ERROR: datavar.dims.n==%i\n', datavar.dims.n))
} else if (datavar.dims.n == 3) {
datavar.timesteps.n <- 1
count <- c(dim(epic.output.datavar), 1)
start <- c(start, 1)
datavar.dims.max.vec <- count
# After padding, treat the variable as 4-D for the loop below.
datavar.dims.n <- 4
} else if (datavar.dims.n == 4) {
datavar.timesteps.n <- dim(epic.output.datavar)[datavar.dims.n]
count <- dim(epic.output.datavar)
datavar.dims.max.vec <- count
} else {
# TODO: throw
cat(sprintf('ERROR: datavar.dims.n==%i\n', datavar.dims.n))
}
# start debugging
# print('initially:')
# TODO: get output all on one line
# print('start==') ; print(start)
# print('count==') ; print(count)
# end debugging
# For each timestep in EPIC file:
# for safety (and pedagogy), read in data one timestep at a time, dim-agnostically
# For each timestep in the EPIC file: read one timestep at a time
# (Pierce-style read: see help(ncvar_get)#Examples), fill each gridcell's
# BELD sum, and write the timestep back to the output file.
for (i.timestep in seq_len(datavar.timesteps.n)) {
# Position the hyperslab on this single timestep:
# start=(1,1,1,i), count=(COL,ROW,LAY,1)
start[datavar.dims.n] <- i.timestep
# BUGFIX: `count` holds the NUMBER of values to read along each
# dimension, not an index. The original `count[...] <- i.timestep`
# read i.timestep timesteps starting at timestep i.timestep, which for
# any i.timestep > 1 either over-runs the timelike dimension or returns
# a 4-D slab, breaking the 3-D [col,row,] indexing below.
count[datavar.dims.n] <- 1
epic.input.timestep <- ncvar_get(epic.input.file, varid=datavar.name, start=start, count=count)
epic.output.timestep <- ncvar_get(epic.output.file, varid=datavar.name, start=start, count=count)
# TODO: can this be done as a dual `apply`, or a linear-algebra op?
# For each gridcell in this timestep, write the BELD sum layer.
for (i.col in seq_len(datavar.cols.n)) {
for (i.row in seq_len(datavar.rows.n)) {
# A gridcell is "empty" when BELD has no positive crop layers;
# sum.beld.for.layers then writes NA, else the sum of BELD layers.
beld.vec <- beld.array[i.col, i.row, ]
epic.input.vec <- epic.input.timestep[i.col, i.row, ]
epic.output.vec <- sum.beld.for.layers(
epic.input.vec, beld.vec, i.col, i.row, i.sum)
epic.output.timestep[i.col, i.row, ] <- epic.output.vec
} # end for rows
} # end for cols
# Write the new'n'improved timestep back to file.
# TODO: don't write if no layers changed
ncvar_put(epic.output.file, varid=datavar.name, vals=epic.output.timestep, start=start, count=count)
if (plot.layers) {
# debugging
cat(sprintf('writeBELDlayer.r: plot.layers.for.timestep==%i, n.layers==%i\n',
i.timestep, datavar.layers.n))
# Re-read the whole datavar so the plot reflects what was just written.
epic.output.datavar <- ncvar_get(epic.output.file, varid=datavar.name)
plot.layers.for.timestep(
datavar=epic.output.datavar,
datavar.name=datavar.name,
datavar.parent=epic.output.file,
i.timestep=i.timestep,
n.layers=datavar.layers.n,
attrs.list=attrs.list,
q.vec=probabilities.vec,
l2d.fp=l2d.fp,
colors=colors,
map=map)
}
} # end for timesteps
# 5 Teardown
# Close the connections (ncdf=close.ncdf), ...
if (plot.layers) {
dev.off()
}
nc_close(epic.input.file)
nc_close(epic.output.file)
# * ... and remove their ADS (not the files!) from the workspace.
rm(epic.input.file)
rm(epic.output.file)
# debugging-----------------------------------------------------------
| /writeBELDlayer.r | no_license | gc13141112/ioapi-hack-R | R | false | false | 8,448 | r | #!/usr/bin/env Rscript
# description---------------------------------------------------------
# Inputs (not in argument order)
# 1 Path to an "BELD-data-less" input *.nc from EPIC
# (via, e.g., computeCropSum.sh) with
# dim(datavar) == [TSTEP, LAY, ROW, COL], where
# * [ROW, COL] is the gridspace
# * one layer per crop, 1 < i.layer <= n.layers.
# 2 Path to a *.rds BELD array (as output by beldToRDS.r) with
# dim(array) == [LAY, ROW, COL], where
# * [ROW, COL] is the same gridspace as the EPIC datavar
# * one layer per crop, hence ...
# * the "BELD vector" for a gridcell has length==n.layers.
# 3 Path to a "BELDed" output *.nc
# (via, e.g., computeCropSum.sh) very like the unsummed input, but
# + one new "BELD layer" per gridcell, i.layer == n.layers + 1,
# with values to be ignored.
# Outputs the BELDed output EPIC file, containing, in each gridcell,
# a value in [gridcell, i.layer] containing the sum of crop coverage
# (i.e., total share of the land cover on that gridcell) from BELD.
# code----------------------------------------------------------------
library(ncdf4)
source('./ioapi.r')
# constants-----------------------------------------------------------
datavar.name <- "DN2"
# path to input file
# TODO: get from environment
epic.input.fp <- sprintf('./5yravg.%sunsummed.nc', datavar.name)
# path to output file
# TODO: get from environment
# TODO: delete if exists
epic.output.fp <- sprintf('./5yravg.%sbelded.nc', datavar.name)
# path to serialized BELD array, as produced by beldToRDS
beld.fp <- "./BELD4crops_12km_6_13_11_LimeiRan.rds"
# plot-related vars. TODO: move me to a separate file!
plot.layers <- FALSE
image.fp <- "./compare.DN2.layers.pdf" # file to which to plot
map.table <- './map.CMAQkm.world.dat' # map to overlay on plot
l2d.fp <- "./layer2description.rds" # env mapping layer#s to crop descriptions
# package=grDevices
palette.vec <- c("grey","purple","deepskyblue2","green","yellow","orange","red","brown")
colors <- colorRampPalette(palette.vec)
# used for quantiling legend
probabilities.vec <- seq(0, 1, 1.0/(length(palette.vec) - 1))
# functions-----------------------------------------------------------
# Write sum in sum.layer (or NA if BELD layers are empty).
# function procedure (copied from above):
# 2 Determine whether gridcell is "empty," i.e., all its crop layers==NA.
# If so, write NAs to sum layer. If not:
# 3 Sum "BELD vector" and write to "BELD layer."
# Write, at position i.sum of the gridcell's EPIC vector, the total BELD
# crop coverage for that gridcell: the sum of all strictly positive BELD
# layer values, or NA when the gridcell carries no positive BELD data.
# i.col and i.row identify the gridcell (only used for debug output).
# Returns the updated EPIC vector.
sum.beld.for.layers <- function(
epic.vec, beld.vec, i.col, i.row,
i.sum # index of cell in vector in which to write the sum
) {
# which() safely skips NAs produced by the comparison.
positive.share <- beld.vec[which(beld.vec > 0)]
total.cover <- sum(positive.share)
# An empty selection sums to 0, which marks the gridcell as missing.
# cat(sprintf('No BELD data > 0 for gridcell==[%3i,%3i]\n', i.col, i.row))
epic.vec[i.sum] <- if (total.cover > 0) total.cover else NA
epic.vec
} # end function sum.beld.for.layers
# main----------------------------------------------------------------
# 1 Setup: open connections and devices, load data
# See http://rosettacode.org/wiki/Command-line_arguments#R
# TODO: check arguments
# Read the commandline arguments
args <- (commandArgs(TRUE))
# args is now a list of character vectors
# First check to see if any arguments were passed,
# then evaluate each argument.
if (length(args)==0) {
cat("No arguments supplied\n")
# defaults supplied above
} else {
# Note: cannot have spaces in arguments!
for (i in 1:length(args)) {
eval(parse(text=args[[i]]))
}
}
if (plot.layers) {
cat('writeBELDlayer.r: plotting layers\n')
source('./plotLayersForTimestep.r')
} else {
cat('writeBELDlayer.r: not plotting layers\n')
}
beld.array <- readRDS(beld.fp)
epic.input.file <- nc_open(epic.input.fp, write=FALSE, readunlim=FALSE)
epic.output.file <- nc_open(epic.output.fp, write=TRUE, readunlim=FALSE)
epic.input.datavar <- ncvar_get(epic.input.file, varid=datavar.name)
epic.output.datavar <- ncvar_get(epic.output.file, varid=datavar.name)
if (plot.layers) {
map <- read.table(map.table, sep=",")
attrs.list <- get.plot.addrs.from.IOAPI(epic.input.file)
# creates PDF, starts graphic device
pdf(image.fp, height=3.5, width=5, pointsize=1, onefile=TRUE)
}
# NOTE: this assumes input and output file have SAME DIMENSIONS
datavar.dims.n <- length(dim(epic.output.datavar))
datavar.cols.n <- dim(epic.output.datavar)[1]
datavar.rows.n <- dim(epic.output.datavar)[2]
datavar.cells.n <- datavar.rows.n * datavar.cols.n
datavar.layers.n <- dim(epic.output.datavar)[3]
# ASSERT: files have already had layers augmented by 2:
# crops layers, then "BELD layer" then "sum layer"
i.sum <- datavar.layers.n - 1
# used in reading one timestep at a time
# (Pierce-style read: see help(ncvar_get)#Examples)
start <- rep(1,datavar.dims.n) # start=(1,1,1,...)
# Remember timelike dim is always the LAST dimension!
# but if val=1, it is omitted from dim(epic.output.datavar), breaking Pierce-style read (below)
if (datavar.dims.n < 3) {
# TODO: throw
# TODO: print(paste()) -> cat(sprintf())
cat(sprintf('ERROR: datavar.dims.n==%i\n', datavar.dims.n))
} else if (datavar.dims.n == 3) {
datavar.timesteps.n <- 1
count <- c(dim(epic.output.datavar), 1)
start <- c(start, 1)
datavar.dims.max.vec <- count
datavar.dims.n <- 4
} else if (datavar.dims.n == 4) {
datavar.timesteps.n <- dim(epic.output.datavar)[datavar.dims.n]
count <- dim(epic.output.datavar)
datavar.dims.max.vec <- count
} else {
# TODO: throw
cat(sprintf('ERROR: datavar.dims.n==%i\n', datavar.dims.n))
}
# start debugging
# print('initially:')
# TODO: get output all on one line
# print('start==') ; print(start)
# print('count==') ; print(count)
# end debugging
# For each timestep in EPIC file:
# for safety (and pedagogy), read in data one timestep at a time, dim-agnostically
# For each timestep in the EPIC file: read one timestep at a time
# (Pierce-style read: see help(ncvar_get)#Examples), fill each gridcell's
# BELD sum, and write the timestep back to the output file.
for (i.timestep in seq_len(datavar.timesteps.n)) {
# Position the hyperslab on this single timestep:
# start=(1,1,1,i), count=(COL,ROW,LAY,1)
start[datavar.dims.n] <- i.timestep
# BUGFIX: `count` holds the NUMBER of values to read along each
# dimension, not an index. The original `count[...] <- i.timestep`
# read i.timestep timesteps starting at timestep i.timestep, which for
# any i.timestep > 1 either over-runs the timelike dimension or returns
# a 4-D slab, breaking the 3-D [col,row,] indexing below.
count[datavar.dims.n] <- 1
epic.input.timestep <- ncvar_get(epic.input.file, varid=datavar.name, start=start, count=count)
epic.output.timestep <- ncvar_get(epic.output.file, varid=datavar.name, start=start, count=count)
# TODO: can this be done as a dual `apply`, or a linear-algebra op?
# For each gridcell in this timestep, write the BELD sum layer.
for (i.col in seq_len(datavar.cols.n)) {
for (i.row in seq_len(datavar.rows.n)) {
# A gridcell is "empty" when BELD has no positive crop layers;
# sum.beld.for.layers then writes NA, else the sum of BELD layers.
beld.vec <- beld.array[i.col, i.row, ]
epic.input.vec <- epic.input.timestep[i.col, i.row, ]
epic.output.vec <- sum.beld.for.layers(
epic.input.vec, beld.vec, i.col, i.row, i.sum)
epic.output.timestep[i.col, i.row, ] <- epic.output.vec
} # end for rows
} # end for cols
# Write the new'n'improved timestep back to file.
# TODO: don't write if no layers changed
ncvar_put(epic.output.file, varid=datavar.name, vals=epic.output.timestep, start=start, count=count)
if (plot.layers) {
# debugging
cat(sprintf('writeBELDlayer.r: plot.layers.for.timestep==%i, n.layers==%i\n',
i.timestep, datavar.layers.n))
# Re-read the whole datavar so the plot reflects what was just written.
epic.output.datavar <- ncvar_get(epic.output.file, varid=datavar.name)
plot.layers.for.timestep(
datavar=epic.output.datavar,
datavar.name=datavar.name,
datavar.parent=epic.output.file,
i.timestep=i.timestep,
n.layers=datavar.layers.n,
attrs.list=attrs.list,
q.vec=probabilities.vec,
l2d.fp=l2d.fp,
colors=colors,
map=map)
}
} # end for timesteps
# 5 Teardown
# Close the connections (ncdf=close.ncdf), ...
if (plot.layers) {
dev.off()
}
nc_close(epic.input.file)
nc_close(epic.output.file)
# * ... and remove their ADS (not the files!) from the workspace.
rm(epic.input.file)
rm(epic.output.file)
# debugging-----------------------------------------------------------
|
#-------------------------------------------------------------------------------
# Copyright (c) 2012 University of Illinois, NCSA.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the
# University of Illinois/NCSA Open Source License
# which accompanies this distribution, and is available at
# http://opensource.ncsa.illinois.edu/license.html
#-------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------#
##'
##' Plant leaf reflectance and transmittance are calculated from 400 nm to
##' 2500 nm (1 nm step) with the following parameters:
##'
##' @name prospect5
##' @title PROSPECT-5 leaf radiative transfer model
##' @param N leaf structure parameter. Number of elementary layers
##' @param Cab leaf chlorophyll a+b content in ug/cm2
##' @param Car leaf carotenoid content ug/cm2
##' @param Cw leaf equivalent water thickness (EWT) in g/cm2 or cm-1
##' @param Cm leaf dry matter content in g/cm2 (alias leaf mass per area [LMA])
##'
##' @import gsl
##' @export
##' @examples
##' LRT <- prospect5(2,65,30,0.004,0.002)
##'
##' @references Stokes G.G. (1862), On the intensity of the light reflected from or transmitted through a pile of plates, Proc. Roy. Soc. Lond., 11:545-556.
##' @references Allen W.A., Gausman H.W., Richardson A.J., Thomas J.R. (1969), Interaction of isotropic light with a compact plant leaf, J. Opt. Soc. Am., 59(10):1376-1379.
##' @references Jacquemoud S., Ustin S.L., Verdebout J., Schmuck G., Andreoli G., Hosgood B. (1996), Estimating leaf biochemistry using the PROSPECT leaf optical properties model, Remote Sens. Environ., 56:194-202.
##' @references Jacquemoud S., Baret F. (1990), PROSPECT: a model of leaf optical properties spectra, Remote Sens. Environ., 34:75-91.
##' @references Feret et al. (2008), PROSPECT-4 and 5: Advances in the Leaf Optical Properties Model Separating Photosynthetic Pigments, Remote Sensing of Environment
##' @references Feret et al. (2008). http://teledetection.ipgp.jussieu.fr/prosail/
##' @author Shawn Serbin
##'
# PROSPECT-5 leaf radiative transfer model: computes leaf reflectance and
# transmittance from 400 to 2500 nm (1 nm step) for a leaf of N elementary
# layers with chlorophyll Cab, carotenoids Car, water Cw, dry matter Cm.
# Returns a data.frame(Wavelength, Reflectance, Transmittance).
# Depends on: gsl::expint_E1, the dataSpec_p5 dataset, and tav() (the
# transmissivity-of-a-dielectric-interface helper defined elsewhere).
prospect5 <- function(N, Cab, Car, Cw, Cm) {
# Typical LOPEX'93 values for fresh (F) and dry (D) leaves:
# ---------------------------------------------
# N Cab Cw Cm
# ---------------------------------------------
# min 1.000 0.0 0.004000 0.001900
# max 3.000 100.0 0.040000 0.016500
# corn (F) 1.518 58.0 0.013100 0.003662
# rice (F) 2.275 23.7 0.007500 0.005811
# clover (F) 1.875 46.7 0.010000 0.003014
# laurel (F) 2.660 74.1 0.019900 0.013520
# ---------------------------------------------
# min 1.500 0.0 0.000063 0.0019
# max 3.600 100.0 0.000900 0.0165
# bamboo (D) 2.698 70.8 0.000117 0.009327
# lettuce (D) 2.107 35.2 0.000244 0.002250
# walnut (D) 2.656 62.8 0.000263 0.006573
# chestnut (D) 1.826 47.7 0.000307 0.004305
# ---------------------------------------------
### Load the specific absorption features: column 1 is wavelength,
### column 2 the refractive index, columns 3-6 per-constituent spectra.
data(dataSpec_p5)
l <- dataSpec_p5[, 1]
n <- dataSpec_p5[, 2]
### Global absorption per elementary layer
k <- (Cab * dataSpec_p5[, 3] + Car * dataSpec_p5[, 4] +
Cw * dataSpec_p5[, 5] + Cm * dataSpec_p5[, 6]) / N
# BUGFIX: the original computed `eps <- k[which(k==0)]` and never used
# it. Wherever k == 0 (no absorption at a wavelength, e.g. when some
# constituents are 0), expint_E1(0) == Inf and `k^2 * expint_E1(k)`
# evaluates to 0 * Inf == NaN, poisoning the whole spectrum downstream.
# Nudging zero absorptions to machine epsilon gives trans ~ 1 there,
# which is the correct limiting value of trans as k -> 0.
k[k == 0] <- .Machine$double.eps
trans <- (1 - k) * exp(-k) + k^2 * expint_E1(k) ### global transmission
### reflectivity and transmissivity at the interface (leaf surface)
#-------------------------------------------------
alpha <- 40 # maximum incidence angle (degrees) passed to tav()
t12 <- tav(alpha, n)    # transmissivity outside -> leaf
t21 <- (tav(90, n)) / n^2 # transmissivity leaf -> outside
r12 <- 1 - t12          # reflectivity outside -> leaf
r21 <- 1 - t21          # reflectivity leaf -> outside
x <- (tav(alpha, n)) / tav(90, n)
y <- x * (tav(90, n) - 1) + 1 - tav(alpha, n)
### reflectance and transmittance of the elementary layer, N = 1
#------------------------------------------------------------
ra <- r12 + (t12 * t21 * r21 * trans^2) / (1 - r21^2 * trans^2)
ta <- (t12 * t21 * trans) / (1 - r21^2 * trans^2)
r90 <- (ra - y) / x
t90 <- ta / x
### reflectance and transmittance of N layers
### (Stokes 1862 pile-of-plates solution)
delta <- sqrt((t90^2 - r90^2 - 1)^2 - 4 * r90^2)
beta <- (1 + r90^2 - t90^2 - delta) / (2 * r90)
va <- (1 + r90^2 - t90^2 + delta) / (2 * r90)
# NOTE(review): this guard is all-or-nothing — if ANY wavelength has a
# near-zero denominator, the 1e-14 fallback is applied to the WHOLE
# vector. Kept as-is to preserve behavior; a per-element alternative
# would be pmax(va * (beta - r90), 1e-14) — confirm against reference.
if (any(va * (beta - r90) <= 1e-14)) {
vb <- sqrt(beta * (va - r90) / (1e-14))
} else {
vb <- sqrt(beta * (va - r90) / (va * (beta - r90)))
}
### Combine over N layers
vbNN <- vb^(N - 1)
vbNNinv <- 1 / vbNN
vainv <- 1 / va
s1 <- ta * t90 * (vbNN - vbNNinv)
s2 <- ta * (va - vainv)
s3 <- va * vbNN - vainv * vbNNinv - r90 * (vbNN - vbNNinv)
### Output reflectance and transmittance of the modeled leaf
RN <- ra + s1 / s3
TN <- s2 / s3
LRT <- data.frame(Wavelength = l,
Reflectance = RN,
Transmittance = TN)
return(LRT)
}
#==================================================================================================#
####################################################################################################
### EOF. End of R script file.
#################################################################################################### | /modules/rtm/R/prospect5.R | permissive | rgknox/pecan | R | false | false | 5,361 | r | #-------------------------------------------------------------------------------
# Copyright (c) 2012 University of Illinois, NCSA.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the
# University of Illinois/NCSA Open Source License
# which accompanies this distribution, and is available at
# http://opensource.ncsa.illinois.edu/license.html
#-------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------#
##'
##' Plant leaf reflectance and transmittance are calculated from 400 nm to
##' 2500 nm (1 nm step) with the following parameters:
##'
##' @name prospect5
##' @title PROSPECT-5 leaf radiative transfer model
##' @param N leaf structure parameter. Number of elementary layers
##' @param Cab leaf chlorophyll a+b content in ug/cm2
##' @param Car leaf carotenoid content ug/cm2
##' @param Cw leaf equivalent water thickness (EWT) in g/cm2 or cm-1
##' @param Cm leaf dry matter content in g/cm2 (alias leaf mass per area [LMA])
##'
##' @import gsl
##' @export
##' @examples
##' LRT <- prospect5(2,65,30,0.004,0.002)
##'
##' @references Stokes G.G. (1862), On the intensity of the light reflected from or transmitted through a pile of plates, Proc. Roy. Soc. Lond., 11:545-556.
##' @references Allen W.A., Gausman H.W., Richardson A.J., Thomas J.R. (1969), Interaction of isotropic light with a compact plant leaf, J. Opt. Soc. Am., 59(10):1376-1379.
##' @references Jacquemoud S., Ustin S.L., Verdebout J., Schmuck G., Andreoli G., Hosgood B. (1996), Estimating leaf biochemistry using the PROSPECT leaf optical properties model, Remote Sens. Environ., 56:194-202.
##' @references Jacquemoud S., Baret F. (1990), PROSPECT: a model of leaf optical properties spectra, Remote Sens. Environ., 34:75-91.
##' @references Feret et al. (2008), PROSPECT-4 and 5: Advances in the Leaf Optical Properties Model Separating Photosynthetic Pigments, Remote Sensing of Environment
##' @references Feret et al. (2008). http://teledetection.ipgp.jussieu.fr/prosail/
##' @author Shawn Serbin
##'
# PROSPECT-5 leaf radiative transfer: reflectance and transmittance over
# 400-2500 nm for a leaf of N layers; see the roxygen header above.
# Depends on gsl::expint_E1, the dataSpec_p5 dataset, and tav().
prospect5 <- function(N,Cab,Car,Cw,Cm){
# Here are some examples observed during the LOPEX'93 experiment on
# fresh (F) and dry (D) leaves :
#
# ---------------------------------------------
# N Cab Cw Cm
# ---------------------------------------------
# min 1.000 0.0 0.004000 0.001900
# max 3.000 100.0 0.040000 0.016500
# corn (F) 1.518 58.0 0.013100 0.003662
# rice (F) 2.275 23.7 0.007500 0.005811
# clover (F) 1.875 46.7 0.010000 0.003014
# laurel (F) 2.660 74.1 0.019900 0.013520
# ---------------------------------------------
# min 1.500 0.0 0.000063 0.0019
# max 3.600 100.0 0.000900 0.0165
# bamboo (D) 2.698 70.8 0.000117 0.009327
# lettuce (D) 2.107 35.2 0.000244 0.002250
# walnut (D) 2.656 62.8 0.000263 0.006573
# chestnut (D) 1.826 47.7 0.000307 0.004305
# ---------------------------------------------
### Load the spec. abs. features
### (col 1 wavelength, col 2 refractive index, cols 3-6 constituent spectra)
data(dataSpec_p5)
l <- dataSpec_p5[,1]
n <- dataSpec_p5[,2]
### Global absorption feature
k <- (Cab*dataSpec_p5[,3]+Car*dataSpec_p5[,4]+Cw*dataSpec_p5[,5]+Cm*dataSpec_p5[,6])/N
# NOTE(review): `eps` is computed but never used. Wherever k == 0,
# expint_E1(0) is Inf and the trans expression below yields 0*Inf == NaN;
# the likely intent was k[which(k==0)] <- .Machine$double.eps — confirm
# against the reference PROSPECT implementation.
eps <- k[which(k==0)]
trans <- (1-k)*exp(-k)+k^2*expint_E1(k) ### global trans
### reflectivity and transmissivity at the interface. Leaf surface
#-------------------------------------------------
alpha <- 40
t12 <- tav(alpha,n) #trans
t21 <- (tav(90,n))/n^2 #trans
r12 <- 1-t12 #refl
r21 <- 1-t21 #refl
x <- (tav(alpha,n))/tav(90,n)
y <- x*(tav(90,n)-1)+1-tav(alpha,n)
### reflectance and transmittance of the elementary layer N = 1
#------------------------------------------------------------
ra <- r12+(t12*t21*r21*trans^2)/(1-r21^2*trans^2)
ta <- (t12*t21*trans)/(1-r21^2*trans^2)
r90 <- (ra-y)/x
t90 <- ta/x
#***********************************************************************
# reflectance and transmittance of N layers
# (Stokes 1862 pile-of-plates solution)
#***********************************************************************
delta <- sqrt((t90^2-r90^2-1)^2-4*r90^2)
beta <- (1+r90^2-t90^2-delta)/(2*r90)
va <- (1+r90^2-t90^2+delta)/(2*r90)
# NOTE(review): this guard is all-or-nothing — if ANY wavelength has a
# near-zero denominator, the 1e-14 fallback is applied to the WHOLE
# vector; a per-element guard may have been intended.
if (any(va*(beta-r90)<=1e-14)) {
vb <- sqrt(beta*(va-r90)/(1e-14))
} else {
vb <- sqrt(beta*(va-r90)/(va*(beta-r90)))
}
### Calc over N layers
vbNN <- vb^(N-1)
vbNNinv <- 1/vbNN
vainv <- 1/va
s1 <- ta*t90*(vbNN-vbNNinv)
s2 <- ta*(va-vainv)
s3 <- va*vbNN-vainv*vbNNinv-r90*(vbNN-vbNNinv)
### Calculate output reflectance and transmittance of the modeled leaf
RN <- ra+s1/s3
TN <- s2/s3
LRT <- data.frame(Wavelength=l,
Reflectance=RN,
Transmittance=TN) # Output: wavelength, reflectance, transmittance
return(LRT)
}
#==================================================================================================#
####################################################################################################
### EOF. End of R script file.
#################################################################################################### |
# Network Topology-based Analysis (NTA) of the HALLMARK_APICAL_SURFACE
# gene set against the BioGRID protein-protein interaction network,
# using the WebGestaltR package.
# NOTE(review): rm(list=ls()) wipes the caller's workspace if this
# script is source()d — consider removing; kept as-is here.
rm(list=ls())
library(WebGestaltR)
# Run NTA for human: map the Ensembl gene IDs in the input file onto the
# BioGRID PPI network, expand the network (Network_Expansion), highlight
# the 10 nearest neighbors per seed, control significance by FDR, and
# write result files to the current working directory.
WebGestaltR(enrichMethod="NTA", organism="hsapiens",
enrichDatabase="network_PPI_BIOGRID", interestGeneFile="HALLMARK_APICAL_SURFACE.geneset.txt",
interestGeneType="ensembl_gene_id",sigMethod="fdr",
outputDirectory=getwd(), highlightType = 'Neighbors', neighborNum = 10,
networkConstructionMethod="Network_Expansion")
| /Data/MSigDB.go.pathway/HALLMARK_APICAL_SURFACE/WebGestalt.R | no_license | haoboguo/NetBAS | R | false | false | 370 | r | rm(list=ls())
library(WebGestaltR)
WebGestaltR(enrichMethod="NTA", organism="hsapiens",
enrichDatabase="network_PPI_BIOGRID", interestGeneFile="HALLMARK_APICAL_SURFACE.geneset.txt",
interestGeneType="ensembl_gene_id",sigMethod="fdr",
outputDirectory=getwd(), highlightType = 'Neighbors', neighborNum = 10,
networkConstructionMethod="Network_Expansion")
|
# Worked base-R graphics exercises (textbook listings 3-2 through 3-4,
# per the page references in the original comments).
# Build an empty dose/response data frame and fill it interactively
# with fix(), then pull the columns out as vectors.
mydata <- data.frame(dose=numeric(0),
drugA=numeric(0),
drugB=numeric(0))
fix(mydata)
dose <- mydata$dose
drugA <- mydata$drugA
drugB <- mydata$drugB
# Save the current graphical parameters, customize plot size, line
# width, text magnification and axis fonts, draw both drugs, restore.
opar <- par(no.readonly = TRUE)
par(pin = c(2,3))
par(lwd=2,cex=1.5)
par(cex.axis=.75,font.axis=3)
plot(dose,drugA,type = "b",pch =19,lty=2,col="red")
plot(dose,drugB,type = "b",pch =23,lty=6,col="blue",bg="green")
par(opar)
# Below is the code from p. 50: suppress default annotations, then add
# titles/labels with per-element colors via title().
par(ann=FALSE)
plot(dose,drugA,type = "b",
col="red",lty=2,pch=2,lwd=2,
xlim =c(0,60),ylim=c(0,70))
title(main="clinical Trials for DrugsA",col.main="red",
sub="This is hypothetical data",col.sub="blue",
xlab = "Dosage",ylab = "Drug Response",
col.lab="green",cex.lab=0.75)
# Listing 3-2, p. 32: an example of custom axes — suppress the y axis,
# then draw left/right axes by hand and label the right margin.
x <- c(1:10)
y <- x
z <- 10/x
opar <- par(no.readonly = TRUE)
par(mar=c(5,4,4,8) + 0.1)
plot(x,y,type = "b",
pch=21,col="red",
yaxt="n",lty=3,ann=FALSE)
lines(x,z,type = "b",pch=22,col="blue",lty=2)
axis(2,at=x,labels=x,col.axis="red",las=2)
axis(4,at=z,labels = round(z, digits = 2),col.axis="blue",las=2,cex.axis=0.7,tck=-.01)
mtext("y=10/x",side = 4,line = 3,cex.lab=1, las=2,col="blue")
title(main="An Example of Creative Axes",xlab="x values",ylab="Y=X")
par(opar)
# Listing 3-3, p. 55: two drug response curves with a reference line,
# minor tick marks (Hmisc) and a legend.
# (Typos "DrugeB"/"Druge Dose" preserved from the original strings.)
dose <- c(20,30,40,45,60)
drugA <- c(16,20,27,40,60)
drugB <- c(15,18,25,31,40)
opar <- par(no.readonly = TRUE)
par(lwd=2,cex=1.5,font.lab=2)
plot(dose,drugA,type = "b",
pch =15,lty =1,col="red",ylim=c(0,60),
main="DrugA VS DrugeB",
xlab = "Druge Dose",ylab = "Drug Response")
lines(dose,drugB,type = "b",
pch=17,lty=2,col="blue")
abline(h=c(30),lwd=1.5,lty=2,col="gray")
library(Hmisc)
minor.tick(nx=3,ny=3,tick.ratio = 0.5)
legend("topleft",inset = .05,title = "Drug Type",c("A","B"),
lty = c(1,2),pch=c(15,17),col = c("red","blue"))
par(opar)
# Text annotation, p. 56: label each point of the scatterplot with its
# row name from mtcars.
attach(mtcars)
plot (wt,mpg,
main = "Mileage vs. Car Weight",
xlab ="weight",ylab = "Mileage",
pch=18,col="blue")
text(wt,mpg,
row.names(mtcars),
cex=0.6,pos=4,col="red")
detach(mtcars)
# Demonstration of the different font families in text().
opar <- par(no.readonly = TRUE)
par(cex=1.5)
plot(1:7,1:7,type="n")
text(3,3,"example of default text")
text(4,4,family="mono","example of default text")
text(5,5,family="serif","example of default text")
text(6,6,family="sans","example of default text")
par(opar)
# Combining graphs, p. 59, figure 3-14: a 2x2 grid via par(mfrow).
attach(mtcars)
opar <- par(no.readonly = TRUE)
par(mfrow = c(2,2))
plot(wt,mpg,main ="wt vs. mpg")
plot(wt,disp,main ="wt vs. disp")
hist(wt,main ="Histogram of wt")
boxplot(wt,main ="Boxplot of wt")
par(opar)
detach(mtcars)
# Combining graphs, p. 59, figure 3-15: three stacked histograms.
attach(mtcars)
opar <- par(no.readonly = TRUE)
par(mfrow = c(3,1))
hist(wt)
hist(disp)
hist(mpg)
par(opar)
detach(mtcars)
# Combining graphs, p. 60, figures 3-16 and 3-17: layout() with
# unequal panel widths/heights.
attach(mtcars)
layout(matrix(c(1,1,2,3),2,2,byrow = TRUE),
widths=c(3,1),heights=c(1,2)) # remove this line to reproduce figure 3-16
hist(wt)
hist(mpg)
hist(disp)
detach(mtcars)
# An extension of the above to verify how layout() controls where each
# successive plot is placed (panel 5 spans the whole bottom row).
attach(mtcars)
layout(matrix(c(1,2,3,4,5,5),3,2,byrow = TRUE))
hist(wt)
hist(mpg)
hist(disp)
hist(wt)
hist(mpg)
detach(mtcars)
# Listing 3-4, p. 62: fine-grained control of the figure layout using
# par(fig=...) to overlay boxplots on a scatterplot's margins.
opar <- par(no.readonly = TRUE)
par(fig=c(0,0.8,0,0.8))
plot(mtcars$wt,mtcars$mpg,
xlab = "Miles per Gallon",
ylab="Car Weight")
par(fig=c(0,0.8,0.55,1),new=TRUE)
boxplot(mtcars$wt,horizontal = TRUE,axes = FALSE)
par(fig=c(0.65,1,0,0.8),new=TRUE)
boxplot(mtcars$mpg,axes=FALSE)
mtext("Enhanced Scatterplot",side = 3,outer = TRUE,line=-3)
par(opar)
| /R与数据可视化/R学习笔记/4/chapter3.R | no_license | oocarain/Learning | R | false | false | 3,652 | r | mydata <- data.frame(dose=numeric(0),
drugA=numeric(0),
drugB=numeric(0))
fix(mydata)
dose <- mydata$dose
drugA <- mydata$drugA
drugB <- mydata$drugB
opar <- par(no.readonly = TRUE)
par(pin = c(2,3))
par(lwd=2,cex=1.5)
par(cex.axis=.75,font.axis=3)
plot(dose,drugA,type = "b",pch =19,lty=2,col="red")
plot(dose,drugB,type = "b",pch =23,lty=6,col="blue",bg="green")
par(opar)
#下面是p50代码
par(ann=FALSE)
plot(dose,drugA,type = "b",
col="red",lty=2,pch=2,lwd=2,
xlim =c(0,60),ylim=c(0,70))
title(main="clinical Trials for DrugsA",col.main="red",
sub="This is hypothetical data",col.sub="blue",
xlab = "Dosage",ylab = "Drug Response",
col.lab="green",cex.lab=0.75)
#代码清单3-2 P32 自定义坐标轴的事例
x <- c(1:10)
y <- x
z <- 10/x
opar <- par(no.readonly = TRUE)
par(mar=c(5,4,4,8) + 0.1)
plot(x,y,type = "b",
pch=21,col="red",
yaxt="n",lty=3,ann=FALSE)
lines(x,z,type = "b",pch=22,col="blue",lty=2)
axis(2,at=x,labels=x,col.axis="red",las=2)
axis(4,at=z,labels = round(z, digits = 2),col.axis="blue",las=2,cex.axis=0.7,tck=-.01)
mtext("y=10/x",side = 4,line = 3,cex.lab=1, las=2,col="blue")
title(main="An Example of Creative Axes",xlab="x values",ylab="Y=X")
par(opar)
# Listing 3-3, p55: comparing drug A and drug B response curves
dose <- c(20,30,40,45,60)
drugA <- c(16,20,27,40,60)
drugB <- c(15,18,25,31,40)
opar <- par(no.readonly = TRUE)
par(lwd=2,cex=1.5,font.lab=2)
# NOTE(review): "DrugA VS DrugeB" and "Druge Dose" look like typos in the labels
plot(dose,drugA,type = "b",
pch =15,lty =1,col="red",ylim=c(0,60),
main="DrugA VS DrugeB",
xlab = "Druge Dose",ylab = "Drug Response")
lines(dose,drugB,type = "b",
pch=17,lty=2,col="blue")
abline(h=c(30),lwd=1.5,lty=2,col="gray")
library(Hmisc) # for minor.tick()
minor.tick(nx=3,ny=3,tick.ratio = 0.5)
legend("topleft",inset = .05,title = "Drug Type",c("A","B"),
lty = c(1,2),pch=c(15,17),col = c("red","blue"))
par(opar)
# Text annotation, p56: label each point with the car model name
attach(mtcars)
plot (wt,mpg,
main = "Mileage vs. Car Weight",
xlab ="weight",ylab = "Mileage",
pch=18,col="blue")
text(wt,mpg,
row.names(mtcars),
cex=0.6,pos=4,col="red")
detach(mtcars)
# Demonstrate the different font families
opar <- par(no.readonly = TRUE)
par(cex=1.5)
plot(1:7,1:7,type="n") # empty plot; text() draws the samples below
text(3,3,"example of default text")
text(4,4,family="mono","example of default text")
text(5,5,family="serif","example of default text")
text(6,6,family="sans","example of default text")
par(opar)
# Combining graphs, p59, figure 3-14: 2x2 grid via mfrow
attach(mtcars)
opar <- par(no.readonly = TRUE)
par(mfrow = c(2,2))
plot(wt,mpg,main ="wt vs. mpg")
plot(wt,disp,main ="wt vs. disp")
hist(wt,main ="Histogram of wt")
boxplot(wt,main ="Boxplot of wt")
par(opar)
detach(mtcars)
# Combining graphs, p59, figure 3-15: three rows, one column
attach(mtcars)
opar <- par(no.readonly = TRUE)
par(mfrow = c(3,1))
hist(wt)
hist(disp)
hist(mpg)
par(opar)
detach(mtcars)
# Combining graphs, p60, figures 3-16 and 3-17
attach(mtcars)
# layout() splits the device: figure 1 spans the top row, 2 and 3 share the bottom
layout(matrix(c(1,1,2,3),2,2,byrow = TRUE),
widths=c(3,1),heights=c(1,2)) # drop the widths/heights arguments to get figure 3-16
hist(wt)
hist(mpg)
hist(disp)
detach(mtcars)
# Extend the example above to verify how layout() controls figure placement
attach(mtcars)
layout(matrix(c(1,2,3,4,5,5),3,2,byrow = TRUE)) # figure 5 spans the whole bottom row
hist(wt)
hist(mpg)
hist(disp)
hist(wt)
hist(mpg)
detach(mtcars)
# Listing 3-4, p62: fine control of graph layout
opar <- par(no.readonly = TRUE)
# fig= places the scatterplot in the lower-left 80% of the device
par(fig=c(0,0.8,0,0.8))
# NOTE(review): wt is on the x-axis but labeled "Miles per Gallon" -- the
# xlab/ylab strings appear to be swapped; confirm against the source listing
plot(mtcars$wt,mtcars$mpg,
xlab = "Miles per Gallon",
ylab="Car Weight")
# new=TRUE overlays the next plot on the same device instead of a new page
par(fig=c(0,0.8,0.55,1),new=TRUE)
boxplot(mtcars$wt,horizontal = TRUE,axes = FALSE)
par(fig=c(0.65,1,0,0.8),new=TRUE)
boxplot(mtcars$mpg,axes=FALSE)
mtext("Enhanced Scatterplot",side = 3,outer = TRUE,line=-3)
par(opar)
|
\name{render_choropleth}
\alias{render_choropleth}
\title{Render a choropleth}
\usage{
render_choropleth(choropleth.df, lod, title = "", scaleName = "",
showLabels = TRUE, states = state.abb)
}
\arguments{
\item{choropleth.df}{A data.frame with a column named
"region" and a column named "value". If lod is "state"
then region must contain state names (e.g. "California"
or "CA"). If lod is "county" then region must contain
county FIPS codes. If lod is "zip" then region must
contain 5 digit ZIP codes.}
\item{lod}{A string representing the level of detail of
your data. Must be one of "state", "county" or "zip".}
\item{title}{The title of the image. Defaults to "".}
\item{scaleName}{The name of the scale/legend. Default
to "".}
\item{showLabels}{For State maps, whether or not to show
labels of the states.}
\item{states}{A vector of states to render. Defaults to
state.abb.}
}
\value{
A choropleth.
}
\description{
Given a data.frame which contains map data and value data,
render it as a choropleth using ggplot2.
}
\examples{
data(choroplethr)
library(Hmisc) # for cut2
# States with greater than 1M residents
df.map = bind_df_to_map(df_pop_state, "state")
df.map$value = cut2(df.map$value, cuts=c(0,1000000,Inf))
render_choropleth(df.map, "state", "States with a population over 1M", "Population")
# Counties with greater than 1M residents
df.map = bind_df_to_map(df_pop_county, "county")
df.map$value = cut2(df.map$value, cuts=c(0,1000000,Inf))
render_choropleth(df.map, "county", "Counties with a population over 1M", "Population")
# Zip Code Tabulated Areas with less than 1000 people
df_pop_zip = df_pop_zip[df_pop_zip$value < 1000, ]
df.map = bind_df_to_map(df_pop_zip, "zip")
render_choropleth(df.map, "zip", "ZCTAs with less than 1000 people in California", states="CA")
}
\seealso{
\code{\link{get_acs_df}} and \code{\link{bind_df_to_map}}
}
| /man/render_choropleth.Rd | no_license | omerkara/choroplethr | R | false | false | 1,946 | rd | \name{render_choropleth}
\alias{render_choropleth}
\title{Render a choropleth}
\usage{
render_choropleth(choropleth.df, lod, title = "", scaleName = "",
showLabels = TRUE, states = state.abb)
}
\arguments{
\item{choropleth.df}{A data.frame with a column named
"region" and a column named "value". If lod is "state"
then region must contain state names (e.g. "California"
or "CA"). If lod is "county" then region must contain
county FIPS codes. If lod is "zip" then region must
contain 5 digit ZIP codes.}
\item{lod}{A string representing the level of detail of
your data. Must be one of "state", "county" or "zip".}
\item{title}{The title of the image. Defaults to "".}
\item{scaleName}{The name of the scale/legend. Default
to "".}
\item{showLabels}{For State maps, whether or not to show
labels of the states.}
\item{states}{A vector of states to render. Defaults to
state.abb.}
}
\value{
A choropleth.
}
\description{
Given a data.frame which contains map data and value data,
render it as a choropleth using ggplot2.
}
\examples{
data(choroplethr)
library(Hmisc) # for cut2
# States with greater than 1M residents
df.map = bind_df_to_map(df_pop_state, "state")
df.map$value = cut2(df.map$value, cuts=c(0,1000000,Inf))
render_choropleth(df.map, "state", "States with a population over 1M", "Population")
# Counties with greater than or greater than 1M residents
df.map = bind_df_to_map(df_pop_county, "county")
df.map$value = cut2(df.map$value, cuts=c(0,1000000,Inf))
render_choropleth(df.map, "county", "Counties with a population over 1M", "Population")
# Zip Code Tabulated Areas with less than 1000 people
df_pop_zip = df_pop_zip[df_pop_zip$value < 1000, ]
df.map = bind_df_to_map(df_pop_zip, "zip")
render_choropleth(df.map, "zip", "ZCTAs with less than 1000 people in California", states="CA")
}
\seealso{
\code{\link{get_acs_df}} and \code{\link{bind_df_to_map}}
}
|
# Aggregate per-date-range evaluation CSVs and plot how many of the six
# criteria are satisfied (absolute vs. relative) for each test period.
setwd("/Users/fangshu/Desktop/course/Spring\ 2021/IE\ 592/Output_all/v26/Evaluation")
filenames <- list.files()
# filenames eg: evaluation_2011-04-01_2012-03-30.csv
# get 2011-04-01_2012-03-30 from filenames
names <- substr(filenames,12,32) # get the date range (characters 12-32 of the name)
dat <- data.frame(names)
# Sum columns 2:3 of each file into one row per date range.
# NOTE(review): 1:length(filenames) misbehaves when the directory is empty;
# seq_along(filenames) would be safer
for(i in 1:length(filenames)){
dat_single <- read.csv(filenames[i],header = T)
dat[i,2:3] <- apply(dat_single[,2:3], 2, sum)
}
colnames(dat) <- c("Date_Range", "absolute criteria","relative criteria")
# reshape the data to long format for ggplot (one row per range x criterion)
library(reshape2)
library(ggplot2)
dat_plot <- melt(dat,id=c("Date_Range"))
dat_plot$Date_Range <- as.factor(dat_plot$Date_Range)
p <- ggplot(dat_plot,aes(Date_Range,value,color=variable))+geom_line(aes(group=variable)) + coord_flip()
# reverse the discrete axis so the earliest range appears at the top after coord_flip
p <- p + scale_x_discrete(limits = rev(levels(dat_plot$Date_Range)))
p <- p + theme(legend.title = element_blank(),legend.position = "top") + labs(y = "How many criteria are satisfied out of 6", x = "Date Range of the test data")
#ggsave('evaluation.png',p)
p
| /Visualization/Plot.R | no_license | lynnh20/Principal-Interaction-Modeling-ISU | R | false | false | 1,007 | r | setwd("/Users/fangshu/Desktop/course/Spring\ 2021/IE\ 592/Output_all/v26/Evaluation")
filenames <- list.files()
# filenames eg: evaluation_2011-04-01_2012-03-30.csv
# get 2011-04-01_2012-03-30 from filenames
names <- substr(filenames,12,32) # get the date range (characters 12-32 of the name)
dat <- data.frame(names)
# Sum columns 2:3 of each file into one row per date range.
# NOTE(review): 1:length(filenames) misbehaves when the directory is empty;
# seq_along(filenames) would be safer
for(i in 1:length(filenames)){
dat_single <- read.csv(filenames[i],header = T)
dat[i,2:3] <- apply(dat_single[,2:3], 2, sum)
}
colnames(dat) <- c("Date_Range", "absolute criteria","relative criteria")
# reshape the data to long format for ggplot (one row per range x criterion)
library(reshape2)
library(ggplot2)
dat_plot <- melt(dat,id=c("Date_Range"))
dat_plot$Date_Range <- as.factor(dat_plot$Date_Range)
p <- ggplot(dat_plot,aes(Date_Range,value,color=variable))+geom_line(aes(group=variable)) + coord_flip()
# reverse the discrete axis so the earliest range appears at the top after coord_flip
p <- p + scale_x_discrete(limits = rev(levels(dat_plot$Date_Range)))
p <- p + theme(legend.title = element_blank(),legend.position = "top") + labs(y = "How many criteria are satisfied out of 6", x = "Date Range of the test data")
#ggsave('evaluation.png',p)
p
|
#' Get indegree values for all nodes
#' @description Get the indegree values for all
#' nodes in a graph.
#' @param graph a graph object of class
#' \code{dgr_graph}.
#' @param normalized set as \code{FALSE} (the default),
#' the indegree will be provided for each of
#' the nodes (as a count of edges to each node). When set
#' as \code{TRUE}, then the result for each node will be
#' divided by the total number of nodes in the graph minus 1.
#' @return a data frame with indegree values for
#' each of the nodes.
#' @examples
#' # Create a random graph
#' graph <-
#' create_random_graph(
#' n = 10, m = 22,
#' set_seed = 23)
#'
#' # Get the indegree values for all nodes
#' # in the graph
#' get_degree_in(graph)
#' #> id indegree
#' #> 1 1 0
#' #> 2 2 0
#' #> 3 3 1
#' #> 4 4 0
#' #> 5 5 3
#' #> 6 6 4
#' #> 7 7 3
#' #> 8 8 2
#' #> 9 9 4
#' #> 10 10 5
#'
#' # Add the indegree values to the graph
#' # as a node attribute
#' graph <-
#' graph %>%
#' join_node_attrs(
#' df = get_degree_in(.))
#'
#' # Display the graph's node data frame
#' get_node_df(graph)
#' #> id type label value indegree
#' #> 1 1 <NA> 1 6.0 0
#' #> 2 2 <NA> 2 2.5 0
#' #> 3 3 <NA> 3 3.5 1
#' #> 4 4 <NA> 4 7.5 0
#' #> 5 5 <NA> 5 8.5 3
#' #> 6 6 <NA> 6 4.5 4
#' #> 7 7 <NA> 7 10.0 3
#' #> 8 8 <NA> 8 10.0 2
#' #> 9 9 <NA> 9 8.5 4
#' #> 10 10 <NA> 10 10.0 5
#' @importFrom igraph degree
#' @export get_degree_in
get_degree_in <- function(graph,
                          normalized = FALSE) {

  # Validation: the graph object must be a valid dgr_graph
  if (!graph_object_valid(graph)) {
    stop("The graph object is not valid.")
  }

  # Convert the graph to an igraph object
  ig_graph <- to_igraph(graph)

  # Get the indegree values for each of the graph's nodes.
  # `normalized` is forwarded directly to igraph::degree(), replacing the
  # original duplicated if/else branches that differed only in this literal.
  indegree_values <-
    igraph::degree(
      ig_graph,
      mode = "in",
      normalized = normalized)

  # Assemble a data frame of node IDs and indegree scores; node IDs come
  # back as the names of the igraph result vector.
  data.frame(
    id = as.integer(names(indegree_values)),
    indegree = indegree_values,
    stringsAsFactors = FALSE)
}
| /R/get_degree_in.R | permissive | ktaranov/DiagrammeR | R | false | false | 2,465 | r | #' Get indegree values for all nodes
#' @description Get the indegree values for all
#' nodes in a graph.
#' @param graph a graph object of class
#' \code{dgr_graph}.
#' @param normalized set as \code{FALSE} (the default),
#' the indegree will be provided for each of
#' the nodes (as a count of edges to each node). When set
#' as \code{TRUE}, then the result for each node will be
#' divided by the total number of nodes in the graph minus 1.
#' @return a data frame with indegree values for
#' each of the nodes.
#' @examples
#' # Create a random graph
#' graph <-
#' create_random_graph(
#' n = 10, m = 22,
#' set_seed = 23)
#'
#' # Get the indegree values for all nodes
#' # in the graph
#' get_degree_in(graph)
#' #> id indegree
#' #> 1 1 0
#' #> 2 2 0
#' #> 3 3 1
#' #> 4 4 0
#' #> 5 5 3
#' #> 6 6 4
#' #> 7 7 3
#' #> 8 8 2
#' #> 9 9 4
#' #> 10 10 5
#'
#' # Add the indegree values to the graph
#' # as a node attribute
#' graph <-
#' graph %>%
#' join_node_attrs(
#' df = get_degree_in(.))
#'
#' # Display the graph's node data frame
#' get_node_df(graph)
#' #> id type label value indegree
#' #> 1 1 <NA> 1 6.0 0
#' #> 2 2 <NA> 2 2.5 0
#' #> 3 3 <NA> 3 3.5 1
#' #> 4 4 <NA> 4 7.5 0
#' #> 5 5 <NA> 5 8.5 3
#' #> 6 6 <NA> 6 4.5 4
#' #> 7 7 <NA> 7 10.0 3
#' #> 8 8 <NA> 8 10.0 2
#' #> 9 9 <NA> 9 8.5 4
#' #> 10 10 <NA> 10 10.0 5
#' @importFrom igraph degree
#' @export get_degree_in
get_degree_in <- function(graph,
                          normalized = FALSE) {

  # Validation: the graph object must be a valid dgr_graph
  if (!graph_object_valid(graph)) {
    stop("The graph object is not valid.")
  }

  # Convert the graph to an igraph object
  ig_graph <- to_igraph(graph)

  # Get the indegree values for each of the graph's nodes.
  # `normalized` is forwarded directly to igraph::degree(), replacing the
  # original duplicated if/else branches that differed only in this literal.
  indegree_values <-
    igraph::degree(
      ig_graph,
      mode = "in",
      normalized = normalized)

  # Assemble a data frame of node IDs and indegree scores; node IDs come
  # back as the names of the igraph result vector.
  data.frame(
    id = as.integer(names(indegree_values)),
    indegree = indegree_values,
    stringsAsFactors = FALSE)
}
|
#!/usr/bin/env Rscript
# MRI schedule report: pulls MRI/visit data from the UMMAP General REDCap
# project, demographics from MiNDSet, and milestone status from UMMAP UDS3;
# derives a per-participant next-MRI action and priority; writes a CSV report.
# Load useful libraries ----
suppressMessages( library(crayon) )
cat(cyan("Loading packages...\n"))
suppressMessages( library(dplyr) )
suppressMessages( library(readr) )
suppressMessages( library(rlang) )
suppressMessages( library(readxl) )
suppressMessages( library(stringr) )
suppressMessages( library(lubridate) )
# Helpful globals and functions ----
cat(cyan("Loading globals and helpers...\n"))
# config.R / helpers.R presumably define the REDCAP_API_TOKEN_* globals and
# export_redcap_records() used below -- verify against those files
source("~/Box/Documents/R_helpers/config.R")
source("~/Box/Documents/R_helpers/helpers.R")
# Get Data ----
# _ UMMAP General ----
cat(cyan("Getting MRI data from UMMAP General...\n"))
# Fields to request from the UMMAP General REDCap project
fields_ug_mri_raw <-
c(
"subject_id"
, "exam_date"
# Get from MiNDSet and not UM MAP General
# , "sex_value"
# , "race_value"
, "mri_date"
, "uds_dx"
, "mri_elig_consent"
, "mri_elig_safety_screen"
, "mri_elig_yn"
, "scan_05_func_rest_motion" # Scan 5: func rest % Good Values
)
# REDCap expects a single comma-separated field list
fields_ug_mri <- fields_ug_mri_raw %>% paste(collapse = ",")
json_ug_mri <-
export_redcap_records(
token = REDCAP_API_TOKEN_UMMAP_GEN,
fields = fields_ug_mri,
# vp = TRUE,
vp = FALSE,
# Filter for UMMAP IDs during UMMAP period
filterLogic = paste0("(",
"[subject_id] >= 'UM00000001'",
" AND ",
"[subject_id] <= 'UM00009999'",
# Remove exam_date filtering here
# because some MRI Eligibility forms
# completed before 2017
# " AND ",
# "[exam_date] >= '2017-03-01'",
")"))
# Parse the JSON payload; empty strings become NA for easier filtering
df_ug_mri <- jsonlite::fromJSON(json_ug_mri) %>% as_tibble() %>% na_if("")
# - MiNDSet
cat(cyan("Getting Demographic data from MiNDSet...\n"))
# Only demographics (sex, race) are taken from MiNDSet
fields_ms_mri_raw <-
c(
"subject_id"
, "sex_value"
, "race_value"
)
fields_ms_mri <- fields_ms_mri_raw %>% paste(collapse = ",")
json_ms_mri <-
export_redcap_records(
token = REDCAP_API_TOKEN_MINDSET,
fields = fields_ms_mri,
# vp = TRUE,
vp = FALSE,
# Filter for UMMAP IDs during UMMAP period
filterLogic = paste0("(",
"[subject_id] >= 'UM00000001'",
" AND ",
"[subject_id] <= 'UM00009999'",
# Remove exam_date filtering here
# because some MRI Eligibility forms
# completed before 2017
# " AND ",
# "[exam_date] >= '2017-03-01'",
")"))
# Parse the JSON payload; empty strings become NA
df_ms_mri <- jsonlite::fromJSON(json_ms_mri) %>% as_tibble() %>% na_if("")
# _ UMMAP UDS3
# Milestone form fields, used below to identify milestoned participants
fields_u3_raw <-
c(
"ptid"
, "note_mlstn_type"
, "protocol"
)
fields_u3 <- fields_u3_raw %>% paste(collapse = ",")
json_u3 <-
export_redcap_records(
token = REDCAP_API_TOKEN_UDS3n,
fields = fields_u3,
# vp = TRUE,
vp = FALSE,
# Filter for UMMAP IDs during UMMAP period; only records that have a
# Milestone form ([note_mlstn_type] non-empty) are requested
filterLogic = paste0("(",
"[ptid] >= 'UM00000001'",
" AND ",
"[ptid] <= 'UM00009999'",
" AND ",
"[note_mlstn_type] != ''",
")"))
# Parse the JSON payload; empty strings become NA
df_u3 <- jsonlite::fromJSON(json_u3) %>% as_tibble() %>% na_if("")
# Clean Data ----
cat(cyan("Cleaning raw data...\n"))
# _ UMMAP General ----
cat(cyan("Getting list of MRI-ineligible participants...\n"))
# Get MRI-ineligible IDs from UMMAP General's `mri_elig_yn == 0`
df_inelig_ug <- df_ug_mri %>%
# Also need to retain when consent == 0, changed and to or
filter(!is.na(mri_elig_consent) |
!is.na(mri_elig_safety_screen),
mri_elig_yn == '0') %>%
select(subject_id, mri_elig_yn)
inelig_ids_ug <- df_inelig_ug %>% pull(subject_id) %>% sort() %>% unique()
# _ _ Define different Dx codes ----
# Diagnosis code groups used to collapse `uds_dx` into coarse categories
nl_codes <- c(26, 17) # 17 = Depression
mci_codes <- c(1, 2, 27, 28, 29, 31, 34) # 29 = ImpNoMCI
dem_codes <- c(3, 4, 5, 6, 9, 10, 11, 12, 13, 35)
# NOTE(review): uds_dx arrives as character from JSON while the code vectors
# are numeric; %in% coerces both to character, so "1" matches 1 -- but confirm
df_ug_mri_cln <- df_ug_mri %>%
# Keep only UM IDs
filter(str_detect(subject_id, "^UM\\d{8}$")) %>%
# Keep only UM IDs associated with UM MAP range
filter(subject_id >= "UM00000543") %>%
# Keep only participant-visit records with visit dates
filter(!is.na(exam_date)) %>%
arrange(subject_id, exam_date) %>%
# select(subject_id, -redcap_event_name, exam_date, mri_date, uds_dx) %>%
select(subject_id, -redcap_event_name, exam_date,
# sex_value, race_value,
mri_date, uds_dx,
pct_good_val = scan_05_func_rest_motion) %>%
# mutate `uds_dx` codes to English
mutate(uds_dx = case_when(
uds_dx %in% mci_codes ~ "MCI",
uds_dx %in% dem_codes ~ "Dementia",
uds_dx %in% nl_codes ~ "Normal",
!is.na(uds_dx) ~ "Other",
TRUE ~ NA_character_
)) %>%
# Clean out record that has double-assigned UM MAP ID :(
filter(!(subject_id == "UM00001353" & exam_date == "2017-05-01")) %>%
# Filter out exam date here
filter(exam_date >= '2017-03-01')
# Merge in sex_value and race_value from MiNDSet
df_ug_mri_cln <- left_join(df_ug_mri_cln,
df_ms_mri,
by = "subject_id") %>%
# rename `sex_value` and `race_value`
rename(sex = sex_value, race = race_value) %>%
# mutate `sex` codes to labels
mutate(sex = case_when(
sex == 1 ~ "Male",
sex == 2 ~ "Female",
TRUE ~ NA_character_
)) %>%
# mutate `race` codes to labels
mutate(race = case_when(
race == 1 ~ "White",
race == 2 ~ "African American",
race == 3 ~ "Asian",
race == 4 ~ "Hispanic",
race == 5 ~ "Other",
race == 6 ~ "Unknown",
TRUE ~ NA_character_
))
# _ _ UMMAP UDS3 ----
cat(cyan("Getting list of milestoned participants...\n"))
# Get milestoned IDs from UMMAP UDS3's `note_mlstn_type` and `protocol`
df_mlstn_u3 <- df_u3 %>%
# Keep only UM IDs
filter(str_detect(ptid, "^UM\\d{8}$")) %>%
# Keep only the latest Milestone form data
# NOTE(review): max() on `redcap_event_name` compares strings; confirm the
# event names sort chronologically
group_by(ptid) %>%
filter(redcap_event_name == max(redcap_event_name)) %>%
ungroup() %>%
filter(note_mlstn_type == 0 |
(note_mlstn_type == 1 & protocol == 1) |
(note_mlstn_type == 1 & protocol == 2))
mlstn_ids_u3 <- df_mlstn_u3 %>% pull(ptid) %>% sort() %>% unique()
# Since ineligible is getting more IDs now, need to override if ID has been
# milestoned (i.e., milestoned takes priority)
df_mlstn_ids_u3 <- tibble(mlstn_ids_u3) %>%
rename(subject_id = mlstn_ids_u3) %>%
mutate(mlstn = 1)
df_inelig_ids_ug <- tibble(inelig_ids_ug) %>%
rename(subject_id = inelig_ids_ug) %>%
mutate(inelig = 1)
# Full join flags each ID as milestoned, ineligible, or both
df_mlstn_inelig_ids <- full_join(df_mlstn_ids_u3, df_inelig_ids_ug,
by = "subject_id")
# Ineligible = flagged ineligible AND not milestoned
inelig_ids_ug <- df_mlstn_inelig_ids %>%
filter(inelig == 1,
is.na(mlstn)) %>%
pull(subject_id) %>%
sort() %>%
unique()
mlstn_ids_u3 <- df_mlstn_inelig_ids %>%
filter(mlstn == 1) %>%
pull(subject_id) %>%
sort() %>%
unique()
# Process Data ----
cat(cyan("Processing data...\n"))
# _ Nest all but `subject_id` as df ----
# (i.e., nest `exam_date`, `mri_date`, `uds_dx`)
df_ug_mri_nest <- df_ug_mri_cln %>%
tidyr::nest(data = -c(subject_id))
# _ Derive `mri_action` based on data in nested df (`data`) ----
# rowwise() lets each participant's nested visit history (`data`) be
# inspected as a small data frame; the case_when below walks the last one
# or two diagnoses to decide the next scheduling action.
suppressWarnings(
df_ug_mri_nest_mut <- df_ug_mri_nest %>%
rowwise() %>%
mutate(data_nrow = nrow(data)) %>%
# NOTE(review): dx_max_row is the COUNT of non-NA dx values, which only
# equals the index of the last non-NA value when NA diagnoses are not
# interleaved with non-NA ones -- confirm visit data satisfies this
mutate(dx_max_row = case_when(
all(is.na(data$uds_dx)) ~ NA_integer_,
any(!is.na(data$uds_dx)) ~
# Last non-NA value in `data$uds_dx` column
length(data$uds_dx[!is.na(data$uds_dx)])
)) %>%
# mri_max_row: index of the most recent visit that has an MRI date
mutate(mri_max_row = case_when(
all(is.na(data$mri_date)) ~ NA_integer_,
any(!is.na(data$mri_date)) ~
as.integer(
max(seq_len(nrow(data))[!is.na(data$mri_date)], na.rm = TRUE)
),
TRUE ~ NA_integer_
)) %>%
mutate(recent_dx = data$uds_dx[dx_max_row]) %>%
# Scheduling rules: Normal -> ~24 months after last MRI, MCI -> ~12
# months, Dementia -> stop; ineligible/milestoned/never-scanned short-
# circuit at the top (case_when takes the first matching condition)
mutate(mri_action = case_when(
# MRI-ineligible participants
subject_id %in% inelig_ids_ug ~ "Ineligible",
# Milestoned participants
subject_id %in% mlstn_ids_u3 ~ "Milestoned",
# no `mri_date`s at all => "Not Scanned"
all(is.na(data$mri_date)) ~ "Not Scanned",
# 1 visit
### NL
data_nrow == 1 && data$uds_dx[data_nrow] == "Normal" ~
paste(
"Schedule next scan ~",
# as.character(as_date(data$exam_date[data_nrow]) %m+% months(23))),
as.character(as_date(data$mri_date[mri_max_row]) %m+% months(23))),
### MCI
data_nrow == 1 && data$uds_dx[data_nrow] == "MCI" ~
paste(
"Schedule next scan ~",
# as.character(as_date(data$exam_date[data_nrow]) %m+% months(11))),
as.character(as_date(data$mri_date[mri_max_row]) %m+% months(11))),
### Dem
data_nrow == 1 && data$uds_dx[data_nrow] == "Dementia" ~
"Dementia Dx: Stop Scanning",
# 2 visits
### NL + NL
data_nrow > 1 &&
data$uds_dx[data_nrow-1] == "Normal" &&
data$uds_dx[data_nrow] == "Normal" ~
paste(
"Schedule next scan ~",
# as.character(as_date(data$exam_date[data_nrow-1]) %m+% months(23))),
as.character(as_date(data$mri_date[mri_max_row]) %m+% months(23))),
### MCI + NL
data_nrow > 1 &&
data$uds_dx[data_nrow-1] == "MCI" &&
data$uds_dx[data_nrow] == "Normal" ~
paste(
"Schedule next scan ~",
# as.character(as_date(data$exam_date[data_nrow]) %m+% months(11))),
as.character(as_date(data$mri_date[mri_max_row]) %m+% months(11))),
### XXX + MCI
data_nrow > 1 &&
data$uds_dx[data_nrow] == "MCI" ~
paste(
"Schedule next scan ~",
# as.character(as_date(data$exam_date[data_nrow]) %m+% months(11))),
as.character(as_date(data$mri_date[mri_max_row]) %m+% months(11))),
### XXX + Dem
data_nrow > 1 &&
data$uds_dx[data_nrow] == "Dementia" ~
"Dementia Dx: Stop Scanning",
### MCI + `NA`
data_nrow > 1 &&
data$uds_dx[data_nrow-1] == "MCI" &&
is.na(data$uds_dx[data_nrow]) ~
paste(
"Schedule next scan ~",
# as.character(as_date(data$exam_date[data_nrow-1]) %m+% months(11))),
as.character(as_date(data$mri_date[mri_max_row]) %m+% months(11))),
### NL + `NA`
data_nrow > 1 &&
data$uds_dx[data_nrow-1] == "Normal" &&
is.na(data$uds_dx[data_nrow]) ~
paste(
"Schedule next scan ~",
# as.character(as_date(data$exam_date[data_nrow-1]) %m+% months(23))),
as.character(as_date(data$mri_date[mri_max_row]) %m+% months(23))),
# catch-all
TRUE ~ NA_character_
)) %>%
ungroup()
)
# _ Unnest nested data and reshape/clean for easier digestion ----
# Widen per-visit values into one row per participant with columns like
# exam_date__1, mri_date__2, etc.; `recent_dx` and `mri_action` (constant
# across a participant's visits) are coalesced back into single columns.
# NOTE(review): gather()/spread() are superseded by pivot_longer()/
# pivot_wider(); consider migrating when revisiting this script
df_ug_mri_unnest <- df_ug_mri_nest_mut %>%
select(-data_nrow, -mri_max_row, -dx_max_row) %>%
tidyr::unnest(data) %>%
calculate_visit_num(subject_id, exam_date) %>%
tidyr::gather(-subject_id, -visit_num, -sex, -race,
key = "key", value = "value") %>%
tidyr::unite("key__visit_num", key, visit_num, sep = "__") %>%
tidyr::spread(key = key__visit_num, value = value) %>%
mutate(recent_dx =
coalesce(
!!!syms(
tidyselect::vars_select(names(.), starts_with("recent_dx__"))
)
)
) %>%
mutate(mri_action =
coalesce(
!!!syms(
tidyselect::vars_select(names(.), starts_with("mri_action__"))
)
)
) %>%
select(-starts_with("recent_dx__")) %>%
select(-starts_with("mri_action__")) %>%
select(subject_id, mri_action, recent_dx, everything())
# _ Calculate MRI priority score
# One point each for: not ineligible, male, African American, never scanned;
# then forced to 0 for dementia, ineligible, or milestoned participants
# (the zeroing steps run last, so they override any accumulated points)
df_ug_mri_unnest <- df_ug_mri_unnest %>%
mutate(mri_priority = 0L) %>%
mutate(mri_priority = if_else(mri_action != "Ineligible",
mri_priority + 1L, mri_priority)) %>%
mutate(mri_priority = if_else(sex == "Male",
mri_priority + 1L, mri_priority)) %>%
mutate(mri_priority = if_else(race == "African American",
mri_priority + 1L, mri_priority)) %>%
mutate(mri_priority = if_else(mri_action == "Not Scanned",
mri_priority + 1L, mri_priority)) %>%
mutate(mri_priority = if_else(mri_action == "Dementia Dx: Stop Scanning",
0L, mri_priority)) %>%
mutate(mri_priority = if_else(mri_action == "Ineligible",
0L, mri_priority)) %>%
mutate(mri_priority = if_else(mri_action == "Milestoned",
0L, mri_priority)) %>%
select(subject_id, mri_action, mri_priority, everything())
# Write CSV ----
cat(cyan("Writing CSV file...\n"))
# Write the report to the shared Box folder; NA values are left blank
readr::write_csv(df_ug_mri_unnest,
paste0("~/Box/Documents/",
"MADC_gen/MRI_schedule_report/",
"MRI_Schedule_Report.csv"),
na = "")
cat(magenta("\nDone.\n\n"))
###@ #==-- : --==# @##==---==##@##==---==##@ #==-- : --==# @###
#==##@ #==-- --==# @##==---==##@ @##==---==##@ #==-- --==# @##==#
#--==##@ #==-==# @##==---==##@ # @##==---==##@ #==-==# @##==--#
#=---==##@ #=# @##==---==##@ #=# @##==---==##@ #=# @##==---=#
##==---==##@ # @##==---==##@ #==-==# @##==---==##@ # @##==---==##
#@##==---==##@ @##==---==##@ #==-- --==# @##==---==##@ @##==---==##@#
# @##==---==##@##==---==##@ EXTRA : SPACE @##==---==##@##==---==##@ #
#@##==---==##@ @##==---==##@ #==-- --==# @##==---==##@ @##==---==##@#
##==---==##@ # @##==---==##@ #==-==# @##==---==##@ # @##==---==##
#=---==##@ #=# @##==---==##@ #=# @##==---==##@ #=# @##==---=#
#--==##@ #==-==# @##==---==##@ # @##==---==##@ #==-==# @##==--#
#==##@ #==-- --==# @##==---==##@ @##==---==##@ #==-- --==# @##==#
###@ #==-- : --==# @##==---==##@##==---==##@ #==-- : --==# @###
#==##@ #==-- --==# @##==---==##@ @##==---==##@ #==-- --==# @##==#
#--==##@ #==-==# @##==---==##@ # @##==---==##@ #==-==# @##==--#
#=---==##@ #=# @##==---==##@ #=# @##==---==##@ #=# @##==---=#
##==---==##@ # @##==---==##@ #==-==# @##==---==##@ # @##==---==##
#@##==---==##@ @##==---==##@ #==-- --==# @##==---==##@ @##==---==##@#
# @##==---==##@##==---==##@ EXTRA : SPACE @##==---==##@##==---==##@ #
#@##==---==##@ @##==---==##@ #==-- --==# @##==---==##@ @##==---==##@#
##==---==##@ # @##==---==##@ #==-==# @##==---==##@ # @##==---==##
#=---==##@ #=# @##==---==##@ #=# @##==---==##@ #=# @##==---=#
#--==##@ #==-==# @##==---==##@ # @##==---==##@ #==-==# @##==--#
#==##@ #==-- --==# @##==---==##@ @##==---==##@ #==-- --==# @##==#
###@ #==-- : --==# @##==---==##@##==---==##@ #==-- : --==# @### | /MRI_schedule_report.R | no_license | MichiganADC/MRI_schedule_report | R | false | false | 15,205 | r | #!/usr/bin/env Rscript
# Load useful libraries ----
suppressMessages( library(crayon) )
cat(cyan("Loading packages...\n"))
suppressMessages( library(dplyr) )
suppressMessages( library(readr) )
suppressMessages( library(rlang) )
suppressMessages( library(readxl) )
suppressMessages( library(stringr) )
suppressMessages( library(lubridate) )
# Helpful globals and functions ----
cat(cyan("Loading globals and helpers...\n"))
source("~/Box/Documents/R_helpers/config.R")
source("~/Box/Documents/R_helpers/helpers.R")
# Get Data ----
# _ UMMAP General ----
cat(cyan("Getting MRI data from UMMAP General...\n"))
fields_ug_mri_raw <-
c(
"subject_id"
, "exam_date"
# Get from MiNDSet and not UM MAP General
# , "sex_value"
# , "race_value"
, "mri_date"
, "uds_dx"
, "mri_elig_consent"
, "mri_elig_safety_screen"
, "mri_elig_yn"
, "scan_05_func_rest_motion" # Scan 5: func rest % Good Values
)
fields_ug_mri <- fields_ug_mri_raw %>% paste(collapse = ",")
json_ug_mri <-
export_redcap_records(
token = REDCAP_API_TOKEN_UMMAP_GEN,
fields = fields_ug_mri,
# vp = TRUE,
vp = FALSE,
# Filter for UMMAP IDs during UMMAP period
filterLogic = paste0("(",
"[subject_id] >= 'UM00000001'",
" AND ",
"[subject_id] <= 'UM00009999'",
# Remove exam_date filtering here
# because some MRI Eligibility forms
# completed before 2017
# " AND ",
# "[exam_date] >= '2017-03-01'",
")"))
df_ug_mri <- jsonlite::fromJSON(json_ug_mri) %>% as_tibble() %>% na_if("")
# - MiNDSet
cat(cyan("Getting Demographic data from MiNDSet...\n"))
fields_ms_mri_raw <-
c(
"subject_id"
, "sex_value"
, "race_value"
)
fields_ms_mri <- fields_ms_mri_raw %>% paste(collapse = ",")
json_ms_mri <-
export_redcap_records(
token = REDCAP_API_TOKEN_MINDSET,
fields = fields_ms_mri,
# vp = TRUE,
vp = FALSE,
# Filter for UMMAP IDs during UMMAP period
filterLogic = paste0("(",
"[subject_id] >= 'UM00000001'",
" AND ",
"[subject_id] <= 'UM00009999'",
# Remove exam_date filtering here
# because some MRI Eligibility forms
# completed before 2017
# " AND ",
# "[exam_date] >= '2017-03-01'",
")"))
df_ms_mri <- jsonlite::fromJSON(json_ms_mri) %>% as_tibble() %>% na_if("")
# _ UMMAP UDS3
fields_u3_raw <-
c(
"ptid"
, "note_mlstn_type"
, "protocol"
)
fields_u3 <- fields_u3_raw %>% paste(collapse = ",")
json_u3 <-
export_redcap_records(
token = REDCAP_API_TOKEN_UDS3n,
fields = fields_u3,
# vp = TRUE,
vp = FALSE,
# Filter for UMMAP IDs during UMMAP period
filterLogic = paste0("(",
"[ptid] >= 'UM00000001'",
" AND ",
"[ptid] <= 'UM00009999'",
" AND ",
"[note_mlstn_type] != ''",
")"))
df_u3 <- jsonlite::fromJSON(json_u3) %>% as_tibble() %>% na_if("")
# Clean Data ----
cat(cyan("Cleaning raw data...\n"))
# _ UMMAP General ----
cat(cyan("Getting list of MRI-ineligible participants...\n"))
# Get MRI-ineligible IDs from UMMAP General's `mri_elig_yn == 0`
df_inelig_ug <- df_ug_mri %>%
# Also need to retain when consent == 0, changed and to or
filter(!is.na(mri_elig_consent) |
!is.na(mri_elig_safety_screen),
mri_elig_yn == '0') %>%
select(subject_id, mri_elig_yn)
inelig_ids_ug <- df_inelig_ug %>% pull(subject_id) %>% sort() %>% unique()
# _ _ Define different Dx codes ----
nl_codes <- c(26, 17) # 17 = Depression
mci_codes <- c(1, 2, 27, 28, 29, 31, 34) # 29 = ImpNoMCI
dem_codes <- c(3, 4, 5, 6, 9, 10, 11, 12, 13, 35)
df_ug_mri_cln <- df_ug_mri %>%
# Keep only UM IDs
filter(str_detect(subject_id, "^UM\\d{8}$")) %>%
# Keep only UM IDs associated with UM MAP range
filter(subject_id >= "UM00000543") %>%
# Keep only participant-visit records with visit dates
filter(!is.na(exam_date)) %>%
arrange(subject_id, exam_date) %>%
# select(subject_id, -redcap_event_name, exam_date, mri_date, uds_dx) %>%
select(subject_id, -redcap_event_name, exam_date,
# sex_value, race_value,
mri_date, uds_dx,
pct_good_val = scan_05_func_rest_motion) %>%
# mutate `uds_dx` codes to English
mutate(uds_dx = case_when(
uds_dx %in% mci_codes ~ "MCI",
uds_dx %in% dem_codes ~ "Dementia",
uds_dx %in% nl_codes ~ "Normal",
!is.na(uds_dx) ~ "Other",
TRUE ~ NA_character_
)) %>%
# Clean out record that has double-assigned UM MAP ID :(
filter(!(subject_id == "UM00001353" & exam_date == "2017-05-01")) %>%
# Filter out exam date here
filter(exam_date >= '2017-03-01')
# Merge in sex_value and race_value from MiNDSet
df_ug_mri_cln <- left_join(df_ug_mri_cln,
df_ms_mri,
by = "subject_id") %>%
# rename `sex_value` and `race_value`
rename(sex = sex_value, race = race_value) %>%
# mutate `sex`
mutate(sex = case_when(
sex == 1 ~ "Male",
sex == 2 ~ "Female",
TRUE ~ NA_character_
)) %>%
# mutate `race`
mutate(race = case_when(
race == 1 ~ "White",
race == 2 ~ "African American",
race == 3 ~ "Asian",
race == 4 ~ "Hispanic",
race == 5 ~ "Other",
race == 6 ~ "Unknown",
TRUE ~ NA_character_
))
# _ _ UMMAP UDS3 ----
cat(cyan("Getting list of milestoned participants...\n"))
# Get milestoned IDs from UMMAP UDS3's `note_mlstn_type` and `protocol`
df_mlstn_u3 <- df_u3 %>%
# Keep only UM IDs
filter(str_detect(ptid, "^UM\\d{8}$")) %>%
# Keep only the latest Milestone form data
group_by(ptid) %>%
filter(redcap_event_name == max(redcap_event_name)) %>%
ungroup() %>%
filter(note_mlstn_type == 0 |
(note_mlstn_type == 1 & protocol == 1) |
(note_mlstn_type == 1 & protocol == 2))
mlstn_ids_u3 <- df_mlstn_u3 %>% pull(ptid) %>% sort() %>% unique()
# Since ineligible is getting more IDs now, need to override if ID has been
# milestoned (i.e., milestoned takes priority)
df_mlstn_ids_u3 <- tibble(mlstn_ids_u3) %>%
rename(subject_id = mlstn_ids_u3) %>%
mutate(mlstn = 1)
df_inelig_ids_ug <- tibble(inelig_ids_ug) %>%
rename(subject_id = inelig_ids_ug) %>%
mutate(inelig = 1)
df_mlstn_inelig_ids <- full_join(df_mlstn_ids_u3, df_inelig_ids_ug,
by = "subject_id")
inelig_ids_ug <- df_mlstn_inelig_ids %>%
filter(inelig == 1,
is.na(mlstn)) %>%
pull(subject_id) %>%
sort() %>%
unique()
mlstn_ids_u3 <- df_mlstn_inelig_ids %>%
filter(mlstn == 1) %>%
pull(subject_id) %>%
sort() %>%
unique()
# Process Data ----
cat(cyan("Processing data...\n"))
# _ Nest all but `subject_id` as df ----
# (i.e., nest `exam_date`, `mri_date`, `uds_dx`)
df_ug_mri_nest <- df_ug_mri_cln %>%
tidyr::nest(data = -c(subject_id))
# _ Derive `mri_action` based on data in nested df (`data`) ----
# Rowwise over each participant's nested visit tibble (`exam_date`,
# `mri_date`, `uds_dx`), derive:
#   data_nrow   - number of visit rows
#   dx_max_row  - index of the most recent non-NA diagnosis
#   mri_max_row - index of the most recent non-NA MRI date
#   recent_dx   - the most recent non-NA diagnosis value
#   mri_action  - scheduling recommendation; case_when rules are evaluated
#                 top-down and the FIRST match wins, so ordering matters.
suppressWarnings(
  df_ug_mri_nest_mut <- df_ug_mri_nest %>%
    rowwise() %>%
    mutate(data_nrow = nrow(data)) %>%
    mutate(dx_max_row = case_when(
      all(is.na(data$uds_dx)) ~ NA_integer_,
      # BUGFIX: this was `length(data$uds_dx[!is.na(data$uds_dx)])`, i.e.
      # the COUNT of non-NA diagnoses, not the index of the last one. With
      # interspersed NAs (e.g. c("Normal", NA, "MCI")) that indexed the
      # wrong row. Use the same last-non-NA-index idiom as `mri_max_row`.
      any(!is.na(data$uds_dx)) ~
        as.integer(max(which(!is.na(data$uds_dx))))
    )) %>%
    mutate(mri_max_row = case_when(
      all(is.na(data$mri_date)) ~ NA_integer_,
      any(!is.na(data$mri_date)) ~
        as.integer(
          max(seq_len(nrow(data))[!is.na(data$mri_date)], na.rm = TRUE)
        ),
      TRUE ~ NA_integer_  # unreachable (first two cases are exhaustive); kept as a defensive default
    )) %>%
    # Most recent non-missing diagnosis for this participant.
    mutate(recent_dx = data$uds_dx[dx_max_row]) %>%
    mutate(mri_action = case_when(
      # MRI-ineligible participants
      subject_id %in% inelig_ids_ug ~ "Ineligible",
      # Milestoned participants
      subject_id %in% mlstn_ids_u3 ~ "Milestoned",
      # no `mri_date`s at all => "Not Scanned"
      all(is.na(data$mri_date)) ~ "Not Scanned",
      # 1 visit
      ### NL: normals rescan ~2 years (23 months) after the last MRI
      data_nrow == 1 && data$uds_dx[data_nrow] == "Normal" ~
        paste(
          "Schedule next scan ~",
          as.character(as_date(data$mri_date[mri_max_row]) %m+% months(23))),
      ### MCI: rescan ~1 year (11 months) after the last MRI
      data_nrow == 1 && data$uds_dx[data_nrow] == "MCI" ~
        paste(
          "Schedule next scan ~",
          as.character(as_date(data$mri_date[mri_max_row]) %m+% months(11))),
      ### Dem
      data_nrow == 1 && data$uds_dx[data_nrow] == "Dementia" ~
        "Dementia Dx: Stop Scanning",
      # 2+ visits: decisions keyed off the last two diagnoses
      ### NL + NL
      data_nrow > 1 &&
        data$uds_dx[data_nrow - 1] == "Normal" &&
        data$uds_dx[data_nrow] == "Normal" ~
        paste(
          "Schedule next scan ~",
          as.character(as_date(data$mri_date[mri_max_row]) %m+% months(23))),
      ### MCI + NL
      data_nrow > 1 &&
        data$uds_dx[data_nrow - 1] == "MCI" &&
        data$uds_dx[data_nrow] == "Normal" ~
        paste(
          "Schedule next scan ~",
          as.character(as_date(data$mri_date[mri_max_row]) %m+% months(11))),
      ### XXX + MCI
      data_nrow > 1 &&
        data$uds_dx[data_nrow] == "MCI" ~
        paste(
          "Schedule next scan ~",
          as.character(as_date(data$mri_date[mri_max_row]) %m+% months(11))),
      ### XXX + Dem
      data_nrow > 1 &&
        data$uds_dx[data_nrow] == "Dementia" ~
        "Dementia Dx: Stop Scanning",
      ### MCI + `NA`
      data_nrow > 1 &&
        data$uds_dx[data_nrow - 1] == "MCI" &&
        is.na(data$uds_dx[data_nrow]) ~
        paste(
          "Schedule next scan ~",
          as.character(as_date(data$mri_date[mri_max_row]) %m+% months(11))),
      ### NL + `NA`
      data_nrow > 1 &&
        data$uds_dx[data_nrow - 1] == "Normal" &&
        is.na(data$uds_dx[data_nrow]) ~
        paste(
          "Schedule next scan ~",
          as.character(as_date(data$mri_date[mri_max_row]) %m+% months(23))),
      # catch-all: no rule matched (e.g. Impaired-not-MCI histories)
      TRUE ~ NA_character_
    )) %>%
    ungroup()
)
# _ Unnest nested data and reshape/clean for easier digestion ----
# Widen to one row per participant with visit-suffixed columns
# (e.g. `mri_date__1`, `mri_date__2`, ...), then collapse the per-visit
# `recent_dx`/`mri_action` copies back into single columns.
# NOTE(review): gather()/spread() are superseded by pivot_longer()/
# pivot_wider(); kept as-is to preserve behavior.
df_ug_mri_unnest <- df_ug_mri_nest_mut %>%
select(-data_nrow, -mri_max_row, -dx_max_row) %>%
tidyr::unnest(data) %>%
calculate_visit_num(subject_id, exam_date) %>%
tidyr::gather(-subject_id, -visit_num, -sex, -race,
key = "key", value = "value") %>%
tidyr::unite("key__visit_num", key, visit_num, sep = "__") %>%
tidyr::spread(key = key__visit_num, value = value) %>%
mutate(recent_dx =
# first non-NA value across the per-visit recent_dx__* columns
coalesce(
!!!syms(
tidyselect::vars_select(names(.), starts_with("recent_dx__"))
)
)
) %>%
mutate(mri_action =
# first non-NA value across the per-visit mri_action__* columns
coalesce(
!!!syms(
tidyselect::vars_select(names(.), starts_with("mri_action__"))
)
)
) %>%
select(-starts_with("recent_dx__")) %>%
select(-starts_with("mri_action__")) %>%
select(subject_id, mri_action, recent_dx, everything())
# _ Calculate MRI priority score
# Additive score: +1 each for being eligible, male, African American, and
# never scanned; then zeroed out for the Stop-Scanning/Ineligible/
# Milestoned terminal states.
# NOTE(review): `if_else()` returns NA when `mri_action` is NA (the
# case_when catch-all), so such rows end with mri_priority == NA rather
# than 0 -- confirm this is intended.
df_ug_mri_unnest <- df_ug_mri_unnest %>%
mutate(mri_priority = 0L) %>%
mutate(mri_priority = if_else(mri_action != "Ineligible",
mri_priority + 1L, mri_priority)) %>%
mutate(mri_priority = if_else(sex == "Male",
mri_priority + 1L, mri_priority)) %>%
mutate(mri_priority = if_else(race == "African American",
mri_priority + 1L, mri_priority)) %>%
mutate(mri_priority = if_else(mri_action == "Not Scanned",
mri_priority + 1L, mri_priority)) %>%
mutate(mri_priority = if_else(mri_action == "Dementia Dx: Stop Scanning",
0L, mri_priority)) %>%
mutate(mri_priority = if_else(mri_action == "Ineligible",
0L, mri_priority)) %>%
mutate(mri_priority = if_else(mri_action == "Milestoned",
0L, mri_priority)) %>%
select(subject_id, mri_action, mri_priority, everything())
# Write CSV ----
# Export the final one-row-per-participant report; NA cells are written as
# empty strings for spreadsheet friendliness.
# NOTE(review): the output path is hard-coded to a user's Box folder --
# consider parameterizing it for portability.
cat(cyan("Writing CSV file...\n"))
readr::write_csv(df_ug_mri_unnest,
paste0("~/Box/Documents/",
"MADC_gen/MRI_schedule_report/",
"MRI_Schedule_Report.csv"),
na = "")
cat(magenta("\nDone.\n\n"))
###@ #==-- : --==# @##==---==##@##==---==##@ #==-- : --==# @###
#==##@ #==-- --==# @##==---==##@ @##==---==##@ #==-- --==# @##==#
#--==##@ #==-==# @##==---==##@ # @##==---==##@ #==-==# @##==--#
#=---==##@ #=# @##==---==##@ #=# @##==---==##@ #=# @##==---=#
##==---==##@ # @##==---==##@ #==-==# @##==---==##@ # @##==---==##
#@##==---==##@ @##==---==##@ #==-- --==# @##==---==##@ @##==---==##@#
# @##==---==##@##==---==##@ EXTRA : SPACE @##==---==##@##==---==##@ #
#@##==---==##@ @##==---==##@ #==-- --==# @##==---==##@ @##==---==##@#
##==---==##@ # @##==---==##@ #==-==# @##==---==##@ # @##==---==##
#=---==##@ #=# @##==---==##@ #=# @##==---==##@ #=# @##==---=#
#--==##@ #==-==# @##==---==##@ # @##==---==##@ #==-==# @##==--#
#==##@ #==-- --==# @##==---==##@ @##==---==##@ #==-- --==# @##==#
###@ #==-- : --==# @##==---==##@##==---==##@ #==-- : --==# @###
#==##@ #==-- --==# @##==---==##@ @##==---==##@ #==-- --==# @##==#
#--==##@ #==-==# @##==---==##@ # @##==---==##@ #==-==# @##==--#
#=---==##@ #=# @##==---==##@ #=# @##==---==##@ #=# @##==---=#
##==---==##@ # @##==---==##@ #==-==# @##==---==##@ # @##==---==##
#@##==---==##@ @##==---==##@ #==-- --==# @##==---==##@ @##==---==##@#
# @##==---==##@##==---==##@ EXTRA : SPACE @##==---==##@##==---==##@ #
#@##==---==##@ @##==---==##@ #==-- --==# @##==---==##@ @##==---==##@#
##==---==##@ # @##==---==##@ #==-==# @##==---==##@ # @##==---==##
#=---==##@ #=# @##==---==##@ #=# @##==---==##@ #=# @##==---=#
#--==##@ #==-==# @##==---==##@ # @##==---==##@ #==-==# @##==--#
#==##@ #==-- --==# @##==---==##@ @##==---==##@ #==-- --==# @##==#
###@ #==-- : --==# @##==---==##@##==---==##@ #==-- : --==# @### |
# Fit a 10-fold cross-validated elastic-net (alpha = 0.15) model for the
# upper_aerodigestive_tract training set and append the fit summary to the
# model's log file.
library(glmnet)
# FIX: `head=T` relied on partial argument matching and the reassignable
# `T`; spell out `header = TRUE`.
mydata <- read.table("./TrainingSet/AvgRank/upper_aerodigestive_tract.csv",
                     header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])  # predictor columns
y <- as.matrix(mydata[, 1])               # response (column 1)
set.seed(123)  # reproducible CV fold assignment
# Renamed from `glm` to avoid shadowing stats::glm.
cv_fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mse",
                    alpha = 0.15, family = "gaussian", standardize = TRUE)
sink('./Model/EN/AvgRank/upper_aerodigestive_tract/upper_aerodigestive_tract_029.txt', append = TRUE)
print(cv_fit$glmnet.fit)
sink()
| /Model/EN/AvgRank/upper_aerodigestive_tract/upper_aerodigestive_tract_029.R | no_license | leon1003/QSMART | R | false | false | 409 | r | library(glmnet)
# Fit a 10-fold cross-validated elastic-net (alpha = 0.15) model and append
# the fit summary to the model's log file.
# FIX: `head=T` relied on partial argument matching and the reassignable
# `T`; spell out `header = TRUE`.
mydata <- read.table("./TrainingSet/AvgRank/upper_aerodigestive_tract.csv",
                     header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])  # predictor columns
y <- as.matrix(mydata[, 1])               # response (column 1)
set.seed(123)  # reproducible CV fold assignment
# Renamed from `glm` to avoid shadowing stats::glm.
cv_fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mse",
                    alpha = 0.15, family = "gaussian", standardize = TRUE)
sink('./Model/EN/AvgRank/upper_aerodigestive_tract/upper_aerodigestive_tract_029.txt', append = TRUE)
print(cv_fit$glmnet.fit)
sink()
|
# FIX: install the packages only when missing -- an unconditional
# install.packages() re-downloads on every run and may prompt for a mirror.
if (!requireNamespace("coala", quietly = TRUE)) install.packages("coala")
if (!requireNamespace("phytools", quietly = TRUE)) install.packages("phytools")
library(coala)
library(phytools)
# Coalescent model: 5 samples x 10 diploid loci of 500 bp, with mutation
# and recombination (rate 10 each); record the simulated gene trees and
# nucleotide diversity (pi).
model <- coal_model(sample_size = 5, loci_number = 10, loci_length = 500, ploidy = 2) +
  feat_mutation(10) +
  feat_recombination(10) +
  sumstat_trees() +
  sumstat_nucleotide_div()
stats <- simulate(model, nsim = 1)
Diversity <- stats$pi          # per-locus nucleotide diversity
Nloci <- length(stats$trees)   # number of loci with recorded trees
# First tree of locus 1
t1 <- read.tree(text = stats$trees[[1]][1])
plot(t1)
axisPhylo()
Age1 <- max(nodeHeights(t1))   # height of the tree (age of the root)
# First tree of locus 2, for comparison
t2 <- read.tree(text = stats$trees[[2]][1])
plot(t2)
axisPhylo()
# Side-by-side plots of the two loci
par(mfrow = c(1, 2))
plot(t1)
axisPhylo()
plot(t2)
axisPhylo()
compare.chronograms(t1, t2)
# Two trees from the SAME locus (recombination yields several per locus)
t1_1 <- read.tree(text = stats$trees[[1]][1])
t1_2 <- read.tree(text = stats$trees[[1]][2])
compare.chronograms(t1_1, t1_2)
# Collect every simulated gene tree across all loci into one multiPhylo
# object for densityTree().
# FIX: the original special-cased the first iteration (`locus == 1 && n == 1`)
# and used `1:ntrees`, which iterates over c(1, 0) when a locus has no
# trees. seq_along() is empty-safe and the NULL-seed pattern removes the
# duplicated read.tree() branch.
# NOTE: ape:::c.phylo reaches into ape's internals (unexported S3 method),
# as in the original.
outPhy <- NULL
for (locus in seq_len(Nloci)) {
  for (n in seq_along(stats$trees[[locus]])) {
    tr <- read.tree(text = stats$trees[[locus]][n])
    outPhy <- if (is.null(outPhy)) tr else ape:::c.phylo(outPhy, tr)
  }
}
# Reset the plotting grid and draw all collected trees as a density tree.
par (mfrow = c(1,1))
densityTree (outPhy)
# Second experiment: 10 samples x 50 loci, mutation rate theta drawn from a
# uniform integer prior on 1..100, recording nucleotide diversity only.
model3 <- coal_model (10, 50) +
feat_mutation (par_prior ("theta", sample.int (100, 1))) +
sumstat_nucleotide_div ()
stats <- simulate (model3, nsim = 40)
# Mean diversity per simulation, and the theta value each simulation drew.
mean_pi <- sapply (stats, function (x) mean (x$pi))
theta <- sapply (stats, function (x) x$pars [["theta"]])
| /Task_02/task02.r | no_license | kenziecook/Tasks | R | false | false | 1,355 | r | install.packages ("coala")
# NOTE(review): unconditional install.packages() in a script re-downloads on
# every run; prefer a requireNamespace() guard.
install.packages ("phytools")
library (coala)
library (phytools)
# Coalescent model: 5 samples x 10 diploid loci of 500 bp, with mutation and
# recombination (rate 10 each); record gene trees and nucleotide diversity.
model <- coal_model (sample_size = 5, loci_number = 10, loci_length = 500, ploidy = 2) +
feat_mutation (10) +
feat_recombination (10) +
sumstat_trees () +
sumstat_nucleotide_div ()
stats <- simulate (model, nsim = 1)
Diversity <- stats$pi
Nloci <- length (stats$trees)
# First tree of locus 1
t1 <- read.tree (text=stats$trees [[1]][1])
plot (t1)
axisPhylo()
# Height of the tree (age of the root)
Age1 <- max (nodeHeights (t1))
# First tree of locus 2, for comparison
t2 <- read.tree (text=stats$trees [[2]][1])
plot (t2)
axisPhylo ()
# Side-by-side plots of the two loci
par (mfrow=c(1,2))
plot (t1)
axisPhylo ()
plot (t2)
axisPhylo ()
compare.chronograms (t1, t2)
# Two trees from the SAME locus (recombination yields several per locus)
t1_1 <- read.tree (text=stats$trees [[1]][1])
t1_2 <- read.tree (text=stats$trees [[1]][2])
compare.chronograms (t1_1, t1_2)
# Collect every simulated gene tree across all loci into one multiPhylo
# object for densityTree().
# FIX: the original special-cased the first iteration (`locus == 1 && n == 1`)
# and used `1:ntrees`, which iterates over c(1, 0) when a locus has no
# trees. seq_along() is empty-safe and the NULL-seed pattern removes the
# duplicated read.tree() branch.
# NOTE: ape:::c.phylo reaches into ape's internals (unexported S3 method),
# as in the original.
outPhy <- NULL
for (locus in seq_len(Nloci)) {
  for (n in seq_along(stats$trees[[locus]])) {
    tr <- read.tree(text = stats$trees[[locus]][n])
    outPhy <- if (is.null(outPhy)) tr else ape:::c.phylo(outPhy, tr)
  }
}
# Reset the plotting grid and draw all collected trees as a density tree.
par (mfrow = c(1,1))
densityTree (outPhy)
# Second experiment: 10 samples x 50 loci, mutation rate theta drawn from a
# uniform integer prior on 1..100, recording nucleotide diversity only.
model3 <- coal_model (10, 50) +
feat_mutation (par_prior ("theta", sample.int (100, 1))) +
sumstat_nucleotide_div ()
stats <- simulate (model3, nsim = 40)
# Mean diversity per simulation, and the theta value each simulation drew.
mean_pi <- sapply (stats, function (x) mean (x$pi))
theta <- sapply (stats, function (x) x$pars [["theta"]])
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/copy_file.R
\name{copy_files}
\alias{copy_files}
\title{copy multiple files from a windows directory}
\usage{
copy_files(source_files, dest_dir, overwrite = FALSE, timestamp = FALSE)
}
\arguments{
\item{source_files}{\code{data.frame} with file_path and file_name of the object to copy}
\item{dest_dir}{string indicating the directory to copy the file(s) into}
\item{overwrite}{logical indicating whether existing files in destination directory should be overwritten - default FALSE results in no copy}
\item{timestamp}{logical indicating whether you'd like to timestamp the copied file}
}
\description{
copy multiple files from a windows directory
}
| /man/copy_files.Rd | no_license | ces0491/fileR | R | false | true | 731 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/copy_file.R
\name{copy_files}
\alias{copy_files}
\title{copy multiple files from a windows directory}
\usage{
copy_files(source_files, dest_dir, overwrite = FALSE, timestamp = FALSE)
}
\arguments{
\item{source_files}{\code{data.frame} with file_path and file_name of the object to copy}
\item{dest_dir}{string indicating the directory to copy the file(s) into}
\item{overwrite}{logical indicating whether existing files in destination directory should be overwritten - default FALSE results in no copy}
\item{timestamp}{logical indicating whether you'd like to timestamp the copied file}
}
\description{
copy multiple files from a windows directory
}
|
# Build a binary classification set from iris: drop the first 50 rows
# (setosa), leaving versicolor vs virginica; shuffle and split 80/20.
# NOTE(review): no set.seed() before sample(), so the train/test split (and
# downstream MCC values) are not reproducible -- confirm whether that is
# intended.
x=data.matrix(iris[-c(1:50),])
y=x[sample(100,replace=FALSE),]
train=y[1:80,]
test=y[81:100,]
# k-nearest-neighbour classifier with Matthews correlation coefficient.
#
# x: training matrix, last column = numeric class label, others = features.
# y: test matrix with the same layout.
# k: number of neighbours (Euclidean distance, ties broken by row order).
#
# Returns the MCC of the predictions on `y`.
# NOTE: the confusion-matrix indexing assumes exactly two classes appear in
# both the true and the predicted labels (a 2x2 table); other inputs will
# index out of bounds.
# FIX: removed the dead `accuracy` computation (assigned but never used)
# and replaced `1:dim(...)`-style loops with seq_len()/ncol().
knn <- function(x, y, k) {
  test_label <- numeric(nrow(y))
  for (i in seq_len(nrow(y))) {
    # Replicate test row i so it can be subtracted from every training row.
    z <- matrix(rep(y[i, ], nrow(x)), nrow(x), byrow = TRUE)
    # Euclidean distance to each training row (labels excluded).
    d_vec <- sqrt(rowSums((x[, -ncol(x)] - z[, -ncol(z)])^2))
    n_vec <- order(d_vec)[1:k]  # indices of the k nearest training rows
    if (k == 1) {
      # A single row index returns a vector; rebuild the 1-row matrix.
      compare_matrix <- t(as.matrix(x[n_vec, ]))
    } else {
      compare_matrix <- x[n_vec, ]
    }
    # Majority vote: sort the label counts ascending and take the last
    # (most frequent) entry's name.
    tbl <- table(compare_matrix[, ncol(x)])
    sort_table <- sort(tbl)
    final_label <- names(sort_table[length(tbl)])
    test_label[i] <- as.numeric(final_label)
  }
  original_label <- y[, ncol(y)]
  # 2x2 confusion matrix (rows: truth, cols: prediction).
  confusion_matrix <- as.matrix(table(original_label, test_label))
  TP <- confusion_matrix[1, 1]
  FN <- confusion_matrix[1, 2]
  FP <- confusion_matrix[2, 1]
  TN <- confusion_matrix[2, 2]
  MCC <- ((TP * TN) - (FP * FN)) / (sqrt((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN)))
  return(MCC)
}
#--------------------plot between MCC and various K--------------------------------------------------------
# Evaluate odd K from 1 to 19 and plot the resulting MCC curve.
aa=seq(1,19,by=2)
bb=c()
# NOTE(review): seq_along(aa) would be the empty-safe idiom; aa is non-empty
# here so 1:length(aa) behaves the same.
for(i in 1:length(aa)){
bb[i]=knn(train,test,aa[i])
}
plot(aa,bb,type='b',pch=20,xlab="K",ylab="MCC")
| /knnWmcc.r | no_license | abhinavraj12345/Machine-Learning-R | R | false | false | 2,053 | r | #data1 = read.csv("C:\\Users\\Abhinav Raj\\Downloads\\Data sets\\breast-cancer-wisconsin.data",header = TRUE)
#-------------------------Reading data--------------------------------------------------------------------
# Binary set from iris (rows 51-150: versicolor vs virginica), shuffled and
# split 80/20. NOTE(review): no set.seed(), so results are not reproducible.
x=data.matrix(iris[-c(1:50),])
y=x[sample(100,replace=FALSE),]
train=y[1:80,]
test=y[81:100,]
# k-nearest-neighbour classifier with Matthews correlation coefficient.
#
# x: training matrix, last column = numeric class label, others = features.
# y: test matrix with the same layout.
# k: number of neighbours (Euclidean distance, ties broken by row order).
#
# Returns the MCC of the predictions on `y`.
# NOTE: the confusion-matrix indexing assumes exactly two classes appear in
# both the true and the predicted labels (a 2x2 table); other inputs will
# index out of bounds.
# FIX: removed the dead `accuracy` computation (assigned but never used)
# and replaced `1:dim(...)`-style loops with seq_len()/ncol().
knn <- function(x, y, k) {
  test_label <- numeric(nrow(y))
  for (i in seq_len(nrow(y))) {
    # Replicate test row i so it can be subtracted from every training row.
    z <- matrix(rep(y[i, ], nrow(x)), nrow(x), byrow = TRUE)
    # Euclidean distance to each training row (labels excluded).
    d_vec <- sqrt(rowSums((x[, -ncol(x)] - z[, -ncol(z)])^2))
    n_vec <- order(d_vec)[1:k]  # indices of the k nearest training rows
    if (k == 1) {
      # A single row index returns a vector; rebuild the 1-row matrix.
      compare_matrix <- t(as.matrix(x[n_vec, ]))
    } else {
      compare_matrix <- x[n_vec, ]
    }
    # Majority vote: sort the label counts ascending and take the last
    # (most frequent) entry's name.
    tbl <- table(compare_matrix[, ncol(x)])
    sort_table <- sort(tbl)
    final_label <- names(sort_table[length(tbl)])
    test_label[i] <- as.numeric(final_label)
  }
  original_label <- y[, ncol(y)]
  # 2x2 confusion matrix (rows: truth, cols: prediction).
  confusion_matrix <- as.matrix(table(original_label, test_label))
  TP <- confusion_matrix[1, 1]
  FN <- confusion_matrix[1, 2]
  FP <- confusion_matrix[2, 1]
  TN <- confusion_matrix[2, 2]
  MCC <- ((TP * TN) - (FP * FN)) / (sqrt((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN)))
  return(MCC)
}
#--------------------plot between MCC and various K--------------------------------------------------------
# Evaluate odd K from 1 to 19 and plot the resulting MCC curve.
aa=seq(1,19,by=2)
bb=c()
# NOTE(review): seq_along(aa) would be the empty-safe idiom; aa is non-empty
# here so 1:length(aa) behaves the same.
for(i in 1:length(aa)){
bb[i]=knn(train,test,aa[i])
}
plot(aa,bb,type='b',pch=20,xlab="K",ylab="MCC")
|
# Read the student roster (one row per student).
students <- read.csv("sampledata_student.csv", header = TRUE, stringsAsFactors = FALSE)
# Read each per-subject exam-score table and left-join its score column
# onto the roster by student_id.
# FIX: replaced nine copy-pasted read.csv + merge statements with a data-
# driven loop, and `T`/`F` with `TRUE`/`FALSE`. Join order matches the
# original, so the resulting column order is unchanged.
subjects <- c("KOKUGO", "SUUGAKU", "SHAKAI", "RIKA", "EIGO",
              "ONGAKU", "BIJUTSU", "HOKENTAIIKU", "GIJUTSUKATEI")
score_tables <- lapply(sprintf("sampledata_%s.csv", subjects),
                       read.csv, header = TRUE, stringsAsFactors = FALSE)
students_scores <- Reduce(
  function(acc, i) {
    merge(acc, score_tables[[i]][, c("student_id", subjects[i])],
          by = "student_id", all.x = TRUE)
  },
  seq_along(subjects),
  students
)
students_scores
# Extract only the exam-score columns.
# NOTE(review): relies on the score columns sitting at positions 6-14 of
# students_scores; any NA from the all.x left-joins would make kmeans()
# error -- confirm the data is complete.
score <- students_scores[,6:14]
score
# k-means clustering into 5 clusters.
cluster_count <- 5
km <- kmeans(score, cluster_count, iter.max = 200)
km
km$cluster
km$centers
# Persist the cluster centroids. NOTE(review): `row.names=F` uses the
# reassignable `F`; prefer `FALSE`.
write.table(km$centers, file = "clustered_means.csv", sep = ",", row.names=F)
# Attach each student's cluster number to their identifying columns.
clustered_student <- data.frame(students_scores[,1:3],km$cluster)
names(clustered_student) <- c("student_id", "name", "class", "cluster_no")
clustered_student_score <- data.frame(clustered_student, score)
write.table(clustered_student_score, file = "clustered_student_score.csv", sep = ",", row.names=F)
# Per-cluster mean of each subject score; should reproduce km$centers.
library(dplyr)
cluster_mean <- clustered_student_score %>%
  group_by(cluster_no) %>%
  summarise(
    KOKUGO = mean(KOKUGO),
    # BUGFIX: the output column was misspelled "SUGAKU", so the result did
    # not line up with the "SUUGAKU" column of km$centers as intended.
    SUUGAKU = mean(SUUGAKU),
    SHAKAI = mean(SHAKAI),
    RIKA = mean(RIKA),
    EIGO = mean(EIGO),
    ONGAKU = mean(ONGAKU),
    BIJUTSU = mean(BIJUTSU),
    HOKENTAIIKU = mean(HOKENTAIIKU),
    GIJUTSUKATEI = mean(GIJUTSUKATEI)
  ) %>%
  as.data.frame
cluster_mean
| /k_means.R | no_license | kaznishi/r_sample | R | false | false | 3,105 | r | # 学生テーブル読み込み
# Read the student roster (one row per student).
students <- read.csv("sampledata_student.csv", header = T, stringsAsFactors = F)
# Read the per-subject exam-score tables.
kokugo <- read.csv("sampledata_KOKUGO.csv", header = T, stringsAsFactors = F)
suugaku <- read.csv("sampledata_SUUGAKU.csv", header = T, stringsAsFactors = F)
shakai <- read.csv("sampledata_SHAKAI.csv", header = T, stringsAsFactors = F)
rika <- read.csv("sampledata_RIKA.csv", header = T, stringsAsFactors = F)
eigo <- read.csv("sampledata_EIGO.csv", header = T, stringsAsFactors = F)
ongaku <- read.csv("sampledata_ONGAKU.csv", header = T, stringsAsFactors = F)
bijutsu <- read.csv("sampledata_BIJUTSU.csv", header = T, stringsAsFactors = F)
hokentaiiku <- read.csv("sampledata_HOKENTAIIKU.csv", header = T, stringsAsFactors = F)
gijutsukatei <- read.csv("sampledata_GIJUTSUKATEI.csv", header = T, stringsAsFactors = F)
# Left-join each subject's score column onto the roster by student_id.
students_scores <- merge(students, kokugo[,c("student_id", "KOKUGO")], by=c("student_id"), all.x=T)
students_scores <- merge(students_scores, suugaku[,c("student_id", "SUUGAKU")], by=c("student_id"), all.x=T)
students_scores <- merge(students_scores, shakai[,c("student_id", "SHAKAI")], by=c("student_id"), all.x=T)
students_scores <- merge(students_scores, rika[,c("student_id", "RIKA")], by=c("student_id"), all.x=T)
students_scores <- merge(students_scores, eigo[,c("student_id", "EIGO")], by=c("student_id"), all.x=T)
students_scores <- merge(students_scores, ongaku[,c("student_id", "ONGAKU")], by=c("student_id"), all.x=T)
students_scores <- merge(students_scores, bijutsu[,c("student_id", "BIJUTSU")], by=c("student_id"), all.x=T)
students_scores <- merge(students_scores, hokentaiiku[,c("student_id", "HOKENTAIIKU")], by=c("student_id"), all.x=T)
students_scores <- merge(students_scores, gijutsukatei[,c("student_id", "GIJUTSUKATEI")], by=c("student_id"), all.x=T)
students_scores
# Extract only the exam-score columns (assumed to sit at positions 6-14).
score <- students_scores[,6:14]
score
# k-means clustering into 5 clusters.
cluster_count <- 5
km <- kmeans(score, cluster_count, iter.max = 200)
km
km$cluster
km$centers
write.table(km$centers, file = "clustered_means.csv", sep = ",", row.names=F)
# Attach each student's cluster number to their identifying columns.
clustered_student <- data.frame(students_scores[,1:3],km$cluster)
names(clustered_student) <- c("student_id", "name", "class", "cluster_no")
clustered_student_score <- data.frame(clustered_student, score)
write.table(clustered_student_score, file = "clustered_student_score.csv", sep = ",", row.names=F)
# Per-cluster mean of each subject score; intended to match km$centers.
# NOTE(review): the output column "SUGAKU" appears to be a misspelling of
# "SUUGAKU", so the column name does not line up with km$centers.
library(dplyr)
cluster_mean <- clustered_student_score %>%
group_by(cluster_no) %>%
summarise(
KOKUGO = mean(KOKUGO),
SUGAKU = mean(SUUGAKU),
SHAKAI = mean(SHAKAI),
RIKA = mean(RIKA),
EIGO = mean(EIGO),
ONGAKU = mean(ONGAKU),
BIJUTSU = mean(BIJUTSU),
HOKENTAIIKU = mean(HOKENTAIIKU),
GIJUTSUKATEI = mean(GIJUTSUKATEI)
) %>%
as.data.frame
cluster_mean
|
# NOTE(review): dropped `rm(list = ls())` -- wiping the caller's global
# environment is a scripting anti-pattern; run the script in a fresh R
# session instead if a clean workspace is needed.
library(readxl)
library(dplyr)
library(RWeka)
library(lubridate)
library(stringr)
library(qdapRegex)
# import all tweet data####
# FIX: spelled out TRUE/FALSE instead of the reassignable T/F.
fin <- list.files(path = "data/", full.names = TRUE)
ds.list <- lapply(fin, read_excel, col_names = FALSE)
ds <- unique(do.call(rbind.data.frame, ds.list))  # drop duplicate tweets
# change column names
names(ds) <- c("created","screenName", "text" , "ID", "map.info.A", "map.info.B")
# parse "month/day/year hour:minute" timestamps
ds$created <- mdy_hm(ds$created)
tweets.clean <- ds$text  # working copy used for text cleaning below
# Convert the map link into numeric longitude/latitude.
# The pattern pulls the "lat,lon&z=14" fragment out of map.info.B --
# presumably a Google-Maps-style URL; confirm against the raw data.
temp <- str_extract(ds$map.info.B,"\\d.*(,)-\\d.*(&z=14)")
locations = strsplit(temp, ",")
ds$latitude <- as.numeric(unlist(lapply(locations, function(x)x[1])))
ds$longitude <- as.numeric(
unlist(
lapply(locations,
function(x)strsplit(x[2], "&z")[[1]][1])))
write.csv(ds, file="urtweets.csv")
# import emoticon table ####
# Lookup table mapping emoji byte sequences to unicode and descriptions.
emoticons <- read.csv("emoticon_conversion_noGraphic.csv", header = F)
# set column names
names(emoticons) <- c("unicode", "bytes","description")
rm(ds.list)
rm(fin)
# remove invalid rows (tweets whose timestamp failed to parse)
# NOTE(review): this filter runs AFTER urtweets.csv is written above, so the
# CSV still contains the invalid rows -- confirm that is intended.
ds <- ds[!is.na(ds$created),]
# get word frequencies and tokens
tokens <- WordTokenizer(ds$text)
# Weka control for n-gram extraction: bigrams through 4-grams.
n.gram.options <- Weka_control(max = 4, min = 2)
ngram_2 <- NGramTokenizer(ds$text, n.gram.options)
# Token frequency table, most frequent first.
t <- arrange(as.data.frame(table(tokens)), desc(Freq))
# search for all emoticons
# emoji.frequency -- rows: individual tweets; columns: emojis. Each cell is
# the regexpr() match position (-1 when the emoji is absent).
# FIX: removed the leftover `print(i)` debug statement (console spam for
# every emoji) and replaced T with TRUE / 1:nrow with seq_len.
emoji.frequency <- matrix(NA, nrow = nrow(ds), ncol = nrow(emoticons))
for (i in seq_len(nrow(emoticons))) {
  # Byte-wise matching: the emoji patterns are raw byte sequences.
  emoji.frequency[, i] <- regexpr(emoticons$bytes[i], ds$text, useBytes = TRUE)
  # If the emoji is present in any tweet, strip it from the cleaned text
  # used for text/sentiment analysis.
  if (any(emoji.frequency[, i] > -1)) {
    tweets.clean <- gsub(emoticons$bytes[i], "", tweets.clean, useBytes = TRUE)
  }
}
# tabulate the number of times each emoji is seen
emoji.counts <- colSums(emoji.frequency > -1)
# append this to the emoticons data.frame
emoticons <- cbind(emoji.counts, emoticons)
# emoticons <- arrange(emoticons, desc(emoji.counts))
# Build emoji.ds: one row per (tweet, emoji) pair -- the tweet's columns
# plus the matching emoticon's columns.
emoji.per.tweet <- rowSums(emoji.frequency > -1)
emoji.indexes <- which(emoji.per.tweet > 0)
# PERF FIX: the original grew emoji.ds with rbind() inside the loop
# (quadratic). Collect the pieces in a list and bind once; rev() preserves
# the original row order, since new rows used to be PREpended.
pieces <- list()
for (i in emoji.indexes) {
  for (j in which(emoji.frequency[i, ] > -1)) {
    pieces[[length(pieces) + 1L]] <- cbind(ds[i, ], emoticons[j, ])
  }
}
emoji.ds <- if (length(pieces) > 0) do.call(rbind, rev(pieces)) else NULL
# extract x, y coordinates ####
# (dead code kept for reference -- per-emoji-row coordinate extraction)
# temp <- str_extract(emoji.ds$map.info.B,"\\d.*(,)-\\d.*(&z=14)")
# locations = strsplit(temp, ",")
# emoji.ds$latitude <- as.numeric(unlist(lapply(locations, function(x)x[1])))
# emoji.ds$longitude <- as.numeric(
#   unlist(
#     lapply(locations,
#            function(x)strsplit(x[2], "&z")[[1]][1])))
# Remove all urls from tweets ####
tweets.clean <- rm_twitter_url(tweets.clean, extract = F, replacement = "", clean = F)
# stats####
# Share of tweets containing at least one emoji.
percentage.emoji <- 100*length(emoji.indexes)/nrow(ds)
cat("percentage of tweets with emoji: ", round(percentage.emoji,2),"%", sep = "")
#
print("Summary of emoji use per tweet (all tweets): \n")
print(summary(emoji.per.tweet))
print("Summary of emoji use per tweet (all tweets containing at least one emoji: \n")
print(summary(emoji.per.tweet[emoji.indexes]))
# write csv containing rows for each unique tweet:emoji combo ####
write.csv(emoji.ds, file = "twimoji.csv")
# write csv containing frequencies of emojis
write.csv(arrange(emoticons, desc(emoji.counts)), "emoticon_counts.csv")
| /twitterEmojiProject/EDA.R | no_license | laurenancona/twimoji | R | false | false | 3,594 | r | rm(list =ls())
library(readxl)
library(dplyr)
library(RWeka)
library(lubridate)
library(stringr)
library(qdapRegex)
# import all tweet data####
# NOTE(review): T/F are reassignable; prefer TRUE/FALSE.
fin <- list.files(path = "data/", full.names = T)
ds.list <- lapply(fin, read_excel, col_names = F)
ds <- unique(do.call(rbind.data.frame, ds.list))
# change column names
names(ds) <- c("created","screenName", "text" , "ID", "map.info.A", "map.info.B")
# parse "month/day/year hour:minute" timestamps
ds$created <- mdy_hm(ds$created)
tweets.clean <- ds$text
# Convert the map link into numeric longitude/latitude (extracts the
# "lat,lon&z=14" fragment of map.info.B).
temp <- str_extract(ds$map.info.B,"\\d.*(,)-\\d.*(&z=14)")
locations = strsplit(temp, ",")
ds$latitude <- as.numeric(unlist(lapply(locations, function(x)x[1])))
ds$longitude <- as.numeric(
unlist(
lapply(locations,
function(x)strsplit(x[2], "&z")[[1]][1])))
write.csv(ds, file="urtweets.csv")
# import emoticon table ####
emoticons <- read.csv("emoticon_conversion_noGraphic.csv", header = F)
# set column names
names(emoticons) <- c("unicode", "bytes","description")
rm(ds.list)
rm(fin)
# remove invalid rows (tweets whose timestamp failed to parse); note this
# happens AFTER urtweets.csv was written above.
ds <- ds[!is.na(ds$created),]
# get word frequencies and tokens
tokens <- WordTokenizer(ds$text)
# Weka control for n-gram extraction: bigrams through 4-grams.
n.gram.options <- Weka_control(max = 4, min = 2)
ngram_2 <- NGramTokenizer(ds$text, n.gram.options)
# Token frequency table, most frequent first.
t <- arrange(as.data.frame(table(tokens)), desc(Freq))
# search for all emoticons
# emoji frequency - rows: individual tweets
# emoji frequency - columns : emojis
# Each cell holds the regexpr() match position (-1 when absent).
emoji.frequency <- matrix(NA, nrow = nrow(ds), ncol = nrow(emoticons))
for(i in 1:nrow(emoticons)){
# NOTE(review): leftover debug output -- prints once per emoji.
print(i)
emoji.frequency[,i] <- regexpr(emoticons$bytes[i],ds$text, useBytes = T )
# if the emoji is present in any tweet
# remove the emoji from the tweet for text/sentiment analysis
if(any(emoji.frequency[,i]>-1)){
tweets.clean <- gsub(emoticons$bytes[i], "", tweets.clean, useBytes = T)
}
}
# tabulate the number of times each emoji is seen
emoji.counts <- colSums(emoji.frequency>-1)
# append this to the emoticons data.frame
emoticons <- cbind(emoji.counts, emoticons)
# emoticons <- arrange(emoticons, desc(emoji.counts))
# get data set of all rows with emojis and identify the emoji type
# emoji.ds contains all tweet info with description of emoji found
emoji.per.tweet <- rowSums(emoji.frequency > -1)
emoji.indexes <- which( emoji.per.tweet > 0)
emoji.ds <- NULL
# NOTE(review): growing emoji.ds with rbind() in a loop is quadratic; new
# rows are PREpended, so the final row order is the reverse of iteration.
for(i in emoji.indexes){
valid.cols <- which(emoji.frequency[i,]>-1)
for(j in valid.cols){
emoji.ds <- rbind(cbind(ds[i,], emoticons[j,]), emoji.ds)
}
}
# extract x, y coordinates ####
# (dead code kept for reference -- per-emoji-row coordinate extraction)
# temp <- str_extract(emoji.ds$map.info.B,"\\d.*(,)-\\d.*(&z=14)")
# locations = strsplit(temp, ",")
# emoji.ds$latitude <- as.numeric(unlist(lapply(locations, function(x)x[1])))
# emoji.ds$longitude <- as.numeric(
#   unlist(
#     lapply(locations,
#            function(x)strsplit(x[2], "&z")[[1]][1])))
# Remove all urls from tweets ####
tweets.clean <- rm_twitter_url(tweets.clean, extract = F, replacement = "", clean = F)
# stats####
# Share of tweets containing at least one emoji.
percentage.emoji <- 100*length(emoji.indexes)/nrow(ds)
cat("percentage of tweets with emoji: ", round(percentage.emoji,2),"%", sep = "")
#
print("Summary of emoji use per tweet (all tweets): \n")
print(summary(emoji.per.tweet))
print("Summary of emoji use per tweet (all tweets containing at least one emoji: \n")
print(summary(emoji.per.tweet[emoji.indexes]))
# write csv containing rows for each unique tweet:emoji combo ####
write.csv(emoji.ds, file = "twimoji.csv")
# write csv containing frequencies of emojis
write.csv(arrange(emoticons, desc(emoji.counts)), "emoticon_counts.csv")
|
#' Get the list of data classes that have been exposed in breaches
#'
#' @param verbose whether to message about http errors and re-tries
#' @param agent agent to be used as header in calls, by default "HIBPwned R pkg". # nolint
#'
#' @details Note that the package uses \code{memoise}
#' (\url{https://github.com/r-lib/memoise}) with no timeout argument,
#' so results are cached inside an active R session.
#'
#' @return Data.frame containing data class details
#' @export
#'
#' @examples
#' data_classes()
data_classes <- function(verbose = TRUE, agent = NULL) {
  endpoint <- "https://haveibeenpwned.com/api/v2/dataclasses" # nolint
  GETcontent(endpoint, HIBP_headers(agent), verbose) # nolint
}
| /R/data_classes.R | no_license | holisticinfosec/HIBPwned | R | false | false | 722 | r | #' Get list of classes of data that have been
#' exposed in breaches
#'
#' @param verbose whether to message about http errors and re-tries
#' @param agent agent to be used as header in calls, by default "HIBPwned R pkg". # nolint
#'
#' @details Note that the package uses \code{memoise}
#' (\url{https://github.com/r-lib/memoise})
#' with no timeout argument
#' so that results are cached inside an active R session.
#'
#' @return Data.frame containing data class details
#' @export
#'
#' @examples
#' data_classes()
data_classes <- function(verbose = TRUE, agent = NULL) {
  # Fetch the HIBP data-classes endpoint; GETcontent handles retries and
  # returns the parsed response.
  endpoint <- "https://haveibeenpwned.com/api/v2/dataclasses" # nolint
  GETcontent(endpoint, HIBP_headers(agent), verbose) # nolint
}
|
# Load the pre-processed ASA presidential-address data and derived columns.
allspeeches <- readRDS("./data/asa_data.rds")
# Decade label from the year, e.g. 1987 -> "1980".
# NOTE(review): substr(.,1,3) assumes four-digit years.
allspeeches$decade <- paste0(substr(as.character(allspeeches$year),1,3),"0")
# Counts of (unique) negative/positive sentiment words per speech.
allspeeches$num_neg_words <- unlist(lapply(allspeeches$neg.words, length))
allspeeches$num_pos_words <- unlist(lapply(allspeeches$pos.words, length))
entities <- readRDS("./data/entities.rds")
locations <- readRDS("./data/locations.rds")
topics <- readRDS("./data/topics.rds")
#Load the Data
# Domain-specific stop words excluded from frequency analyses.
stop_words <- c("the","an","will","can","also", "that","thats", "asa","american","statistical","statistics",
"statistician","statisticians","association","joint","meetings", "may", "might", "must", "one",
"many","per", "new","one","two","first","second","third", "cent","even","upon","much","time",
"year","years","use","used","must","good","great","better","best","way","bad","like","jsm",
"get","make","now","however","often","well","say","just")
# Human-readable labels for the LDA topic columns (topic_1..topic_9).
topic_crosswalk <- data.frame(topic = c("topic_1","topic_2","topic_3","topic_4","topic_5",
"topic_6","topic_7","topic_8","topic_9"),
topic_label = c("Government","Economics","Science","Education","Technology",
"Health","Surveys","Profession","Business"),
stringsAsFactors = F)
# Years with no recorded address.
missing_data_years <- c(1909, 1911, 1913, 1916, 1920, 1942, 1943, 1971)
# Choices for the app's metric drop-down, grouped by analysis family.
all_speech_dropdown <- list(readibility = c("Number of Words", "Number of Unique Words",
"Average Word Length", "Average Sentence Length", "Length Rank",
"Type Token Ratio (TTR)","Flesch-Kincaid Grade", "Flesch-Kincaid Age"),
sentiment = c("Sentiment","Number of Unique Positive Words", "Number of Unique Negative Words"),
topics = c("Topic Loading for Education","Topic Loading for Health",
"Topic Loading for Business","Topic Loading for Government",
"Topic Loading for Technology","Topic Loading for Surveys",
"Topic Loading for Economics","Topic Loading for Science",
"Topic Loading for Profession"))
#Frequency Charts from Words
#' Count occurrences of each search term in each speech.
#'
#' @param data Data frame of speeches with columns speechtext, year, period,
#'   president, category and gender.
#' @param list Character vector of user-supplied terms ("|" separates synonyms).
#' @param only If TRUE, match whole words only (regexer() adds \\b anchors).
#' @return Data frame with one row per term x speech combination.
strcounts <- function(data, list, only = TRUE) {
  cols <- c("term", "year", "period", "name", "category", "gender", "count")
  text <- regexer(list, only)
  # Preallocate one slot per term/speech pair instead of growing a data frame
  # with rbind() inside the loop (quadratic) and seeding it with an NA row.
  rows <- vector("list", nrow(data) * length(text))
  k <- 0L
  for (j in seq_len(nrow(data))) {
    for (i in seq_along(text)) {
      x <- str_count(data$speechtext[j], text[i])
      df <- data.frame(simpleCap(list[i]), data$year[j], data$period[j],
                       data$president[j], data$category[j], data$gender[j], x)
      names(df) <- cols
      k <- k + 1L
      rows[[k]] <- df
    }
  }
  if (length(rows) == 0) {
    # No speeches or no terms: return an empty frame with the expected columns.
    counts <- data.frame(matrix(nrow = 0, ncol = 7))
    names(counts) <- cols
    return(counts)
  }
  counts <- do.call(rbind, rows)
  counts <- counts[!is.na(counts$term), ]
  return(counts)
}
#' Extract every sentence containing each search term from a speech.
#'
#' @param speech Character scalar: full speech text.
#' @param list Character vector of search terms.
#' @param only If TRUE, match whole words only.
#' @return Data frame with columns term and sentence.
context <- function(speech, list, only = TRUE) {
  text <- regexer(list, only)
  # Collect per-term results in a list and bind once, instead of growing a
  # data frame with rbind() from an NA seed row.
  rows <- vector("list", length(text))
  for (i in seq_along(text)) {
    # Lookahead captures the whole sentence (up to "." or newline) containing the term.
    search <- paste0("(?i)((?=[^.\\n]*(", text[i], "))[^.\\n]+\\.?)")
    x <- str_extract_all(speech, search)
    if (length(x[[1]]) == 0) next
    temp <- data.frame(simpleCap(list[i]), x)
    names(temp) <- c("term", "sentence")
    rows[[i]] <- temp
  }
  ct <- do.call(rbind, rows)  # rbind drops the NULL slots of unmatched terms
  if (is.null(ct)) {
    ct <- data.frame(term = character(0), sentence = character(0))
  }
  ct <- ct[!is.na(ct$sentence), ]
  return(ct)
}
#' Turn user-entered terms into case-insensitive-first-letter regex patterns.
#'
#' Each term may contain "|"-separated synonyms and multi-word phrases; the
#' first letter of every alternative and of every word becomes a [Xx] class,
#' e.g. "new york|big apple" -> "[Nn]ew [Yy]ork|[Bb]ig [Aa]pple".
#'
#' @param text Character vector of raw terms.
#' @param only If TRUE, wrap each pattern in \\b word boundaries.
#' @return Character vector of regex patterns, one per input term.
regexer <- function(text, only=TRUE){
  # Pass 1: split on "|" and make the first letter of each alternative
  # case-insensitive, then rejoin with "|".
  x <- strsplit(text, "\\|")
  terms <- length(text)
  for(i in 1:terms){
    words <- length(x[[i]])
    if(str_count(text[i],"\\|") > 0){
      for(j in 1:words){
        l <- substr(x[[i]][[j]], 1, 1)
        m <- paste0("[",toupper(l),tolower(l), "]")
        x[[i]][[j]] <- paste0(m,substr(x[[i]][[j]], 2, nchar(x[[i]][[j]])))
      }
    }else {
      l <- substr(x[[i]], 1, 1)
      m <- paste0("[",toupper(l),tolower(l), "]")
      x[[i]] <- paste0(m,substr(x[[i]], 2, nchar(x[[i]])))
    }
    x[[i]] <- paste(x[[i]], collapse="|")
  }
  x <- unlist(x)
  # Pass 2: split on spaces and do the same for every word after the first
  # (the first word was already handled in pass 1).
  x <- strsplit(x, " ")
  terms <- length(x)
  for(i in 1:terms){
    words <- length(x[[i]])
    # NOTE(review): x[i] is a length-1 LIST, not a character vector; this
    # appears to work only via coercion/deparsing when the element has more
    # than one word. The conventional form would be x[[i]] or words > 1 —
    # TODO confirm before changing, behavior is relied on below (2:words).
    if(str_count(x[i], "\\s") > 0){
      for(j in 2:words){
        l <- substr(x[[i]][[j]], 1, 1)
        m <- paste0("[",toupper(l),tolower(l), "]")
        x[[i]][[j]] <- paste0(m,substr(x[[i]][[j]], 2, nchar(x[[i]][[j]])))
      }
    }
    x[[i]] <- paste(x[[i]], collapse=" ")
    # Whole-word anchors when exact-term matching was requested.
    if(only ==TRUE){ x[[i]] <- paste0("\\b",x[[i]],"\\b")}
  }
  x <- unlist(x)
  return(x)
}
# Title-case a term string: capitalise the first letter of every
# "|"-separated alternative, then of every space-separated word.
simpleCap <- function(x) {
  cap_join <- function(parts, sep) {
    paste0(toupper(substring(parts, 1, 1)), substring(parts, 2), collapse = sep)
  }
  piped <- cap_join(strsplit(x, "\\|")[[1]], "|")
  cap_join(strsplit(piped, " ")[[1]], " ")
}
#' Split a comma-separated string into a trimmed character vector.
#'
#' @param wordstring Single string, e.g. "alpha, beta , gamma".
#' @return Character vector of trimmed elements.
stringsplitter <- function(wordstring) {
  # base::trimws replaces stringr::str_trim (one fewer dependency here);
  # strsplit()[[1]] already yields a character vector, so the original
  # unlist() was a no-op and is dropped.
  trimws(strsplit(wordstring, ",")[[1]], which = "both")
}
#' Build a long-format term-document matrix from speech text.
#'
#' @param text Data frame with a speechtext column.
#' @param collapse_var Optional column name (string); when given, speeches are
#'   collapsed into one document per level of that column, otherwise into one
#'   single document labelled "none".
#' @return Data frame with columns term, variable and freq, sorted by
#'   variable and descending freq.
make_tdm <- function(text, collapse_var=NA){
  if(!is.na(collapse_var)){
    # across(all_of()) replaces the deprecated standard-evaluation group_by_().
    text <- text %>%
      group_by(across(all_of(collapse_var))) %>%
      summarize(text = paste(speechtext, collapse=" "))
    levels <- text[, collapse_var]
  }else{
    text <- text %>%
      summarize(text = paste(speechtext, collapse=" "))
    levels <- "none"
  }
  # Standard tm cleaning pipeline: punctuation, numbers, case, stop words
  # (English + domain-specific), whitespace.
  docs <- VCorpus(VectorSource(text$text)) %>%
    tm_map(removePunctuation) %>%
    tm_map(removeNumbers) %>%
    tm_map(tolower) %>%
    tm_map(removeWords, stopwords("english")) %>%
    tm_map(removeWords, stop_words) %>%
    tm_map(stripWhitespace) %>%
    tm_map(PlainTextDocument)
  tdm <- TermDocumentMatrix(docs) %>%
    as.matrix()
  # Label the document columns with the collapse levels, then melt to long form.
  colnames(tdm) <- levels[[1]]
  tdm <- reshape2::melt(tdm)
  names(tdm) <- c("term","variable","freq")
  tdm <- arrange(tdm, variable, desc(freq))
  return(tdm)
}
#' Horizontal bar chart of the top-N most frequent terms in a TDM.
#'
#' @param tdm Long-format TDM (term/variable/freq) as built by make_tdm().
#' @param top Number of terms to show (by total frequency across documents).
#' @param col Fill colors, one per document/variable level.
#' @return A ggplot object; legend is shown only when >1 variable is present.
topncount <- function(tdm, top=15, col=c("lightsteelblue3","olivedrab3","indianred3")){
  # Rank terms by total frequency and keep the top N.
  order <- tdm %>% group_by(term) %>%
    summarize(sumfreq=sum(freq)) %>%
    arrange(desc(sumfreq)) %>%
    slice(1:top)
  # Keep only top terms; factor levels fix bar order (reversed for coord_flip).
  x <- tdm %>%
    filter(term %in% order$term) %>%
    mutate(term = factor(term, rev(order$term), ordered=TRUE),
           variable = factor(variable, c("Academia","Government","Industry","none")))
  p <- ggplot(x, aes(x=term, freq, fill=as.factor(variable), group=as.factor(variable))) +
    geom_bar(stat="identity", color="white",size=.3, alpha=0.7) +
    scale_fill_manual(values = col) +
    scale_y_continuous(expand=c(0,0), limits=c(0,max(order$sumfreq)+10)) +
    labs(
      title = "Word Count",
      x = "Word",
      y = "Count",
      fill = ""
    ) +
    theme(plot.title = element_blank(),
          plot.background = element_rect(fill = 'white', colour = 'white'),
          panel.border = element_rect(fill = NA, color = 'white', size = 2),
          panel.background = element_rect(fill = 'white', colour = 'white'),
          panel.grid.major = element_line(colour = "grey79", size=.3, linetype = 3),
          panel.grid.minor = element_blank(),
          axis.text = element_text(size = 10, color="black", face="bold"),
          axis.title.x = element_text(size = 12, face="bold", color = "black"),
          axis.title.y = element_blank(),
          axis.ticks = element_blank(),
          axis.line = element_line(colour = "black", size=1),
          legend.background = element_blank(),
          legend.key = element_blank(),
          legend.text = element_text(size = 12, color= "black"),
          legend.title = element_text(size = 12,face="bold"),
          legend.position = "none") +
    coord_flip()
  # Show the legend only when comparing multiple document groups.
  if(length(unique(x$variable)) > 1){
    p <- p + theme(legend.position = "bottom") +
      guides(fill = guide_legend(override.aes = list(size=11)))
  }
  p
}
# Draw a single word cloud from a long-format TDM (term/freq columns).
#
# @param tdm Data frame with term and freq columns.
# @param words Maximum number of words to draw.
# @param color RColorBrewer palette name.
cloud <- function(tdm, words = 500, color = "RdBu") {
  palette <- brewer.pal(8, color)
  # Remove all plot margins so the cloud fills the device.
  par(mar = c(0, 0, 0, 0))
  wordcloud(tdm$term, tdm$freq,
            min.freq = 2, max.words = words, scale = c(4, 0.5),
            random.order = FALSE, random.color = FALSE, colors = palette)
}
# Draw a comparison word cloud contrasting term frequencies across documents.
#
# @param tdm Long-format TDM (term/variable/freq) as built by make_tdm().
# @param max Maximum number of words to draw.
# @param col Colors, one per document/variable level.
compcloud <- function(tdm, max = 200, col = c("lightsteelblue3", "olivedrab3", "indianred3")) {
  # Reshape long TDM to wide: one column per document, one row per term.
  wide <- spread(data.frame(tdm, stringsAsFactors = F), key = "variable", value = "freq")
  wide <- mutate(wide, term = as.character(term))
  # comparison.cloud() wants a numeric matrix with terms as row names,
  # so move the term column into the row names and drop it.
  mat <- wide
  row.names(mat) <- wide$term
  mat <- mat[, 2:ncol(mat)]
  par(mar = c(0, 0, 0, 0))
  comparison.cloud(mat,
                   max.words = max, min.freq = 2, scale = c(5, .5),
                   random.order = FALSE, rot.per = .2, fixed.asp = TRUE,
                   colors = col, title.bg.colors = col, title.size = 2)
}
#' Scatter plot of sentence-level sentiment across the course of one speech.
#'
#' Splits the speech into sentences (qdap::sentSplit), computes per-sentence
#' polarity (qdap::polarity), and plots polarity against percent-complete
#' (proxied by cumulative syllable count) with a loess trend line.
#'
#' @param speechtext Character scalar: full speech text.
#' @return A ggplot object.
spsentgraph <- function(speechtext){
  speech.df <- data.table(speech=speechtext)
  sentences <- data.table(sentSplit(speech.df, "speech"))
  # Add a sentence counter and remove unnecessary variables
  sentences[, sentence.num := seq(nrow(sentences))]
  sentences[, tot := NULL]
  setcolorder(sentences, c("sentence.num", "speech"))
  # Syllables per sentence; slashes/pipes/periods are replaced with "x" first
  # so syllable_sum() does not choke on them.
  sentences[, syllables := syllable_sum(gsub("[\\\\\\/|.]","x", speech))]
  sentences = sentences[!is.na(syllables)]
  # Add cumulative syllable count and percent complete as proxy for progression
  sentences[, syllables.cumsum := cumsum(syllables)]
  sentences[, pct.complete := syllables.cumsum / sum(sentences$syllables)]
  sentences[, pct.complete.100 := pct.complete * 100]
  # Sentence-level polarity scores; dir encodes the sign for point coloring.
  pol.df <- polarity(sentences$speech)$all
  sentences[, words := pol.df$wc]
  sentences[, pol := pol.df$polarity]
  sentences[pol > 0, dir := 1]
  sentences[pol == 0, dir := 0]
  sentences[pol < 0, dir := -1]
  my.theme <-
    theme(plot.title = element_blank(),
          plot.background = element_rect(fill = 'white', colour = 'white'),
          panel.border = element_rect(fill = NA, color = 'white', size = 2),
          panel.background = element_rect(fill = 'white', colour = 'white'),
          panel.grid.major = element_line(colour = "grey79", size=.3, linetype = 3),
          panel.grid.minor = element_blank(),
          axis.text = element_text(size = 10, color="black", face="bold"),
          axis.title.x = element_text(size = 12, face="bold", color = "black"),
          axis.title.y = element_blank(),
          axis.ticks = element_blank(),
          axis.line = element_line(colour = "black", size=1),
          legend.background = element_blank(),
          legend.key = element_blank(),
          legend.text = element_text(size = 10, color= "black"),
          legend.title = element_text(size = 12,face="bold"),
          legend.position = "none")
  # Local helper: layer points + loess smoother + labels onto the base plot.
  CustomScatterPlot <- function(gg)
    return(gg + geom_point(aes(color=dir)) + # Lighten dots
             stat_smooth(method = 'loess', color="indianred3", fill="lightgray", size=1.4) +
             xlab("Percent complete (by syllable count)") +
             ylab("Sentiment (sentence-level polarity)") +
             ggtitle("Sentiment Across Speech") +
             scale_x_continuous(labels = percent) + my.theme)
  CustomScatterPlot(ggplot(sentences, aes(pct.complete, pol)))
}
#' Comparison cloud of a speech's positive vs negative sentiment words.
#'
#' @param sentimentwords Two word collections; presumably positive words first
#'   and negative words second (TODO confirm with callers), since the TDM
#'   columns are labelled "Positive" then "Negative" below.
#' @param maxwords Maximum number of words to draw.
#' @param col Colors for the positive and negative halves, in that order.
spsentcloud <- function(sentimentwords,maxwords = 250, col = c("darkolivegreen4","indianred3")){
  docs <- VCorpus(VectorSource(sentimentwords)) %>%
    tm_map(removePunctuation) %>%
    tm_map(removeNumbers) %>%
    tm_map(tolower) %>%
    tm_map(removeWords, stopwords("english")) %>%
    tm_map(stripWhitespace) %>%
    tm_map(PlainTextDocument) %>%
    tm_map(removeWords, c("the","an","vice"))
  tdm <- TermDocumentMatrix(docs) %>%
    as.matrix()
  # Assumes the corpus produced exactly two documents (positive, negative).
  colnames(tdm) <- c("Positive","Negative")
  par(mar = rep(0, 4))
  comparison.cloud(tdm, random.order=FALSE, scale = c(6.5,0.6),
                   colors = col, title.bg.colors=c("darkolivegreen4","indianred3"),
                   title.size=2.5, max.words=maxwords)
}
#' Bar chart of one document's loadings (gamma) across the nine LDA topics.
#'
#' @param df Wide topic-loading data frame: an id column followed by nine
#'   topic_* columns (positions 2:10 are gathered below).
#' @param doc Document id to plot.
#' @return A ggplot object.
topic_plot <- function(df, doc){
  df %>%
    filter(id == doc) %>%
    # Wide -> long: one row per topic, columns 2:10 hold the gamma values.
    tidyr::gather(key="topic",value="gamma",2:10) %>%
    # Attach human-readable topic labels and fix their display order.
    left_join(topic_crosswalk) %>%
    mutate(topic_label = factor(topic_label, topic_crosswalk$topic_label, ordered = T)) %>%
    ggplot(aes(topic_label, gamma)) +
    labs(x="",
         y="Topic Loading (Gamma)") +
    geom_col(show.legend = FALSE, fill = "lightsteelblue3", alpha=0.7) +
    theme(plot.title = element_blank(),
          plot.background = element_rect(fill = 'white', colour = 'white'),
          panel.border = element_rect(fill = NA, color = 'white', size = 2),
          panel.background = element_rect(fill = 'white', colour = 'white'),
          panel.grid.major = element_line(colour = "grey79", size=.3, linetype = 3),
          panel.grid.minor = element_blank(),
          axis.text = element_text(size = 14, color="black", face="bold"),
          axis.text.x = element_text(angle=40, hjust=1),
          axis.title = element_text(size = 16, face="bold", color = "black"),
          axis.ticks = element_blank(),
          axis.line = element_line(colour = "black", size=1),
          legend.position = "none")
}
#' Bubble chart of word usage per year, faceted by time period.
#'
#' @param df Speech data frame (needs speechtext, year, period, etc.).
#' @param words Character vector of terms to count (passed to strcounts()).
#' @param stop If TRUE, match whole words only.
#' @param colorvar Column name (string) to color by, or FALSE for no legend.
#'   NOTE(review): the `colorvar == FALSE` check below compares a string to
#'   FALSE, which is only TRUE when FALSE itself is passed — confirm intended.
#' @param leg Legend title.
#' @param scale Maximum bubble size.
#' @param sz Base font size for theme_overtime().
#' @param forprint If TRUE, add a download title/caption for export.
#' @return A ggplot object.
words_over_time <- function(df,words,stop=TRUE,colorvar,leg,scale=20,sz=14, forprint=FALSE) {
  x <- strcounts(df, words, only=stop)
  max_count <- max(x$count)
  # aes_string is soft-deprecated in newer ggplot2 but kept here as-is.
  p <- ggplot(x, aes(as.numeric(year), term, size=count)) +
    geom_point(stat="identity", aes_string(color = colorvar), alpha=.5) +
    scale_x_continuous(breaks = seq(1900,2030,by=4)) +
    scale_size(range=c(0,scale), guide="none") +
    facet_wrap(~period, scales="free_x", ncol = 1) +
    labs(title = "",
         y = "",
         x = "Year",
         subtitle= paste0("Circles scaled by number of uses of the word (Maximum Uses: ",max_count,")"),
         color = leg) +
    scale_color_manual(values=c("lightsteelblue3","olivedrab3","indianred3","darkgoldenrod2")) +
    theme_overtime(sz) +
    guides(color = guide_legend(override.aes = list(size=11)))
  if(colorvar == FALSE){
    p <- p + theme(legend.position = "none")
  }
  if(forprint==TRUE){
    p <- p + labs(title = "ASA Presidential Addresses, Words Through Time",
                  caption="Downloaded from: https://osmrbls.shinyapps.io/jsm_2020")
  }
  p
}
#' Bar chart of one topic's loading per year, faceted by time period.
#'
#' @param topic Column name (string) of the topic in the topics data frame.
#' @param topic_title Human-readable topic name for the plot title.
#' @param colorvar Column name (string) to fill by, or FALSE for no legend
#'   (same string-vs-FALSE caveat as words_over_time()).
#' @param legend_title Legend title.
#' @param sz Base font size for theme_overtime().
#' @param forprint If TRUE, add a download title/caption for export.
#' @return A ggplot object.
topics_over_time <- function(topic, topic_title, colorvar, legend_title, sz=14, forprint=FALSE) {
  # Join topic loadings with speech metadata (relies on globals topics/allspeeches).
  x <- left_join(topics[ ,c("id",topic)], allspeeches[ ,c("id","year","period", "category","gender")], by="id")
  p <- ggplot(x, aes_string("year", topic)) +
    geom_bar(stat="identity", aes_string(fill = colorvar), alpha=.5) +
    scale_x_continuous(breaks = seq(1900,2030,by=4)) +
    facet_wrap(~period, scales="free_x", ncol = 1) +
    labs(title = paste0("Topic Loadings for ", topic_title),
         y = "Topic Loading (Gamma)",
         x = "Year",
         fill = legend_title) +
    scale_fill_manual(values=c("lightsteelblue3","olivedrab3","indianred3","darkgoldenrod2")) +
    theme_overtime(sz) +
    guides(fill = guide_legend(override.aes = list(size=11)))
  if(colorvar == FALSE){
    p <- p + theme(legend.position = "none")
  }
  if(forprint==TRUE){
    p <- p + labs(title = "ASA Presidential Addresses, Topics Through Time",
                  subtitle = paste0("Topic Loadings for ", topic_title),
                  caption="Downloaded from: https://osmrbls.shinyapps.io/jsm_2020")
  }
  p
}
#' Shared ggplot theme for the *_over_time charts.
#'
#' @param sz Base font size; axis/legend text scale off it (sz - 2).
#' @return A ggplot2 theme object.
theme_overtime <- function(sz){
  theme(plot.title = element_text(size=22, face="bold"),
        plot.subtitle = element_text(size=15),
        plot.caption = element_text(size=10),
        plot.background = element_rect(fill = 'white'),
        panel.border = element_rect(fill=NA, size=1, color="grey30"),
        panel.background = element_blank(),
        panel.grid.major = element_line(size=.3, color="grey50",linetype = 3),
        panel.grid.minor = element_blank(),
        axis.text = element_text(size = sz-2,face="bold", color="black"),
        axis.title = element_text(size = sz, face="bold", color = "black"),
        axis.title.y = element_blank(),
        axis.ticks = element_blank(),
        strip.text = element_text(size=sz, face="bold"),
        strip.background = element_blank(),
        legend.title = element_text(size=sz, face="bold"),
        legend.text = element_text(size=sz-2),
        legend.key = element_rect(fill = NA),
        legend.position = "bottom")
}
# Build the HTML biography paragraph shown on a president's page.
#
# @param df Speech metadata with columns yearpresident, president_name_forward,
#   gender, affiliation, pres_number, title and date_of_address.
# @param pres_name Value of yearpresident identifying one address.
# @return HTML string (header + paragraph).
speech_narrative <- function(df, pres_name) {
  info_cols <- c("president_name_forward", "gender", "affiliation",
                 "pres_number", "title", "date_of_address")
  pres <- df[df$yearpresident == pres_name, info_cols]
  # Pronouns keyed off the recorded gender (pronoun itself is currently unused
  # downstream but retained for parity with the possessive form).
  male <- pres$gender == "Male"
  pronoun <- if (male) "He" else "She"
  posessive_pronoun <- if (male) "his" else "her"
  # Affiliation clause is omitted entirely when unknown.
  affil <- if (pres$affiliation != "Unknown") {
    paste0(", from the ", pres$affiliation, ",")
  } else {
    ""
  }
  date_str <- format(as.Date(pres$date_of_address), format = "%A, %B %e, %Y")
  sent1 <- paste0(pres$president_name_forward, affil, " was the ",
                  pres$pres_number, " president of the ASA. ")
  sent2 <- paste0("This page provides text analysis of ", posessive_pronoun,
                  " address entitled <i>'", pres$title,
                  "'</i> delivered on ", date_str, ".")
  paragraph <- paste0("<h3>", pres$president_name_forward, "</h3>",
                      "<p style='font-size:1.2em'>", sent1, sent2, "<br><br>",
                      "You can read the full text of the speech by clicking the button below.</p>")
  return(paragraph)
}
#' Plot JSM host locations on a US map, optionally highlighting one city.
#'
#' @param df Host-city data frame with columns location, lon, lat, num_hosting.
#' @param states,countries Map polygon data frames (long/lat/group columns).
#' @param current_location Optional city name to highlight; NA for none.
#' @return A ggplot object.
plot_location <- function(df, states, countries, current_location=NA){
  if(!is.na(current_location)){
    # NOTE(review): current_loc is computed but never referenced below —
    # presumably a leftover aesthetic; confirm before removing.
    df$current_loc <- df$location == current_location
  }else{
    df$current_loc <- NA
  }
  g <- ggplot(countries, aes(x = long, y = lat)) +
    geom_polygon(aes(group=group), fill="transparent", color = "lightsteelblue2") +
    geom_polygon(data = states, aes(x=long, y=lat, group=group), color="lightsteelblue2",
                 fill="transparent", size=0.25) +
    theme_minimal() +
    theme(legend.position = "none",
          plot.title = element_text(size=22, face="bold"),
          plot.subtitle = element_text(size=15),
          panel.grid = element_blank(),
          axis.text = element_blank(),
          # Fixed: removed the stray trailing comma after this argument, which
          # left an empty argument in the theme() call.
          axis.title = element_blank()) +
    coord_fixed(ratio=1.45, xlim = c(-125, -66.4), ylim = c(24.24, 50.75), expand = FALSE)
  if(!is.na(current_location)){
    # Highlight the current city (green square + label); others are red circles.
    g <- g +
      geom_point(data=df[df$location != current_location, ], aes(lon, lat, size=num_hosting),
                 color="indianred3", alpha=0.8) +
      geom_point(data=df[df$location == current_location, ], aes(lon, lat, size=num_hosting),
                 color="olivedrab4", pch=15) +
      geom_label_repel(data = df[df$location==current_location, ],
                       aes(lon, lat, label=location), color="black", point.padding =0.5)
  }else{
    g <- g + geom_point(data=df, aes(lon, lat, size=num_hosting, color="indianred4"),
                        alpha=0.8)
  }
  return(g)
}
| /jsm_2020_app/global.R | no_license | brandonkopp/ASA-Presidents-Shiny | R | false | false | 18,584 | r | allspeeches <- readRDS("./data/asa_data.rds")
allspeeches$decade <- paste0(substr(as.character(allspeeches$year),1,3),"0")
# lengths() replaces the slower unlist(lapply(x, length)) on these list-columns.
allspeeches$num_neg_words <- lengths(allspeeches$neg.words)
allspeeches$num_pos_words <- lengths(allspeeches$pos.words)
entities <- readRDS("./data/entities.rds")
locations <- readRDS("./data/locations.rds")
topics <- readRDS("./data/topics.rds")
#Load the Data
stop_words <- c("the","an","will","can","also", "that","thats", "asa","american","statistical","statistics",
"statistician","statisticians","association","joint","meetings", "may", "might", "must", "one",
"many","per", "new","one","two","first","second","third", "cent","even","upon","much","time",
"year","years","use","used","must","good","great","better","best","way","bad","like","jsm",
"get","make","now","however","often","well","say","just")
topic_crosswalk <- data.frame(topic = c("topic_1","topic_2","topic_3","topic_4","topic_5",
"topic_6","topic_7","topic_8","topic_9"),
topic_label = c("Government","Economics","Science","Education","Technology",
"Health","Surveys","Profession","Business"),
stringsAsFactors = F)
missing_data_years <- c(1909, 1911, 1913, 1916, 1920, 1942, 1943, 1971)
all_speech_dropdown <- list(readibility = c("Number of Words", "Number of Unique Words",
"Average Word Length", "Average Sentence Length", "Length Rank",
"Type Token Ratio (TTR)","Flesch-Kincaid Grade", "Flesch-Kincaid Age"),
sentiment = c("Sentiment","Number of Unique Positive Words", "Number of Unique Negative Words"),
topics = c("Topic Loading for Education","Topic Loading for Health",
"Topic Loading for Business","Topic Loading for Government",
"Topic Loading for Technology","Topic Loading for Surveys",
"Topic Loading for Economics","Topic Loading for Science",
"Topic Loading for Profession"))
#Frequency Charts from Words
#' Count occurrences of each search term in each speech.
#'
#' @param data Data frame of speeches with columns speechtext, year, period,
#'   president, category and gender.
#' @param list Character vector of user-supplied terms ("|" separates synonyms).
#' @param only If TRUE, match whole words only (regexer() adds \\b anchors).
#' @return Data frame with one row per term x speech combination.
strcounts <- function(data, list, only = TRUE) {
  cols <- c("term", "year", "period", "name", "category", "gender", "count")
  text <- regexer(list, only)
  # Preallocate one slot per term/speech pair instead of growing a data frame
  # with rbind() inside the loop (quadratic) and seeding it with an NA row.
  rows <- vector("list", nrow(data) * length(text))
  k <- 0L
  for (j in seq_len(nrow(data))) {
    for (i in seq_along(text)) {
      x <- str_count(data$speechtext[j], text[i])
      df <- data.frame(simpleCap(list[i]), data$year[j], data$period[j],
                       data$president[j], data$category[j], data$gender[j], x)
      names(df) <- cols
      k <- k + 1L
      rows[[k]] <- df
    }
  }
  if (length(rows) == 0) {
    # No speeches or no terms: return an empty frame with the expected columns.
    counts <- data.frame(matrix(nrow = 0, ncol = 7))
    names(counts) <- cols
    return(counts)
  }
  counts <- do.call(rbind, rows)
  counts <- counts[!is.na(counts$term), ]
  return(counts)
}
#' Extract every sentence containing each search term from a speech.
#'
#' @param speech Character scalar: full speech text.
#' @param list Character vector of search terms.
#' @param only If TRUE, match whole words only.
#' @return Data frame with columns term and sentence.
context <- function(speech, list, only = TRUE) {
  text <- regexer(list, only)
  # Collect per-term results in a list and bind once, instead of growing a
  # data frame with rbind() from an NA seed row.
  rows <- vector("list", length(text))
  for (i in seq_along(text)) {
    # Lookahead captures the whole sentence (up to "." or newline) containing the term.
    search <- paste0("(?i)((?=[^.\\n]*(", text[i], "))[^.\\n]+\\.?)")
    x <- str_extract_all(speech, search)
    if (length(x[[1]]) == 0) next
    temp <- data.frame(simpleCap(list[i]), x)
    names(temp) <- c("term", "sentence")
    rows[[i]] <- temp
  }
  ct <- do.call(rbind, rows)  # rbind drops the NULL slots of unmatched terms
  if (is.null(ct)) {
    ct <- data.frame(term = character(0), sentence = character(0))
  }
  ct <- ct[!is.na(ct$sentence), ]
  return(ct)
}
regexer <- function(text, only=TRUE){
x <- strsplit(text, "\\|")
terms <- length(text)
for(i in 1:terms){
words <- length(x[[i]])
if(str_count(text[i],"\\|") > 0){
for(j in 1:words){
l <- substr(x[[i]][[j]], 1, 1)
m <- paste0("[",toupper(l),tolower(l), "]")
x[[i]][[j]] <- paste0(m,substr(x[[i]][[j]], 2, nchar(x[[i]][[j]])))
}
}else {
l <- substr(x[[i]], 1, 1)
m <- paste0("[",toupper(l),tolower(l), "]")
x[[i]] <- paste0(m,substr(x[[i]], 2, nchar(x[[i]])))
}
x[[i]] <- paste(x[[i]], collapse="|")
}
x <- unlist(x)
x <- strsplit(x, " ")
terms <- length(x)
for(i in 1:terms){
words <- length(x[[i]])
if(str_count(x[i], "\\s") > 0){
for(j in 2:words){
l <- substr(x[[i]][[j]], 1, 1)
m <- paste0("[",toupper(l),tolower(l), "]")
x[[i]][[j]] <- paste0(m,substr(x[[i]][[j]], 2, nchar(x[[i]][[j]])))
}
}
x[[i]] <- paste(x[[i]], collapse=" ")
if(only ==TRUE){ x[[i]] <- paste0("\\b",x[[i]],"\\b")}
}
x <- unlist(x)
return(x)
}
# Title-case a term string: capitalise the first letter of every
# "|"-separated alternative, then of every space-separated word.
simpleCap <- function(x) {
  cap_join <- function(parts, sep) {
    paste0(toupper(substring(parts, 1, 1)), substring(parts, 2), collapse = sep)
  }
  piped <- cap_join(strsplit(x, "\\|")[[1]], "|")
  cap_join(strsplit(piped, " ")[[1]], " ")
}
#' Split a comma-separated string into a trimmed character vector.
#'
#' @param wordstring Single string, e.g. "alpha, beta , gamma".
#' @return Character vector of trimmed elements.
stringsplitter <- function(wordstring) {
  # base::trimws replaces stringr::str_trim (one fewer dependency here);
  # strsplit()[[1]] already yields a character vector, so the original
  # unlist() was a no-op and is dropped.
  trimws(strsplit(wordstring, ",")[[1]], which = "both")
}
#' Build a long-format term-document matrix from speech text.
#'
#' @param text Data frame with a speechtext column.
#' @param collapse_var Optional column name (string); when given, speeches are
#'   collapsed into one document per level of that column, otherwise into one
#'   single document labelled "none".
#' @return Data frame with columns term, variable and freq, sorted by
#'   variable and descending freq.
make_tdm <- function(text, collapse_var=NA){
  if(!is.na(collapse_var)){
    # across(all_of()) replaces the deprecated standard-evaluation group_by_().
    text <- text %>%
      group_by(across(all_of(collapse_var))) %>%
      summarize(text = paste(speechtext, collapse=" "))
    levels <- text[, collapse_var]
  }else{
    text <- text %>%
      summarize(text = paste(speechtext, collapse=" "))
    levels <- "none"
  }
  # Standard tm cleaning pipeline: punctuation, numbers, case, stop words
  # (English + domain-specific), whitespace.
  docs <- VCorpus(VectorSource(text$text)) %>%
    tm_map(removePunctuation) %>%
    tm_map(removeNumbers) %>%
    tm_map(tolower) %>%
    tm_map(removeWords, stopwords("english")) %>%
    tm_map(removeWords, stop_words) %>%
    tm_map(stripWhitespace) %>%
    tm_map(PlainTextDocument)
  tdm <- TermDocumentMatrix(docs) %>%
    as.matrix()
  # Label the document columns with the collapse levels, then melt to long form.
  colnames(tdm) <- levels[[1]]
  tdm <- reshape2::melt(tdm)
  names(tdm) <- c("term","variable","freq")
  tdm <- arrange(tdm, variable, desc(freq))
  return(tdm)
}
topncount <- function(tdm, top=15, col=c("lightsteelblue3","olivedrab3","indianred3")){
order <- tdm %>% group_by(term) %>%
summarize(sumfreq=sum(freq)) %>%
arrange(desc(sumfreq)) %>%
slice(1:top)
x <- tdm %>%
filter(term %in% order$term) %>%
mutate(term = factor(term, rev(order$term), ordered=TRUE),
variable = factor(variable, c("Academia","Government","Industry","none")))
p <- ggplot(x, aes(x=term, freq, fill=as.factor(variable), group=as.factor(variable))) +
geom_bar(stat="identity", color="white",size=.3, alpha=0.7) +
scale_fill_manual(values = col) +
scale_y_continuous(expand=c(0,0), limits=c(0,max(order$sumfreq)+10)) +
labs(
title = "Word Count",
x = "Word",
y = "Count",
fill = ""
) +
theme(plot.title = element_blank(),
plot.background = element_rect(fill = 'white', colour = 'white'),
panel.border = element_rect(fill = NA, color = 'white', size = 2),
panel.background = element_rect(fill = 'white', colour = 'white'),
panel.grid.major = element_line(colour = "grey79", size=.3, linetype = 3),
panel.grid.minor = element_blank(),
axis.text = element_text(size = 10, color="black", face="bold"),
axis.title.x = element_text(size = 12, face="bold", color = "black"),
axis.title.y = element_blank(),
axis.ticks = element_blank(),
axis.line = element_line(colour = "black", size=1),
legend.background = element_blank(),
legend.key = element_blank(),
legend.text = element_text(size = 12, color= "black"),
legend.title = element_text(size = 12,face="bold"),
legend.position = "none") +
coord_flip()
if(length(unique(x$variable)) > 1){
p <- p + theme(legend.position = "bottom") +
guides(fill = guide_legend(override.aes = list(size=11)))
}
p
}
cloud <- function(tdm, words=500, color="RdBu"){
par(mar = rep(0, 4))
wordcloud(tdm$term, tdm$freq, max.words = words, min.freq =2, scale=c(4,0.5), random.order = FALSE,
random.color = FALSE, colors= brewer.pal(8, color))
}
compcloud <- function(tdm, max=200, col=c("lightsteelblue3","olivedrab3","indianred3")){
tdm <- tdm %>%
data.frame(stringsAsFactors = F) %>%
spread(key="variable",value="freq") %>%
mutate(term = as.character(term))
mat <- tdm
row.names(mat) <- tdm$term
mat <- mat[ ,2:ncol(mat)]
par(mar = rep(0,4))
comparison.cloud(mat, random.order=FALSE, scale = c(5, .5),
rot.per=.2,min.freq=2,
colors = col,title.bg.colors = col,
fixed.asp=TRUE,title.size = 2, max.words=max)
}
spsentgraph <- function(speechtext){
speech.df <- data.table(speech=speechtext)
sentences <- data.table(sentSplit(speech.df, "speech"))
# Add a sentence counter and remove unnecessary variables
sentences[, sentence.num := seq(nrow(sentences))]
sentences[, tot := NULL]
setcolorder(sentences, c("sentence.num", "speech"))
# Syllables per sentence
sentences[, syllables := syllable_sum(gsub("[\\\\\\/|.]","x", speech))]
sentences = sentences[!is.na(syllables)]
# Add cumulative syllable count and percent complete as proxy for progression
sentences[, syllables.cumsum := cumsum(syllables)]
sentences[, pct.complete := syllables.cumsum / sum(sentences$syllables)]
sentences[, pct.complete.100 := pct.complete * 100]
pol.df <- polarity(sentences$speech)$all
sentences[, words := pol.df$wc]
sentences[, pol := pol.df$polarity]
sentences[pol > 0, dir := 1]
sentences[pol == 0, dir := 0]
sentences[pol < 0, dir := -1]
my.theme <-
theme(plot.title = element_blank(),
plot.background = element_rect(fill = 'white', colour = 'white'),
panel.border = element_rect(fill = NA, color = 'white', size = 2),
panel.background = element_rect(fill = 'white', colour = 'white'),
panel.grid.major = element_line(colour = "grey79", size=.3, linetype = 3),
panel.grid.minor = element_blank(),
axis.text = element_text(size = 10, color="black", face="bold"),
axis.title.x = element_text(size = 12, face="bold", color = "black"),
axis.title.y = element_blank(),
axis.ticks = element_blank(),
axis.line = element_line(colour = "black", size=1),
legend.background = element_blank(),
legend.key = element_blank(),
legend.text = element_text(size = 10, color= "black"),
legend.title = element_text(size = 12,face="bold"),
legend.position = "none")
CustomScatterPlot <- function(gg)
return(gg + geom_point(aes(color=dir)) + # Lighten dots
stat_smooth(method = 'loess', color="indianred3", fill="lightgray", size=1.4) +
xlab("Percent complete (by syllable count)") +
ylab("Sentiment (sentence-level polarity)") +
ggtitle("Sentiment Across Speech") +
scale_x_continuous(labels = percent) + my.theme)
CustomScatterPlot(ggplot(sentences, aes(pct.complete, pol)))
}
spsentcloud <- function(sentimentwords,maxwords = 250, col = c("darkolivegreen4","indianred3")){
docs <- VCorpus(VectorSource(sentimentwords)) %>%
tm_map(removePunctuation) %>%
tm_map(removeNumbers) %>%
tm_map(tolower) %>%
tm_map(removeWords, stopwords("english")) %>%
tm_map(stripWhitespace) %>%
tm_map(PlainTextDocument) %>%
tm_map(removeWords, c("the","an","vice"))
tdm <- TermDocumentMatrix(docs) %>%
as.matrix()
colnames(tdm) <- c("Positive","Negative")
par(mar = rep(0, 4))
comparison.cloud(tdm, random.order=FALSE, scale = c(6.5,0.6),
colors = col, title.bg.colors=c("darkolivegreen4","indianred3"),
title.size=2.5, max.words=maxwords)
}
topic_plot <- function(df, doc){
df %>%
filter(id == doc) %>%
tidyr::gather(key="topic",value="gamma",2:10) %>%
left_join(topic_crosswalk) %>%
mutate(topic_label = factor(topic_label, topic_crosswalk$topic_label, ordered = T)) %>%
ggplot(aes(topic_label, gamma)) +
labs(x="",
y="Topic Loading (Gamma)") +
geom_col(show.legend = FALSE, fill = "lightsteelblue3", alpha=0.7) +
theme(plot.title = element_blank(),
plot.background = element_rect(fill = 'white', colour = 'white'),
panel.border = element_rect(fill = NA, color = 'white', size = 2),
panel.background = element_rect(fill = 'white', colour = 'white'),
panel.grid.major = element_line(colour = "grey79", size=.3, linetype = 3),
panel.grid.minor = element_blank(),
axis.text = element_text(size = 14, color="black", face="bold"),
axis.text.x = element_text(angle=40, hjust=1),
axis.title = element_text(size = 16, face="bold", color = "black"),
axis.ticks = element_blank(),
axis.line = element_line(colour = "black", size=1),
legend.position = "none")
}
words_over_time <- function(df,words,stop=TRUE,colorvar,leg,scale=20,sz=14, forprint=FALSE) {
x <- strcounts(df, words, only=stop)
max_count <- max(x$count)
p <- ggplot(x, aes(as.numeric(year), term, size=count)) +
geom_point(stat="identity", aes_string(color = colorvar), alpha=.5) +
scale_x_continuous(breaks = seq(1900,2030,by=4)) +
scale_size(range=c(0,scale), guide="none") +
facet_wrap(~period, scales="free_x", ncol = 1) +
labs(title = "",
y = "",
x = "Year",
subtitle= paste0("Circles scaled by number of uses of the word (Maximum Uses: ",max_count,")"),
color = leg) +
scale_color_manual(values=c("lightsteelblue3","olivedrab3","indianred3","darkgoldenrod2")) +
theme_overtime(sz) +
guides(color = guide_legend(override.aes = list(size=11)))
if(colorvar == FALSE){
p <- p + theme(legend.position = "none")
}
if(forprint==TRUE){
p <- p + labs(title = "ASA Presidential Addresses, Words Through Time",
caption="Downloaded from: https://osmrbls.shinyapps.io/jsm_2020")
}
p
}
topics_over_time <- function(topic, topic_title, colorvar, legend_title, sz=14, forprint=FALSE) {
x <- left_join(topics[ ,c("id",topic)], allspeeches[ ,c("id","year","period", "category","gender")], by="id")
p <- ggplot(x, aes_string("year", topic)) +
geom_bar(stat="identity", aes_string(fill = colorvar), alpha=.5) +
scale_x_continuous(breaks = seq(1900,2030,by=4)) +
facet_wrap(~period, scales="free_x", ncol = 1) +
labs(title = paste0("Topic Loadings for ", topic_title),
y = "Topic Loading (Gamma)",
x = "Year",
fill = legend_title) +
scale_fill_manual(values=c("lightsteelblue3","olivedrab3","indianred3","darkgoldenrod2")) +
theme_overtime(sz) +
guides(fill = guide_legend(override.aes = list(size=11)))
if(colorvar == FALSE){
p <- p + theme(legend.position = "none")
}
if(forprint==TRUE){
p <- p + labs(title = "ASA Presidential Addresses, Topics Through Time",
subtitle = paste0("Topic Loadings for ", topic_title),
caption="Downloaded from: https://osmrbls.shinyapps.io/jsm_2020")
}
p
}
theme_overtime <- function(sz){
theme(plot.title = element_text(size=22, face="bold"),
plot.subtitle = element_text(size=15),
plot.caption = element_text(size=10),
plot.background = element_rect(fill = 'white'),
panel.border = element_rect(fill=NA, size=1, color="grey30"),
panel.background = element_blank(),
panel.grid.major = element_line(size=.3, color="grey50",linetype = 3),
panel.grid.minor = element_blank(),
axis.text = element_text(size = sz-2,face="bold", color="black"),
axis.title = element_text(size = sz, face="bold", color = "black"),
axis.title.y = element_blank(),
axis.ticks = element_blank(),
strip.text = element_text(size=sz, face="bold"),
strip.background = element_blank(),
legend.title = element_text(size=sz, face="bold"),
legend.text = element_text(size=sz-2),
legend.key = element_rect(fill = NA),
legend.position = "bottom")
}
# Build the HTML biography paragraph shown on a president's page.
#
# @param df Speech metadata with columns yearpresident, president_name_forward,
#   gender, affiliation, pres_number, title and date_of_address.
# @param pres_name Value of yearpresident identifying one address.
# @return HTML string (header + paragraph).
speech_narrative <- function(df, pres_name) {
  info_cols <- c("president_name_forward", "gender", "affiliation",
                 "pres_number", "title", "date_of_address")
  pres <- df[df$yearpresident == pres_name, info_cols]
  # Pronouns keyed off the recorded gender (pronoun itself is currently unused
  # downstream but retained for parity with the possessive form).
  male <- pres$gender == "Male"
  pronoun <- if (male) "He" else "She"
  posessive_pronoun <- if (male) "his" else "her"
  # Affiliation clause is omitted entirely when unknown.
  affil <- if (pres$affiliation != "Unknown") {
    paste0(", from the ", pres$affiliation, ",")
  } else {
    ""
  }
  date_str <- format(as.Date(pres$date_of_address), format = "%A, %B %e, %Y")
  sent1 <- paste0(pres$president_name_forward, affil, " was the ",
                  pres$pres_number, " president of the ASA. ")
  sent2 <- paste0("This page provides text analysis of ", posessive_pronoun,
                  " address entitled <i>'", pres$title,
                  "'</i> delivered on ", date_str, ".")
  paragraph <- paste0("<h3>", pres$president_name_forward, "</h3>",
                      "<p style='font-size:1.2em'>", sent1, sent2, "<br><br>",
                      "You can read the full text of the speech by clicking the button below.</p>")
  return(paragraph)
}
#' Plot JSM host locations on a US map, optionally highlighting one city.
#'
#' @param df Host-city data frame with columns location, lon, lat, num_hosting.
#' @param states,countries Map polygon data frames (long/lat/group columns).
#' @param current_location Optional city name to highlight; NA for none.
#' @return A ggplot object.
plot_location <- function(df, states, countries, current_location=NA){
  if(!is.na(current_location)){
    # NOTE(review): current_loc is computed but never referenced below —
    # presumably a leftover aesthetic; confirm before removing.
    df$current_loc <- df$location == current_location
  }else{
    df$current_loc <- NA
  }
  g <- ggplot(countries, aes(x = long, y = lat)) +
    geom_polygon(aes(group=group), fill="transparent", color = "lightsteelblue2") +
    geom_polygon(data = states, aes(x=long, y=lat, group=group), color="lightsteelblue2",
                 fill="transparent", size=0.25) +
    theme_minimal() +
    theme(legend.position = "none",
          plot.title = element_text(size=22, face="bold"),
          plot.subtitle = element_text(size=15),
          panel.grid = element_blank(),
          axis.text = element_blank(),
          # Fixed: removed the stray trailing comma after this argument, which
          # left an empty argument in the theme() call.
          axis.title = element_blank()) +
    coord_fixed(ratio=1.45, xlim = c(-125, -66.4), ylim = c(24.24, 50.75), expand = FALSE)
  if(!is.na(current_location)){
    # Highlight the current city (green square + label); others are red circles.
    g <- g +
      geom_point(data=df[df$location != current_location, ], aes(lon, lat, size=num_hosting),
                 color="indianred3", alpha=0.8) +
      geom_point(data=df[df$location == current_location, ], aes(lon, lat, size=num_hosting),
                 color="olivedrab4", pch=15) +
      geom_label_repel(data = df[df$location==current_location, ],
                       aes(lon, lat, label=location), color="black", point.padding =0.5)
  }else{
    g <- g + geom_point(data=df, aes(lon, lat, size=num_hosting, color="indianred4"),
                        alpha=0.8)
  }
  return(g)
}
|
library(BayesSingleSub)
### Name: trendtest.MC.AR
### Title: Obtain Bayesian trend test or single case data
### Aliases: trendtest.MC.AR
### Keywords: htest models
### ** Examples
## Define data
# 25 observations from a single-case design; presumably the first 10 form the
# baseline phase and the remaining 15 the treatment phase (see the call below)
# — TODO confirm against the package example.
data = c(87.5, 82.5, 53.4, 72.3, 94.2, 96.6, 57.4, 78.1, 47.2,
         80.7, 82.1, 73.7, 49.3, 79.3, 73.3, 57.3, 31.7, 50.4, 77.8,
         67, 40.5, 1.6, 38.6, 3.2, 24.1)
## Obtain log Bayes factors
# Monte Carlo Bayesian trend test with AR errors: phase-1 vs phase-2 data.
logBFs = trendtest.MC.AR(data[1:10], data[11:25])
| /data/genthat_extracted_code/BayesSingleSub/examples/trendtest.MC.AR.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 438 | r | library(BayesSingleSub)
### Name: trendtest.MC.AR
### Title: Obtain Bayesian trend test or single case data
### Aliases: trendtest.MC.AR
### Keywords: htest models
### ** Examples
## Define data
data = c(87.5, 82.5, 53.4, 72.3, 94.2, 96.6, 57.4, 78.1, 47.2,
80.7, 82.1, 73.7, 49.3, 79.3, 73.3, 57.3, 31.7, 50.4, 77.8,
67, 40.5, 1.6, 38.6, 3.2, 24.1)
## Obtain log Bayes factors
logBFs = trendtest.MC.AR(data[1:10], data[11:25])
|
# Yige Wu @ WashU 2021 Jun
## annotate sample copy number profile (3p, 5q, 14q)
## CNV: https://wustl.box.com/s/vlde6w791k81q1aibvgs0a487zxleij6
# Purity and ploidy: https://wustl.box.com/s/jel5krgvnvlq5z32vdg4itdcq6znuqzn
# From UMich
# Overview: maps per-gene CNV log-ratios onto BAP1-comparison snATAC peaks
# and expands them into a barcode-by-peak matrix (Tumor and PT cells only),
# which is written out as an RDS.
# NOTE(review): fread/mapvalues/str_split_fixed/%>% are used without visible
# library() calls -- presumably attached by the sourced load_pkgs.R; confirm.
# set up libraries and output directory -----------------------------------
## set working directory
dir_base = "~/Box/Ding_Lab/Projects_Current/RCC/ccRCC_snRNA/"
setwd(dir_base)
source("./ccRCC_snRNA_analysis/load_pkgs.R")
source("./ccRCC_snRNA_analysis/functions.R")
source("./ccRCC_snRNA_analysis/variables.R")
## set run id
version_tmp <- 1
run_id <- paste0(format(Sys.Date(), "%Y%m%d") , ".v", version_tmp)
## set output directory
dir_out <- paste0(makeOutDir(), run_id, "/")
dir.create(dir_out)
# input dependencies ------------------------------------------------------
## input CNA matrix
cna_df <- fread(data.table = F, input = "~/Box/CPTAC_ccRCC/Data_Freeze_1.1/CPTAC_ccRCC_Combined/Absolute_cnv/c3-ccrcc-combined-cnvex-lr_v1.0.csv")
## input snRNA sample set
metadata_df <- fread("./Resources/Analysis_Results/sample_info/make_meta_data/20210423.v1/meta_data.20210423.v1.tsv", data.table = F)
## input peaks
peaks_df <- fread(data.table = F, input = "./Resources/snATAC_Processed_Data/Differential_Peaks/BAP1_Specific/BAP1_comparison_Filtered_peaks_byMinPct_MinPctDiff.tsv")
peak2gene_df <- fread(data.table = F, input = "./Resources/snATAC_Processed_Data/Peak_Annotation/All_peaks_annotated_26snATAC_merged_obj.20210607.tsv")
## input the barcode info
barcodes_df <- fread(data.table = F, input = "./Resources/snATAC_Processed_Data/Barcode_Annotation/Cell_type_annotation_snATAC.20210604.tsv")
# preprocess ----------------------------
## get barcodes to process
barcodes_df <- barcodes_df %>%
mutate(id_bc = paste0(Sample, "_", Barcode))
barcodes_df$Case <- mapvalues(x = barcodes_df$Sample, from = metadata_df$Aliquot.snRNA, to = as.vector(metadata_df$Case))
barcodes_df$Sample_type <- mapvalues(x = barcodes_df$Sample, from = metadata_df$Aliquot.snRNA, to = as.vector(metadata_df$Sample_Type))
# table(barcodes_df$Cell_type)
# barcodes_df %>%
# filter(Sample_type == "Tumor" & Cell_type == "PT")
# The three vectors below are subset with the same logical mask, so they are
# positionally aligned: one entry (id, case, cell type) per retained barcode.
barcodes_process <- barcodes_df$id_bc[barcodes_df$Cell_type %in% c("Tumor", "PT")]
cases_process <- barcodes_df$Case[barcodes_df$Cell_type %in% c("Tumor", "PT")]
celltypes_process <- barcodes_df$Cell_type[barcodes_df$Cell_type %in% c("Tumor", "PT")]
## get genes to process
peaks_filtered_df <- peaks_df %>%
mutate(pct.diff = abs(pct.1 - pct.2)) %>%
filter(pct.diff >= 0.02)
peak2gene_filtered_df <- peak2gene_df %>%
filter(peak %in% peaks_filtered_df$peak)
genes_process <- unique(peak2gene_filtered_df$SYMBOL)
genes_process <- genes_process[genes_process %in% cna_df$gene_name]
# preprocess mean CNV values per gene--------------------------------------------------------------
## filter the CNVs
cna_filtered_df <- cna_df[cna_df$gene_name %in% genes_process,]
## preprocess the CNV data frame
# Truncate sample column names at the first "." so they match the Case IDs.
colnames_old <- colnames(cna_filtered_df)
colnames_new <- str_split_fixed(string = colnames_old, pattern = "\\.", n = 4)[,1]
colnames(cna_filtered_df) <- colnames_new
cna_filtered_df <- cna_filtered_df[!duplicated(cna_filtered_df$gene_name),]
## add peak
cna_bypeak_df <- merge(x = cna_filtered_df,
y = peak2gene_filtered_df %>%
select(peak, SYMBOL),
by.x = c("gene_name"), by.y = c("SYMBOL"), all.x = T)
# Selecting columns once per barcode (via its case ID) replicates each case's
# CNV column across all of that case's barcodes; PT (non-tumor) barcodes are
# then zeroed out so only tumor cells carry CNV signal.
cna_bybc_bypeak_df <- cna_bypeak_df[, cases_process]
cna_bybc_bypeak_df[, celltypes_process == "PT"] <- 0
rownames(cna_bybc_bypeak_df) <- cna_bypeak_df$peak
colnames(cna_bybc_bypeak_df) <- barcodes_process
# Interactive sanity check of the top-left corner of the matrix.
cna_bybc_bypeak_df[1:5, 1:5]
cna_t_mat <- t(as.matrix(cna_bybc_bypeak_df))
cna_t_mat[1:5, 1:5]
# write table -------------------------------------------------------------
file2write <- paste0(dir_out, "Barcode2BAP1PrefilteredPeak.CNV.", run_id, ".RDS")
saveRDS(object = cna_t_mat, file = file2write, compress = T)
| /snatac/da_peaks/annotate_peaks/map_cnvnex_lr_by_BAP1_comparison_prefilteredpeaks_by_barcode.R | no_license | ding-lab/ccRCC_snRNA_analysis | R | false | false | 3,956 | r | # Yige Wu @ WashU 2021 Jun
## annotate sample copy number profile (3p, 5q, 14q)
## CNV: https://wustl.box.com/s/vlde6w791k81q1aibvgs0a487zxleij6
# Purity and ploidy: https://wustl.box.com/s/jel5krgvnvlq5z32vdg4itdcq6znuqzn
# From UMich
# set up libraries and output directory -----------------------------------
## set working directory
dir_base = "~/Box/Ding_Lab/Projects_Current/RCC/ccRCC_snRNA/"
setwd(dir_base)
source("./ccRCC_snRNA_analysis/load_pkgs.R")
source("./ccRCC_snRNA_analysis/functions.R")
source("./ccRCC_snRNA_analysis/variables.R")
## set run id
version_tmp <- 1
run_id <- paste0(format(Sys.Date(), "%Y%m%d") , ".v", version_tmp)
## set output directory
dir_out <- paste0(makeOutDir(), run_id, "/")
dir.create(dir_out)
# input dependencies ------------------------------------------------------
## input CNA matrix
cna_df <- fread(data.table = F, input = "~/Box/CPTAC_ccRCC/Data_Freeze_1.1/CPTAC_ccRCC_Combined/Absolute_cnv/c3-ccrcc-combined-cnvex-lr_v1.0.csv")
## input snRNA sample set
metadata_df <- fread("./Resources/Analysis_Results/sample_info/make_meta_data/20210423.v1/meta_data.20210423.v1.tsv", data.table = F)
## input peaks
peaks_df <- fread(data.table = F, input = "./Resources/snATAC_Processed_Data/Differential_Peaks/BAP1_Specific/BAP1_comparison_Filtered_peaks_byMinPct_MinPctDiff.tsv")
peak2gene_df <- fread(data.table = F, input = "./Resources/snATAC_Processed_Data/Peak_Annotation/All_peaks_annotated_26snATAC_merged_obj.20210607.tsv")
## input the barcode info
barcodes_df <- fread(data.table = F, input = "./Resources/snATAC_Processed_Data/Barcode_Annotation/Cell_type_annotation_snATAC.20210604.tsv")
# preprocess ----------------------------
## get barcodes to process
barcodes_df <- barcodes_df %>%
mutate(id_bc = paste0(Sample, "_", Barcode))
barcodes_df$Case <- mapvalues(x = barcodes_df$Sample, from = metadata_df$Aliquot.snRNA, to = as.vector(metadata_df$Case))
barcodes_df$Sample_type <- mapvalues(x = barcodes_df$Sample, from = metadata_df$Aliquot.snRNA, to = as.vector(metadata_df$Sample_Type))
# table(barcodes_df$Cell_type)
# barcodes_df %>%
# filter(Sample_type == "Tumor" & Cell_type == "PT")
barcodes_process <- barcodes_df$id_bc[barcodes_df$Cell_type %in% c("Tumor", "PT")]
cases_process <- barcodes_df$Case[barcodes_df$Cell_type %in% c("Tumor", "PT")]
celltypes_process <- barcodes_df$Cell_type[barcodes_df$Cell_type %in% c("Tumor", "PT")]
## get genes to process
peaks_filtered_df <- peaks_df %>%
mutate(pct.diff = abs(pct.1 - pct.2)) %>%
filter(pct.diff >= 0.02)
peak2gene_filtered_df <- peak2gene_df %>%
filter(peak %in% peaks_filtered_df$peak)
genes_process <- unique(peak2gene_filtered_df$SYMBOL)
genes_process <- genes_process[genes_process %in% cna_df$gene_name]
# preprocess mean CNV values per gene--------------------------------------------------------------
## filter the CNVs
cna_filtered_df <- cna_df[cna_df$gene_name %in% genes_process,]
## preprocess the CNV data frame
colnames_old <- colnames(cna_filtered_df)
colnames_new <- str_split_fixed(string = colnames_old, pattern = "\\.", n = 4)[,1]
colnames(cna_filtered_df) <- colnames_new
cna_filtered_df <- cna_filtered_df[!duplicated(cna_filtered_df$gene_name),]
## add peak
cna_bypeak_df <- merge(x = cna_filtered_df,
y = peak2gene_filtered_df %>%
select(peak, SYMBOL),
by.x = c("gene_name"), by.y = c("SYMBOL"), all.x = T)
cna_bybc_bypeak_df <- cna_bypeak_df[, cases_process]
cna_bybc_bypeak_df[, celltypes_process == "PT"] <- 0
rownames(cna_bybc_bypeak_df) <- cna_bypeak_df$peak
colnames(cna_bybc_bypeak_df) <- barcodes_process
cna_bybc_bypeak_df[1:5, 1:5]
cna_t_mat <- t(as.matrix(cna_bybc_bypeak_df))
cna_t_mat[1:5, 1:5]
# write table -------------------------------------------------------------
file2write <- paste0(dir_out, "Barcode2BAP1PrefilteredPeak.CNV.", run_id, ".RDS")
saveRDS(object = cna_t_mat, file = file2write, compress = T)
|
# TidyTuesday 2021-02-09: smoothed trends of average family liquid
# retirement savings by race, 1989-2016 (Urban Institute data).
library(tidyverse)
library(scales)
library(ggthemes)
# tt_load() downloads the week's TidyTuesday data bundle (needs network).
df <- tidytuesdayR::tt_load('2021-02-09')
retire <- df$retirement
retire %>%
  ggplot(aes(x = year, y = retirement, group = race, colour = race)) +
  geom_smooth(size = 1.5, se = FALSE) +
  theme_fivethirtyeight() +
  scale_x_continuous(breaks = c(1990, 1995, 2000, 2005, 2010, 2015)) +
  scale_y_continuous(labels = dollar_format(1)) +
  theme(legend.title = element_blank()) +
  labs(title = "Average family liquid retirement savings in the United States, 1989-2016",
       subtitle = "Dollar amounts on Y-axis normalized to 2016 dollars",
       caption = "Data source: Urban Institute/TidyTuesday")
# Save the last plot (wide aspect ratio, inches by default).
ggsave("figures/2021-02-09.png", width = 17, height = 8)
| /code/2021-02-09.R | no_license | alexlusco/tidy-tuesday | R | false | false | 724 | r | library(tidyverse)
# Duplicate of the TidyTuesday 2021-02-09 retirement-savings plot script.
# NOTE(review): this chunk calls ggplot()/%>% without a visible
# library(tidyverse) -- presumably attached earlier in the session.
library(scales)
library(ggthemes)
# tt_load() downloads the week's TidyTuesday data bundle (needs network).
df <- tidytuesdayR::tt_load('2021-02-09')
retire <- df$retirement
retire %>%
  ggplot(aes(x = year, y = retirement, group = race, colour = race)) +
  geom_smooth(size = 1.5, se = FALSE) +
  theme_fivethirtyeight() +
  scale_x_continuous(breaks = c(1990, 1995, 2000, 2005, 2010, 2015)) +
  scale_y_continuous(labels = dollar_format(1)) +
  theme(legend.title = element_blank()) +
  labs(title = "Average family liquid retirement savings in the United States, 1989-2016",
       subtitle = "Dollar amounts on Y-axis normalized to 2016 dollars",
       caption = "Data source: Urban Institute/TidyTuesday")
# Save the last plot (wide aspect ratio, inches by default).
ggsave("figures/2021-02-09.png", width = 17, height = 8)
|
#By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.
#What is the 10 001st prime number?
timestart <- Sys.time()
isprimenumber <- function(x){
  # TRUE if x is prime, FALSE otherwise.
  #
  # Fixes vs. the original: any x < 2 (not just x == 1) is rejected, so
  # negative inputs no longer error via as.integer(sqrt(x)); x == 2 is
  # handled explicitly instead of the `& x != 2` special-case inside the
  # loop; trial division stops at floor(sqrt(x)) (the original looped one
  # step past it) and skips even divisors.
  if (x < 2) {
    return(FALSE)
  }
  if (x == 2) {
    return(TRUE)
  }
  if (x %% 2 == 0) {
    return(FALSE)
  }
  limit <- as.integer(sqrt(x))
  if (limit >= 3) {
    # Only odd candidate divisors need checking once evens are excluded.
    for (d in seq(3, limit, by = 2)) {
      if (x %% d == 0) {
        return(FALSE)
      }
    }
  }
  TRUE
}
#method one
timestart <- Sys.time()
nst_prime_number <- function(n){
  # Walk the integers upward, counting primes via isprimenumber(), until
  # the n-th prime has been found.
  # Returns a named vector c(i = <primes counted>, x = <the n-th prime>).
  found <- 0
  candidate <- 0
  while (found < n) {
    candidate <- candidate + 1
    if (isprimenumber(candidate)) {
      found <- found + 1
    }
  }
  c(i = found, x = candidate)
}
nst_prime_number(10001)
timestop <- Sys.time()
timecost<- timestop - timestart
timecost
#method two
timestart <- Sys.time()
nst_prime_number <- function(prime, num){
  # Return the num-th prime, extending the supplied named list `prime`
  # (seeded with at least the first three primes 2, 3, 5) by testing
  # candidates of the form 6k +/- 1 against the primes found so far.
  #
  # Bug fix: the loop condition was `while(n > 3)`, which read an
  # undefined global `n` and errored (or looped on stale state) when no
  # such global existed. The loop is in fact terminated by the internal
  # `break` once `num` primes are known, so an unconditional repeat is
  # the correct control flow.
  i <- 7
  gab <- 2
  repeat {
    flag <- TRUE
    for(x in prime){
      # No prime factor can exceed sqrt(i); stop once x * x > i.
      if(x * x > i){
        break
      }
      if(i %% x == 0){
        flag <- FALSE
        break
      }
    }
    if(flag){
      l <- length(prime) + 1
      prime[[toString(l)]] <- i
      if(l > num){
        break
      }
    }
    # Alternate step sizes 2 and 4 to visit 6k - 1 and 6k + 1 candidates.
    gab <- 6 - gab
    i <- i + gab
  }
  return(prime[[num]])
}
nst_prime_number(list('1'= 2, '2' = 3, '3' = 5), 10001)
timestop <- Sys.time()
timecost<- timestop - timestart
timecost
#method three
| /Project Euler/007-nst_prime_number.R | no_license | QiliWu/leetcode-and-Project-Euler | R | false | false | 1,259 | r | #By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.
#What is the 10 001st prime number?
timestart <- Sys.time()
isprimenumber <- function(x){
  # TRUE if x is prime, FALSE otherwise.
  #
  # Fixes vs. the original: any x < 2 (not just x == 1) is rejected, so
  # negative inputs no longer error via as.integer(sqrt(x)); x == 2 is
  # handled explicitly instead of the `& x != 2` special-case inside the
  # loop; trial division stops at floor(sqrt(x)) (the original looped one
  # step past it) and skips even divisors.
  if (x < 2) {
    return(FALSE)
  }
  if (x == 2) {
    return(TRUE)
  }
  if (x %% 2 == 0) {
    return(FALSE)
  }
  limit <- as.integer(sqrt(x))
  if (limit >= 3) {
    # Only odd candidate divisors need checking once evens are excluded.
    for (d in seq(3, limit, by = 2)) {
      if (x %% d == 0) {
        return(FALSE)
      }
    }
  }
  TRUE
}
#method one
timestart <- Sys.time()
nst_prime_number <- function(n){
  # Walk the integers upward, counting primes via isprimenumber(), until
  # the n-th prime has been found.
  # Returns a named vector c(i = <primes counted>, x = <the n-th prime>).
  found <- 0
  candidate <- 0
  while (found < n) {
    candidate <- candidate + 1
    if (isprimenumber(candidate)) {
      found <- found + 1
    }
  }
  c(i = found, x = candidate)
}
nst_prime_number(10001)
timestop <- Sys.time()
timecost<- timestop - timestart
timecost
#method two
timestart <- Sys.time()
nst_prime_number <- function(prime, num){
  # Return the num-th prime, extending the supplied named list `prime`
  # (seeded with at least the first three primes 2, 3, 5) by testing
  # candidates of the form 6k +/- 1 against the primes found so far.
  #
  # Bug fix: the loop condition was `while(n > 3)`, which read an
  # undefined global `n` and errored (or looped on stale state) when no
  # such global existed. The loop is in fact terminated by the internal
  # `break` once `num` primes are known, so an unconditional repeat is
  # the correct control flow.
  i <- 7
  gab <- 2
  repeat {
    flag <- TRUE
    for(x in prime){
      # No prime factor can exceed sqrt(i); stop once x * x > i.
      if(x * x > i){
        break
      }
      if(i %% x == 0){
        flag <- FALSE
        break
      }
    }
    if(flag){
      l <- length(prime) + 1
      prime[[toString(l)]] <- i
      if(l > num){
        break
      }
    }
    # Alternate step sizes 2 and 4 to visit 6k - 1 and 6k + 1 candidates.
    gab <- 6 - gab
    i <- i + gab
  }
  return(prime[[num]])
}
nst_prime_number(list('1'= 2, '2' = 3, '3' = 5), 10001)
timestop <- Sys.time()
timecost<- timestop - timestart
timecost
#method three
|
# R code to illustrate K-nearest neighbors (KNN) classifier and the Bayes optimal classifier.
# This code accompanies Lecture 3: Measuring performance.
#
# Author: Jeffrey W. Miller
# Date: Feb 3, 2019
# ____________________________________________________________________________________________
# KNN classifier algorithm (for univariate x's and binary y's) -- probability version
# x0 = new point at which to predict the y value
# x = (x_1,...,x_n) = vector of training x's, where x[i] is real-valued
# y = (y_1,...,y_n) = vector of training y's, where y[i] is 0 or 1
# K = number of neighbors to use
# p1_hat = estimated probability of y0=1 given x0
# Note: We can transform p1_hat to a prediction of the y value at x0 by thresholding p1_hat.
KNN_classifier = function(x0, x, y, K) {
  # Estimate P(y0 = 1 | x0) as the fraction of 1s among the labels of the
  # K training points nearest to x0 (absolute distance; univariate x).
  #   x0: scalar query point
  #   x, y: training inputs and binary labels (y[i] in {0, 1})
  #   K: number of neighbors to average over
  nearest <- order(abs(x - x0))[1:K]   # indices of the K closest points
  mean(y[nearest])                     # estimated probability that y0 = 1
}
# ____________________________________________________________________________________________
# Demonstrate KNN classifier
# Simulate a dataset with univariate x's
# Fixing the seed makes the simulated training set reproducible across runs.
set.seed(1) # set random number generator
n = 20 # number of samples
x = 5*runif(n) # simulate training x's uniformly on the interval [0,5]
p1 = function(x) { exp(2*cos(x))/(1 + exp(2*cos(x))) } # p1(x) = true probability of y=1 given x (true relationship between x and y)
y = rbinom(n,1,p1(x)) # simulate training y's as Bernoulli r.v.s with probabilities p1(x)
plot(x,y,col=2,pch=20,cex=2) # plot training data
x_grid = seq(from=0, to=5, by=0.01) # grid of x values at which to plot true and predicted y values
lines(x_grid,p1(x_grid)) # plot true p1(x) values for the grid
# Run KNN to predict y at each point on the grid of x values
K = 1 # number of neighbors to use
p1_grid_hat = sapply(x_grid, function(x0) { KNN_classifier(x0, x, y, K) }) # run KNN classifier at each x in the grid
y_grid_hat = round(p1_grid_hat > 0.5) # predict the y values for each x in the grid by thresholding the estimated probabilities
plot(x,y,col=2,pch=20,cex=2) # plot training data
title(paste("K =",K))
lines(x_grid,p1(x_grid)) # plot true p1(x) values for the grid
lines(x_grid,p1_grid_hat,col=4) # plot the estimated probabilities of y=1 for each x0 in the grid
lines(x_grid,y_grid_hat,col=3) # plot the predicted y values for each x0 in the grid
# ____________________________________________________________________________________________
# Error rates and the Bayes optimal classifier
# Note: with K = 1 each training point is its own nearest neighbor (assuming
# no tied x values), so the training error below is 0 by construction -- an
# illustration of overfitting.
# Training error rate
p1_hat = sapply(x, function(x0) { KNN_classifier(x0, x, y, K) }) # run KNN classifier (probability version) at each x in the training set
y_hat = round(p1_hat > 0.5) # predict the y values for each x in the training set (prediction version of KNN)
train_error = mean(y_hat != y) # compute the training error rate
print(paste0("Training error rate (K = ",K,"): ",train_error))
# Test error rate
n_test = 10000 # large number of samples to simulate as a test set
x_test = 5*runif(n_test) # simulate test x's
y_test = rbinom(n_test,1,p1(x_test)) # simulate test y's
p1_test_hat = sapply(x_test, function(x0) { KNN_classifier(x0, x, y, K) }) # run KNN classifier (probability version) at each x in the test set
y_test_hat = round(p1_test_hat > 0.5) # predict the y values for each x in the test set (prediction version of KNN)
test_error = mean(y_test_hat != y_test) # compute the test error rate
print(paste0("Test error rate (K = ",K,"): ",test_error))
# How can we tell if this is a good test error rate? Since this is a simulation, we can compare with the best possible test error rate...
# Bayes optimal classifier
y_hat_optimal = round(p1(x) > 0.5) # use the true p1(x) to make the best possible predictions on the training set
train_error_optimal = mean(y_hat_optimal != y) # compute the training error rate for the Bayes optimal classifier
print(paste0("Training error rate (Optimal): ",train_error_optimal))
y_test_hat_optimal = round(p1(x_test) > 0.5) # use the true p1(x) to make the best possible predictions on the test set
test_error_optimal = mean(y_test_hat_optimal != y_test) # compute the test error rate for the Bayes optimal classifier
print(paste0("Test error rate (Optimal): ",test_error_optimal))
# ____________________________________________________________________________________________
# Bias-variance tradeoff
# Re-simulating the y's at fixed x's and re-fitting shows how much the
# fitted curve varies from data set to data set.
K = 1 # number of neighbors to use
n_datasets = 50 # number of data sets to simulate, to approximate expectations over the Y's (with fixed x's)
for (i in 1:n_datasets) {
  y = rbinom(n,1,p1(x)) # simulate training y's
  plot(x,y,col=2,pch=20,cex=2,ylim=c(0,1)) # plot training data
  lines(x_grid,p1(x_grid)) # plot true p1(x) values for the grid
  p1_grid_hat = sapply(x_grid, function(x0) { KNN_classifier(x0, x, y, K) }) # run KNN classifier at each x in the grid
  # y_grid_hat = round(p1_grid_hat > 0.5) # predict the y values for each x
  lines(x_grid,p1_grid_hat,col=4) # plot predicted p1(x) values for the grid
  # lines(x_grid,y_grid_hat,col=3) # plot the predicted y values for each x0 in the grid
  Sys.sleep(0.1) # pause in order to display the plot
  # readline() # Wait for <Enter> to continue
}
| /knn-classifier.r | no_license | Celaeno1017/Statistical-Learning | R | false | false | 5,525 | r | # R code to illustrate K-nearest neighbors (KNN) classifier and the Bayes optimal classifier.
# This code accompanies Lecture 3: Measuring performance.
#
# Author: Jeffrey W. Miller
# Date: Feb 3, 2019
# ____________________________________________________________________________________________
# KNN classifier algorithm (for univariate x's and binary y's) -- probability version
# x0 = new point at which to predict the y value
# x = (x_1,...,x_n) = vector of training x's, where x[i] is real-valued
# y = (y_1,...,y_n) = vector of training y's, where y[i] is 0 or 1
# K = number of neighbors to use
# p1_hat = estimated probability of y0=1 given x0
# Note: We can transform p1_hat to a prediction of the y value at x0 by thresholding p1_hat.
KNN_classifier = function(x0, x, y, K) {
  # Estimate P(y0 = 1 | x0) as the fraction of 1s among the labels of the
  # K training points nearest to x0 (absolute distance; univariate x).
  #   x0: scalar query point
  #   x, y: training inputs and binary labels (y[i] in {0, 1})
  #   K: number of neighbors to average over
  nearest <- order(abs(x - x0))[1:K]   # indices of the K closest points
  mean(y[nearest])                     # estimated probability that y0 = 1
}
# ____________________________________________________________________________________________
# Demonstrate KNN classifier
# Simulate a dataset with univariate x's
set.seed(1) # set random number generator
n = 20 # number of samples
x = 5*runif(n) # simulate training x's uniformly on the interval [0,5]
p1 = function(x) { exp(2*cos(x))/(1 + exp(2*cos(x))) } # p1(x) = true probability of y=1 given x (true relationship between x and y)
y = rbinom(n,1,p1(x)) # simulate training y's as Bernoulli r.v.s with probabilities p1(x)
plot(x,y,col=2,pch=20,cex=2) # plot training data
x_grid = seq(from=0, to=5, by=0.01) # grid of x values at which to plot true and predicted y values
lines(x_grid,p1(x_grid)) # plot true p1(x) values for the grid
# Run KNN to predict y at each point on the grid of x values
K = 1 # number of neighbors to use
p1_grid_hat = sapply(x_grid, function(x0) { KNN_classifier(x0, x, y, K) }) # run KNN classifier at each x in the grid
y_grid_hat = round(p1_grid_hat > 0.5) # predict the y values for each x in the grid by thresholding the estimated probabilities
plot(x,y,col=2,pch=20,cex=2) # plot training data
title(paste("K =",K))
lines(x_grid,p1(x_grid)) # plot true p1(x) values for the grid
lines(x_grid,p1_grid_hat,col=4) # plot the estimated probabilities of y=1 for each x0 in the grid
lines(x_grid,y_grid_hat,col=3) # plot the predicted y values for each x0 in the grid
# ____________________________________________________________________________________________
# Error rates and the Bayes optimal classifier
# Training error rate
p1_hat = sapply(x, function(x0) { KNN_classifier(x0, x, y, K) }) # run KNN classifier (probability version) at each x in the training set
y_hat = round(p1_hat > 0.5) # predict the y values for each x in the training set (prediction version of KNN)
train_error = mean(y_hat != y) # compute the training error rate
print(paste0("Training error rate (K = ",K,"): ",train_error))
# Test error rate
n_test = 10000 # large number of samples to simulate as a test set
x_test = 5*runif(n_test) # simulate test x's
y_test = rbinom(n_test,1,p1(x_test)) # simulate test y's
p1_test_hat = sapply(x_test, function(x0) { KNN_classifier(x0, x, y, K) }) # run KNN classifier (probability version) at each x in the test set
y_test_hat = round(p1_test_hat > 0.5) # predict the y values for each x in the test set (prediction version of KNN)
test_error = mean(y_test_hat != y_test) # compute the test error rate
print(paste0("Test error rate (K = ",K,"): ",test_error))
# How can we tell if this is a good test error rate? Since this is a simulation, we can compare with the best possible test error rate...
# Bayes optimal classifier
y_hat_optimal = round(p1(x) > 0.5) # use the true p1(x) to make the best possible predictions on the training set
train_error_optimal = mean(y_hat_optimal != y) # compute the training error rate for the Bayes optimal classifier
print(paste0("Training error rate (Optimal): ",train_error_optimal))
y_test_hat_optimal = round(p1(x_test) > 0.5) # use the true p1(x) to make the best possible predictions on the test set
test_error_optimal = mean(y_test_hat_optimal != y_test) # compute the test error rate for the Bayes optimal classifier
print(paste0("Test error rate (Optimal): ",test_error_optimal))
# ____________________________________________________________________________________________
# Bias-variance tradeoff
K = 1 # number of neighbors to use
n_datasets = 50 # number of data sets to simulate, to approximate expectations over the Y's (with fixed x's)
for (i in 1:n_datasets) {
y = rbinom(n,1,p1(x)) # simulate training y's
plot(x,y,col=2,pch=20,cex=2,ylim=c(0,1)) # plot training data
lines(x_grid,p1(x_grid)) # plot true p1(x) values for the grid
p1_grid_hat = sapply(x_grid, function(x0) { KNN_classifier(x0, x, y, K) }) # run KNN classifier at each x in the grid
# y_grid_hat = round(p1_grid_hat > 0.5) # predict the y values for each x
lines(x_grid,p1_grid_hat,col=4) # plot predicted p1(x) values for the grid
# lines(x_grid,y_grid_hat,col=3) # plot the predicted y values for each x0 in the grid
Sys.sleep(0.1) # pause in order to display the plot
# readline() # Wait for <Enter> to continue
}
|
#' Summarize tests of directed separation using Fisher's C statistic
#'
#' @param dTable a \code{data.frame} containing tests of directed separation from \code{dSep}
#' @param add.claims an optional vector of additional independence claims (i.e., P-values)
#' to be added to the basis set
#' @param basis.set An optional list of independence claims.
#' @param direction a vector of claims defining the specific directionality of any independence
#' claim(s)
#' @param conserve whether the most conservative P-value should be returned.
#' Default is FALSE
#' @param conditional whether the conditioning variables should be shown in the table.
#' Default is FALSE
#' @param .progressBar an optional progress bar. Default is FALSE
#'
#' @return a \code{data.frame} with one row giving the C statistic, d.f., and P-value
#'
#' @export
#'
fisherC <- function(dTable, add.claims = NULL, basis.set = NULL, direction = NULL, conserve = FALSE, conditional = FALSE, .progressBar = FALSE) {
  # Summarize tests of directed separation using Fisher's C statistic.
  #
  # `dTable` may be a data.frame of d-sep tests (with a `P.Value` column),
  # a list of models, or a fitted `psem` object; the latter two are first
  # run through dSep() to obtain the table of independence claims.
  #
  # Returns a one-row data.frame with columns Fisher.C, df and P.Value,
  # all rounded to 3 decimal places.
  #
  # Fix: use inherits() instead of `class(x) == "..."` -- class() can
  # return a vector of length > 1 for classed objects, which makes the
  # comparison invalid inside if().
  if(inherits(dTable, "list")) dTable <- as.psem(dTable)
  if(inherits(dTable, "psem")) dTable <- dSep(dTable, basis.set, direction, conserve, conditional, .progressBar)
  if(length(dTable) == 0) {
    # No independence claims: C = 0 on 0 d.f. gives P = 1 by convention.
    Cstat <- 0
    DF <- 0
    P <- 1
  } else {
    ps <- dTable$P.Value
    if(!is.null(add.claims)) {
      ps <- c(ps, add.claims)
      message("Fisher's C has been adjusted to include additional claims not shown in the tests of directed separation.")
    }
    # Offset exact zeros so log(ps) below stays finite.
    if(any(ps == 0)) ps <- ps + 1e-20
    # Fisher's C = -2 * sum(log(p_i)), chi-squared on 2k d.f.
    Cstat <- -2 * sum(log(ps))
    DF <- 2 * length(ps)
    P <- 1 - pchisq(Cstat, DF)
  }
  ret <- data.frame(Fisher.C = Cstat, df = DF, P.Value = P)
  ret[, which(sapply(ret, is.numeric))] <- round(ret[, which(sapply(ret, is.numeric))], 3)
  return(ret)
}
| /R/fisherC.R | no_license | ecustwy/piecewiseSEM | R | false | false | 1,813 | r | #' Summarize tests of directed separation using Fisher's C statistic
#'
#' @param dTable a \code{data.frame} containing tests of directed separation from \code{dSep}
#' @param add.claims an optional vector of additional independence claims (i.e., P-values)
#' to be added to the basis set
#' @param basis.set An optional list of independence claims.
#' @param direction a vector of claims defining the specific directionality of any independence
#' claim(s)
#' @param conserve whether the most conservative P-value should be returned.
#' Default is FALSE
#' @param conditional whether the conditioning variables should be shown in the table.
#' Default is FALSE
#' @param .progressBar an optional progress bar. Default is FALSE
#'
#' @return a \code{data.frame} with one row giving the C statistic, d.f., and P-value
#'
#' @export
#'
fisherC <- function(dTable, add.claims = NULL, basis.set = NULL, direction = NULL, conserve = FALSE, conditional = FALSE, .progressBar = FALSE) {
  # Summarize tests of directed separation using Fisher's C statistic.
  #
  # `dTable` may be a data.frame of d-sep tests (with a `P.Value` column),
  # a list of models, or a fitted `psem` object; the latter two are first
  # run through dSep() to obtain the table of independence claims.
  #
  # Returns a one-row data.frame with columns Fisher.C, df and P.Value,
  # all rounded to 3 decimal places.
  #
  # Fix: use inherits() instead of `class(x) == "..."` -- class() can
  # return a vector of length > 1 for classed objects, which makes the
  # comparison invalid inside if().
  if(inherits(dTable, "list")) dTable <- as.psem(dTable)
  if(inherits(dTable, "psem")) dTable <- dSep(dTable, basis.set, direction, conserve, conditional, .progressBar)
  if(length(dTable) == 0) {
    # No independence claims: C = 0 on 0 d.f. gives P = 1 by convention.
    Cstat <- 0
    DF <- 0
    P <- 1
  } else {
    ps <- dTable$P.Value
    if(!is.null(add.claims)) {
      ps <- c(ps, add.claims)
      message("Fisher's C has been adjusted to include additional claims not shown in the tests of directed separation.")
    }
    # Offset exact zeros so log(ps) below stays finite.
    if(any(ps == 0)) ps <- ps + 1e-20
    # Fisher's C = -2 * sum(log(p_i)), chi-squared on 2k d.f.
    Cstat <- -2 * sum(log(ps))
    DF <- 2 * length(ps)
    P <- 1 - pchisq(Cstat, DF)
  }
  ret <- data.frame(Fisher.C = Cstat, df = DF, P.Value = P)
  ret[, which(sapply(ret, is.numeric))] <- round(ret[, which(sapply(ret, is.numeric))], 3)
  return(ret)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bic_util.R
\name{bic.write.dat}
\alias{bic.write.dat}
\title{Write file containing tab delimited data}
\usage{
bic.write.dat(dat, file.name, add.rownames = F, col.names = T, quote = F,
sep = "\\t")
}
\arguments{
\item{dat}{matrix containing data to be written}
\item{file.name}{file name}
\item{add.rownames}{include row names in file; Default: FALSE}
\item{col.names}{include column names in file; Default: TRUE}
\item{quote}{include quotes around each matrix cell; Default: FALSE}
\item{sep}{delimiter; Default: "\\t"}
}
\description{
Wrapper around \code{write.table()} with several defaults set.
}
| /man/bic.write.dat.Rd | no_license | caitlinjones/bicrnaseq | R | false | true | 688 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bic_util.R
\name{bic.write.dat}
\alias{bic.write.dat}
\title{Write file containing tab delimited data}
\usage{
bic.write.dat(dat, file.name, add.rownames = F, col.names = T, quote = F,
sep = "\\t")
}
\arguments{
\item{dat}{matrix containing data to be written}
\item{file.name}{file name}
\item{add.rownames}{include row names in file; Default: FALSE}
\item{col.names}{include column names in file; Default: TRUE}
\item{quote}{include quotes around each matrix cell; Default: FALSE}
\item{sep}{delimiter; Default: "\\t"}
}
\description{
Wrapper around \code{write.table()} with several defaults set.
}
|
## DATA PREP TITLE
## Data Source
# Source Name
# Link:
# Downloaded:
# Timeseries:
# Format:
# Notes here
## Summary
library(tidyverse)
library(plotly)
library(viridis)
library(validate)
## Read in Data
## Tidy: Select subset, clean up data tables, fix strings, fix class
## Wrangle: Join, combine,
## Summarize
## Plotting
| /dataprep/dataprep-template.R | no_license | iwensu0313/us-aquaculture | R | false | false | 345 | r | ## DATA PREP TITLE
## Data Source
# Source Name
# Link:
# Downloaded:
# Timeseries:
# Format:
# Notes here
## Summary
library(tidyverse)
library(plotly)
library(viridis)
library(validate)
## Read in Data
## Tidy: Select subset, clean up data tables, fix strings, fix class
## Wrangle: Join, combine,
## Summarize
## Plotting
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/initialize_module.R
\name{initialize_module}
\alias{initialize_module}
\title{Title}
\usage{
initialize_module(x, param, init, control, s)
}
\arguments{
\item{x}{A number.}
\item{y}{A number.}
}
\value{
return value here.
}
\description{
Description
}
\details{
Additional details here
}
\examples{
example function call here
}
| /man/initialize_module.Rd | no_license | EvoNetHIV/RoleSPVL | R | false | true | 407 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/initialize_module.R
\name{initialize_module}
\alias{initialize_module}
\title{Title}
\usage{
initialize_module(x, param, init, control, s)
}
\arguments{
\item{x}{A number.}
\item{y}{A number.}
}
\value{
return value here.
}
\description{
Description
}
\details{
Additional details here
}
\examples{
example function call here
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zone_utility_functions.R
\name{powerset_zones}
\alias{powerset_zones}
\title{Creates a set of all non-empty subsets of the integers from 1 to \eqn{n}.}
\usage{
powerset_zones(n)
}
\arguments{
\item{n}{An integer larger than 0.}
}
\value{
A list of integer vectors.
}
\description{
Creates a list of all \eqn{2^(n-1)} non-empty subsets of the integers from 1
to \eqn{n}.
}
\keyword{internal}
| /man/powerset_zones.Rd | no_license | rfsaldanha/scanstatistics | R | false | true | 471 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zone_utility_functions.R
\name{powerset_zones}
\alias{powerset_zones}
\title{Creates a set of all non-empty subsets of the integers from 1 to \eqn{n}.}
\usage{
powerset_zones(n)
}
\arguments{
\item{n}{An integer larger than 0.}
}
\value{
A list of integer vectors.
}
\description{
Creates a list of all \eqn{2^(n-1)} non-empty subsets of the integers from 1
to \eqn{n}.
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LongituRF.R
\name{sig.fbm}
\alias{sig.fbm}
\title{Title}
\usage{
sig.fbm(Y, sigma, id, Z, epsilon, Btilde, time, sigma2, h)
}
\arguments{
\item{h}{}
}
\description{
Title
}
\keyword{internal}
| /man/sig.fbm.Rd | no_license | mseyna/LongituRF | R | false | true | 270 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LongituRF.R
\name{sig.fbm}
\alias{sig.fbm}
\title{Title}
\usage{
sig.fbm(Y, sigma, id, Z, epsilon, Btilde, time, sigma2, h)
}
\arguments{
\item{h}{}
}
\description{
Title
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{mn_iceout}
\alias{mn_iceout}
\title{Minnesota Department of Natural Resources State Ice-out Data}
\format{A data frame with 19,261 rows and 10 variables}
\usage{
data(mn_iceout)
}
\description{
This dataset contains the Minnesota state-level historical data
generated via the dataset builders. The dataset builder source can be
found at the \code{URL} in the package \code{DESCRIPTION}.
\itemize{
\item \code{state} <chr> (always Minnesota)
\item \code{body_name} <chr> (inland body of water name)
\item \code{date} <Date> (ice-out date)
\item \code{year} <int> (ice-out year)
\item \code{doy} <int> (ice-out day of year)
\item \code{lat}/\code{lon} <dbl> (coordinates)
\item \code{id} <chr> (MDNR resource id)
\item \code{comments} <chr> (field notes)
\item \code{source} <chr> (recording person/institution)
}
}
\note{
Last updated 2019-01-15.
}
\references{
\url{https://www.dnr.state.mn.us/ice_out/index.html?year=1843}
}
\keyword{datasets}
| /man/mn_iceout.Rd | permissive | BigelowLab/iceout | R | false | true | 1,055 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{mn_iceout}
\alias{mn_iceout}
\title{Minnesota Department of Natural Resources State Ice-out Data}
\format{A data frame with 19,261 rows and 10 variables}
\usage{
data(mn_iceout)
}
\description{
This dataset contains the Minnesota state-level historical data
generated via the dataset builders. The dataset builder source can be
found at the \code{URL} in the package \code{DESCRIPTION}.
\itemize{
\item \code{state} <chr> (always Minnesota)
\item \code{body_name} <chr> (inland body of water name)
\item \code{date} <Date> (ice-out date)
\item \code{year} <int> (ice-out year)
\item \code{doy} <int> (ice-out day of year)
\item \code{lat}/\code{lon} <dbl> (coordinates)
\item \code{id} <chr> (MDNR resource id)
\item \code{comments} <chr> (field notes)
\item \code{source} <chr> (recording person/institution)
}
}
\note{
Last updated 2019-01-15.
}
\references{
\url{https://www.dnr.state.mn.us/ice_out/index.html?year=1843}
}
\keyword{datasets}
|
dataPrep <- function(lData) {
  # Reshape parallel per-chart lists (title/subtitle/range/measures/markers)
  # into a list of named records, one record per chart.
  #
  # Returns a list of length n + 1: element 1 is an all-NA template record
  # (kept for backward compatibility with the original behavior), followed
  # by one filled record per entry of the input lists.
  #
  # NOTE(review): the input field is `lData$range` (singular) while the
  # output slot is named "ranges" -- preserved as-is; confirm upstream naming.
  n <- length(lData[[1]])
  template <- rep(list(NA), 5)
  names(template) <- c("title", "subtitle", "ranges", "measures", "markers")
  retList <- vector("list", n + 1)
  retList[[1]] <- template
  # seq_len() (rather than 1:n) keeps the loop empty when n == 0 instead of
  # iterating over c(1, 0) and failing on out-of-range subscripts.
  for (i in seq_len(n)) {
    rec <- template
    rec[[1]] <- lData$title[[i]]
    rec[[2]] <- lData$subtitle[[i]]
    rec[[3]] <- lData$range[[i]]
    rec[[4]] <- lData$measures[[i]]
    rec[[5]] <- lData$markers[[i]]
    retList[[i + 1]] <- rec
  }
  return(retList)
}
| /R/utils.R | no_license | smartinsightsfromdata/d3Dashboard | R | false | false | 440 | r | dataPrep <- function(lData) {
n <- length(lData[[1]])
retList <- list()
tmp <- rep(list(NA), 5)
names(tmp) <- c("title", "subtitle", "ranges", "measures", "markers")
retList[[1]] <- tmp
for (i in 1:n) {
tmp[[1]] <- lData$title[[i]]
tmp[[2]] <- lData$subtitle[[i]]
tmp[[3]] <- lData$range[[i]]
tmp[[4]] <- lData$measures[[i]]
tmp[[5]] <- lData$markers[[i]]
retList[[i+1]] <- tmp
}
return(retList)
}
|
#######
#Author: Nashipae Waweru
#Date: 28/MARCH/2020
#Title: Objects
#######
# Before we continue, make sure you know what objects you have in your environment. Use a function to get a list of the object in your environment ????
#Solution
ls()
# Create a variable called lvl that contains the following values: 8, 10, 10, 1, 10, 10, 8, 12, 1, 12.
#Solution
lvl <- c(8, 10, 10, 1, 10, 10, 8, 12, 1, 12)
# Use the commands listed below to do the following operations:
#Find the sum of the elements in lvl
sum(lvl)      # -> 82
#Find the average of the elements in lvl
mean(lvl)     # -> 8.2
#Find the median value (the middle value) of the elements in lvl
median(lvl)   # -> 10 (average of the two middle values of the sorted vector)
#Get R to return the length of the lvl variable
length(lvl)   # -> 10
# ?length() opens the help page for length(); it prints no value itself.
?length()
#Find the standard deviation of the values in lvl
sd(lvl)
#Find the standard deviation of the elements in lvl, then round it; do it in a single command
round(sd(lvl))   # -> 4
#Find the standard deviation of the elements in lvl, then round it, then specifically ask R to print it; do it in a single command
print(round(sd(lvl)))
#Functions to use: sum(), mean(), median(), length(), sd(), round(), print()
# Tip: when typing code in the console and you want to run different commands on the same object, use the Up Arrow to access the last command you executed, and simply edit the function call, keeping the rest the same. | /Day 12/Exercise 3.R | no_license | Nashie-R/100DaysOfCodingR. | R | false | false | 1,386 | r | #######
#Author: Nashipae Waweru
#Date: 28/MARCH/2020
#Title: Objects
#######
# Before we continue, make sure you know what objects you have in your environment. Use a function to get a list of the object in your environment ????
#Solution
ls()
# Create a variable called lvl that contains the following values: 8, 10, 10, 1, 10, 10, 8, 12, 1, 12.
#Solution
lvl <- c(8, 10, 10, 1, 10, 10, 8, 12, 1, 12)
# Use the commands listed below to do the following operations:
#Find the sum of the elements in lvl
sum(lvl)
#Find the average of the elements in lvl
mean(lvl)
#Find the median value (the middle value) of the elements in lvl
median(lvl)
#Get R to return the length of the lvl variable
length(lvl)
?length()
#Find the standard deviation of the values in lvl
sd(lvl)
#Find the standard deviation of the elements in lvl, then round it; do it in a single command
round(sd(lvl))
#Find the standard deviation of the elements in lvl, then round it, then specifically ask R to print it; do it in a single command
print(round(sd(lvl)))
#Functions to use: sum(), mean(), median(), length(), sd(), round(), print()
# Tip: when typing code in the console and you want to run different commands on the same object, use the Up Arrow to access the last command you executed, and simply edit the function call, keeping the rest the same. |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MixedGraph.R
\name{tianComponent}
\alias{tianComponent}
\alias{tianComponent.MixedGraph}
\title{Returns the Tian c-component of a node}
\usage{
tianComponent(this, node)
\method{tianComponent}{MixedGraph}(this, node)
}
\arguments{
\item{this}{the mixed graph object}
\item{node}{the node for which to return its c-component}
}
\description{
Returns the Tian c-component of a node
}
| /man/tianComponent.Rd | no_license | Lucaweihs/SEMID | R | false | true | 462 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MixedGraph.R
\name{tianComponent}
\alias{tianComponent}
\alias{tianComponent.MixedGraph}
\title{Returns the Tian c-component of a node}
\usage{
tianComponent(this, node)
\method{tianComponent}{MixedGraph}(this, node)
}
\arguments{
\item{this}{the mixed graph object}
\item{node}{the node for which to return its c-component}
}
\description{
Returns the Tian c-component of a node
}
|
## R accepts a variety of compressed file formats for ASCII files:
#> scan.txt.Renishaw ("txt.Renishaw/chondro.gz")
#> scan.txt.Renishaw ("txt.Renishaw/chondro.xz")
#> scan.txt.Renishaw ("txt.Renishaw/chondro.lzma")
#> scan.txt.Renishaw ("txt.Renishaw/chondro.gz")
#> scan.txt.Renishaw ("txt.Renishaw/chondro.bz2")
##Very large files can be read in chunks to save memory:
#> scan.txt.Renishaw ("txt.Renishaw/chondro.txt", nlines = 1e5, nspc = 875)
#Instead of a file name, scan.txt.Renishaw accepts also a connection.
#> paracetamol <- scan.txt.Renishaw ("txt.Renishaw/paracetamol.txt", "spc")
##Horiba's LabSpec
#> spc <- read.txt.Horiba ("txt.HoribaJobinYvon/ts.txt",
#+ cols = list (t = "t / s", spc = "I / a.u.",
#+ .wavelength = expression (Delta * tilde (nu) / cm^-1))
#+ )
#> spc
##Witec
#> scan.dat.Witec ("txt.Witec/WitecExample-x.dat", points.per.line = 10, lines.per.image = 10)
##Import of matlab files by Cytospec
#> read.cytomat ("mat.cytospec/cytospec.mat", blocks = TRUE)
##Bruker's ENVI files
#> spc <- read.ENVI ("ENVI/example2.img")
##Thermo Galactic’s .spc file format can be imported by read.spc.
##Instead of a file name, scan.txt.Renishaw accepts also a connection.
#> paracetamol <- scan.txt.Renishaw ("txt.Renishaw/paracetamol.txt", "spc")
#> paracetamol
| /DATA/Spectroscopy/formats.r | no_license | Aurametrix/R | R | false | false | 1,292 | r | ## R accepts a variety of compressed file formats for ASCII files:
#> scan.txt.Renishaw ("txt.Renishaw/chondro.gz")
#> scan.txt.Renishaw ("txt.Renishaw/chondro.xz")
#> scan.txt.Renishaw ("txt.Renishaw/chondro.lzma")
#> scan.txt.Renishaw ("txt.Renishaw/chondro.gz")
#> scan.txt.Renishaw ("txt.Renishaw/chondro.bz2")
##Very large files can be read in chunks to save memory:
#> scan.txt.Renishaw ("txt.Renishaw/chondro.txt", nlines = 1e5, nspc = 875)
#Instead of a file name, scan.txt.Renishaw accepts also a connection.
#> paracetamol <- scan.txt.Renishaw ("txt.Renishaw/paracetamol.txt", "spc")
##Horiba's LabSpec
#> spc <- read.txt.Horiba ("txt.HoribaJobinYvon/ts.txt",
#+ cols = list (t = "t / s", spc = "I / a.u.",
#+ .wavelength = expression (Delta * tilde (nu) / cm^-1))
#+ )
#> spc
##Witek
#> scan.dat.Witec ("txt.Witec/WitecExample-x.dat", points.per.line = 10, lines.per.image = 10)
##Import of matlab files by Cytospec
#> read.cytomat ("mat.cytospec/cytospec.mat", blocks = TRUE)
##Bruker's ENVI files
#> spc <- read.ENVI ("ENVI/example2.img")
##Thermo Galactic’s .spc file format can be imported by read.spc.
##Instead of a file name, scan.txt.Renishaw accepts also a connection.
#> paracetamol <- scan.txt.Renishaw ("txt.Renishaw/paracetamol.txt", "spc")
#> paracetamol
|
# Launcher script for the EIEvent engine: reads the target application stem
# from the command line, loads site configuration, sets up logging, and
# starts the run loop.
library(R.utils)
library(EIEvent)
# "app" is read from the command line via R.utils::cmdArg; defaults to NULL
# when the argument is absent.
appStem <- cmdArg("app",NULL)
if (FALSE) {
  # Convenience value for interactive debugging; never executed as written --
  # run this line by hand (or flip the condition) to target a specific app.
  appStem <- "SummerCamp"
}
# Defines site-wide variables used below: config.dir, logpath, outdir,
# Proc4.config and EIeng.local -- TODO confirm against EIini.R.
source("/usr/local/share/Proc4/EIini.R")
EI.config <- fromJSON(file.path(config.dir,"config.json"),FALSE)
# Resolve the stem to the full application identifier.
app <- as.character(Proc4.config$apps[appStem])
if (length(app)==0L || any(app=="NULL")) {
  # Presumably guards against a missing/null config entry, which
  # as.character() renders as the string "NULL" -- verify against the
  # structure of Proc4.config.
  stop("Could not find apps for ",appStem)
}
if (!(appStem %in% EI.config$appStem)) {
  stop("Configuration not set for app ",appStem)
}
# Per-app log file: the "<app>" placeholder in the configured log name is
# substituted with the current application stem.
logfile <- (file.path(logpath, sub("<app>",appStem,EI.config$logname)))
if (interactive()) {
  # In an interactive session, tee log messages to the console as well.
  flog.appender(appender.tee(logfile))
} else {
  flog.appender(appender.file(logfile))
}
flog.threshold(EI.config$loglevel)
# Main entry point: run the engine for this app with the local settings.
doRunrun(app,EI.config,EIeng.local,config.dir,outdir)
| /bin/RunEIEvent.R | no_license | ralmond/PP-EI | R | false | false | 718 | r | library(R.utils)
library(EIEvent)
appStem <- cmdArg("app",NULL)
if (FALSE) {
appStem <- "SummerCamp"
}
source("/usr/local/share/Proc4/EIini.R")
EI.config <- fromJSON(file.path(config.dir,"config.json"),FALSE)
app <- as.character(Proc4.config$apps[appStem])
if (length(app)==0L || any(app=="NULL")) {
stop("Could not find apps for ",appStem)
}
if (!(appStem %in% EI.config$appStem)) {
stop("Configuration not set for app ",appStem)
}
logfile <- (file.path(logpath, sub("<app>",appStem,EI.config$logname)))
if (interactive()) {
flog.appender(appender.tee(logfile))
} else {
flog.appender(appender.file(logfile))
}
flog.threshold(EI.config$loglevel)
doRunrun(app,EI.config,EIeng.local,config.dir,outdir)
|
#################################################################
###################### 0. PREREQUISITES #########################
#################################################################
#Loading required packages
library(tidyverse)
library(plyr)
library(gridExtra)
library(plyr)
library(MASS)
#SET YOUR WORKING DIRECTORY HERE
setwd("~....../Assignment Bart-Jan/Code and data")
data <- read.csv("movies.csv")
#SAMPLER PREREQUISITES
burn = 1000 #Burn in
k=10000 #Amount of iterations
N=nrow(data) #Total number of cases
n.chains=2 #Number of chains
dv = "gross" #Dependent variable
iv = c("budget","score","year","usa") #Independent variables
#Determining initial values for first run (withouth interaction)
init1<- c(1,-4,6,-3,3,100) #Initial values chain 1
init2<- c(3,5,-7,4,-1,10) #Initial values chain 2
inits <- c(init1,init2) #Combined initials values
#Determining initial values for second run (with interaction)
init11<- c(1,-4,6,-3,3,4,100) #Initial values chain 1
init22<- c(3,5,-7,4,-1,-1,10) #Initial values chain 2
inits2 <- c(init11,init22) #Combined initials values
#Centering predictors (subtracting the mean keeps the intercept interpretable
#at average predictor values)
data$budget <- data$budget - mean(data$budget) #Center predictor 1
data$score <- data$score - mean(data$score) #Center predictor 2
data$year <- data$year - mean(data$year) #Center predictor 3
data$usa <- data$usa - mean(data$usa) #Center predictor 4
#Creating a standardized dataset (this data set is used for a Gibbs/MH run, in order to obtain standardized coefficients for later use)
data_std <- data
sd.budget <- sd(data$budget)
sd.score <- sd(data$score)
sd.year <- sd(data$year)
sd.gross <- sd(data$gross)
sd.usa <- sd(data$usa)
data_std$gross <- data_std$gross - mean(data_std$gross) #Also centering the DV for the gibbs run with standardized data
#Divide each variable by its standard deviation (vectorized column operations;
#this replaces the original row-by-row for loop, which performed one
#data-frame assignment per cell and gave identical results far more slowly)
data_std$budget <- data_std$budget / sd.budget
data_std$score <- data_std$score / sd.score
data_std$year <- data_std$year / sd.year
data_std$gross <- data_std$gross / sd.gross
data_std$usa <- data_std$usa / sd.usa
#################################################################
###################### I. SAMPLING ##############################
#################################################################
source("Gibs_no_int.R")
source("Gibs_int.R")
view(gibtropolis) #View source code Gibbs sampling WITHOUTH interaction
view(gibtropolis2.0) #View source code Gibbs sampling WITH interaction
#Running the gibbs samplers
set.seed(204)
x.wide <- gibtropolis(data, k=10000, dv=dv, iv=iv, inits=inits, n.chains = 2, burn = 1000) #Model without interaction
set.seed(204)
y.wide <- gibtropolis2.0(data, k=10000, dv=dv, iv=iv, inits=inits2, n.chains = 2, burn = 1000) #Model with interaction
set.seed(204)
y.std <- gibtropolis2.0(data_std, k=10000, dv=dv, iv=iv, inits=inits2, n.chains = 2, burn = 1000) #Model with standardized data (for std. coefficients)
#################################################################
########### II. CREATING LONGFORMAT DATA OBJECTS ################
#################################################################
#Longformat for model withouth interaction (model 1)
x.long <- matrix(nrow = (n.chains*(k-burn))+n.chains, ncol = ncol(x.wide) /2) #Creating a matrix new matrix for storing sample from posterior distributions
for (i in 1:6){ #Combining the chains for each parameter in a single collumn
x.long[,i] <- c(x.wide[,i],x.wide[,6+i])
}
x.long <- as.data.frame(x.long) #Saving as dataframe
#Longformat for model with interaction (model 2)
y.long <- matrix(nrow = (n.chains*(k-burn))+n.chains, ncol = ncol(y.wide) /2) #Creating a matrix new matrix for storing sample from posterior distributions
for (i in 1:7){ #Combining the chains for each parameter in a single collumn
y.long[,i] <- c(y.wide[,i],y.wide[,7+i])
}
y.long <- as.data.frame(y.long) #Saving as dataframe
#Longformat for model standardized model with interaction
y.std.long <- matrix(nrow = (n.chains*(k-burn))+n.chains, ncol = ncol(y.wide) /2) #Creating a matrix new matrix for storing sample from posterior distributions
for (i in 1:7){ #Combining the chains for each parameter in a single collumn
y.std.long[,i] <- c(y.std[,i],y.std[,7+i])
}
y.std.long <- as.data.frame(y.std.long) #Saving as dataframe
#################################################################
###################### III. CONVERGENCE #########################
#################################################################
source("Convergence.R")
view(history.plot) #View source code history plot
view(autocorr.plot) #View source autocorrelation plot
#Histrory plots. Model WITHOUTH interaction
grid.arrange(arrangeGrob(history.plot(x.wide,1,7,"Beta0 - Intercept"), #Display plots in grid
history.plot(x.wide,2,8, "Beta1 - Budget"),
history.plot(x.wide,3,9, "Beta2 - Score"),
history.plot(x.wide,4,10, "Beta3 - Year"),
history.plot(x.wide,5,11, "Beta4 - USA"),
history.plot(x.wide,6,12, "Residual Variance"),
ncol = 2))
#Histrory plots. Model WITH interaction
grid.arrange(arrangeGrob(history.plot(y.wide,1,8,"Beta0 - Intercept"), #Display plots in grid
history.plot(y.wide,2,9, "Beta1 - Budget"),
history.plot(y.wide,3,10, "Beta2 - Score"),
history.plot(y.wide,4,11, "Beta3 - Year"),
history.plot(y.wide,5,12, "Beta4 - USA"),
history.plot(y.wide,6,13, "Beta5 - Budget*Score"),
history.plot(y.wide,7,14, "Residual Variance"),
ncol = 2))
#Autocorrelation plots. Model WITHOUTH interaction
grid.arrange(arrangeGrob(autocorr.plot(x.wide,1,7,"Beta0 - Intercept"), #Plot autocorrelations
autocorr.plot(x.wide,2,8,"Beta1 - Budget"),
autocorr.plot(x.wide,3,9,"Beta2 - Rating"),
autocorr.plot(x.wide,4,10,"Beta3 - Year"),
autocorr.plot(x.wide,5,11,"Beta4 - USA"),
autocorr.plot(x.wide,6,12,"Residual Variance"),
ncol = 2))
#Autocorrelation plots. Model WITH interaction
grid.arrange(arrangeGrob(autocorr.plot(y.wide,1,8,"Beta0 - Intercept"), #Plot autocorrelations
autocorr.plot(y.wide,2,9,"Beta1 - Budget"),
autocorr.plot(y.wide,3,10,"Beta2 - Rating"),
autocorr.plot(y.wide,4,11,"Beta3 - Year"),
autocorr.plot(y.wide,5,12,"Beta4 - USA"),
autocorr.plot(y.wide,6,13,"Beta5 - Budget*Score"),
autocorr.plot(y.wide,7,14,"Residual Variance"),
ncol = 2))
#MARKOV CHAIN ERROR
#Model withouth interaction
(b1.1 <- sd(x.long[,1]) / sqrt((k*n.chains)-2*burn)) #B0
(b2.1 <- sd(x.long[,2]) / sqrt((k*n.chains)-2*burn))#B1
(b3.1 <- sd(x.long[,3]) / sqrt((k*n.chains)-2*burn)) #B2
(b4.1 <- sd(x.long[,4]) / sqrt((k*n.chains)-2*burn) )#B3
(b5.1 <- sd(x.long[,5]) / sqrt((k*n.chains)-2*burn)) #B4
(s2.1 <- sd(x.long[,6]) / sqrt((k*n.chains)-2*burn)) #Sigma-squared
#In percentage of standard deviation in sample
(b1.1 / sd(x.long[,1])) *100
(b2.1 / sd(x.long[,2])) *100
(b3.1 / sd(x.long[,3])) *100
(b4.1 / sd(x.long[,4])) *100
(b5.1 / sd(x.long[,5])) *100
(s2.1 / sd(x.long[,6])) *100
#Model with interaction
(b1.2 <- sd(y.long[,1]) / sqrt((k*n.chains)-2*burn)) #B0
(b2.2 <- sd(y.long[,2]) / sqrt((k*n.chains)-2*burn)) #B1
(b3.2 <- sd(y.long[,3]) / sqrt((k*n.chains)-2*burn)) #B2
(b4.2 <- sd(y.long[,4]) / sqrt((k*n.chains)-2*burn)) #B3
(b5.2 <- sd(y.long[,5]) / sqrt((k*n.chains)-2*burn)) #B4
(b6.2 <- sd(y.long[,6]) / sqrt((k*n.chains)-2*burn)) #B5
(s2.2 <- sd(y.long[,7]) / sqrt((k*n.chains)-2*burn)) #Sigma-squared
#In percentage of standard deviation in sample
(b1.2 / sd(y.long[,1])) *100
(b2.2 / sd(y.long[,2])) *100
(b3.2 / sd(y.long[,3])) *100
(b4.2 / sd(y.long[,4])) *100
(b5.2 / sd(y.long[,5])) *100
(b6.2 / sd(y.long[,6])) *100
(s2.2 / sd(y.long[,7])) *100
#ACCEPTANCE RATIO MH STEP (of both chains combined)
#Model withouth interaction
(length(unique(x.wide[,2])) / length(x.wide[,2]) + length(unique(x.wide[,8])) / length(x.wide[,8])) /n.chains
#Model with interaction
(length(unique(y.wide[,2])) / length(y.wide[,2]) + length(unique(y.wide[,9])) / length(y.wide[,9])) /n.chains
#################################################################
############### IV. POSTERIOR PREDICTIVE CHECK ##################
#################################################################
source("PPC.R")
source("PPC_int.R")
view(ppc) #View source code for ppc model withouth interaction
view(ppc2) #View source code for ppc model with interaction
ppc(x.wide, k-burn, data)
ppc2(y.wide, k-burn, data)
#################################################################
############################ V. DIC ############################
#################################################################
source("DIC_no_int.R")
source("DIC_int.R")
view(DIC) #View source code for the DIC model withouth interaction
view(DIC2) #View source code for the DIC model with interaction
DIC(x.long,data) #DIC model withouth interaction
DIC2(y.long,data) #DIC model with interaction
#################################################################
######################## VI. RESULTS ############################
#################################################################
source("Results.R")
view(posterior.hist)
#Displaying posterior histograms
grid.arrange(arrangeGrob(posterior.hist(y.long, 1,"B0 - Intercept"),
posterior.hist(y.long, 2,"B1 - Budget"),
posterior.hist(y.long, 3,"B2 - Score"),
posterior.hist(y.long, 4,"B3 - Year"),
posterior.hist(y.long, 5,"B4 - USA"),
posterior.hist(y.long, 6,"B5 - Budget * Score"),
posterior.hist(y.long, 7,"Residual Variance"),
ncol = 2))
#Calculating standardized coefficients from the standardized sampling run
mean(y.std.long[,2]) #Standardized coefficient budget
mean(y.std.long[,3]) #Standardized coefficient score
mean(y.std.long[,6]) #Standardized coefficient interaction
#################################################################
###################### VII. BAYES FACTOR ########################
#################################################################
source("Bayes_Factor.R")
view(BayesFactor)
set.seed(204)
BayesFactor(data)
| /main.R | no_license | BartJanBoverhof/Bayesian-Regression | R | false | false | 10,790 | r | #################################################################
###################### 0. PREREQUISITES #########################
#################################################################
#Loading required packages
library(tidyverse)
library(plyr)
library(gridExtra)
library(plyr)
library(MASS)
#SET YOUR WORKING DIRECTORY HERE
setwd("~....../Assignment Bart-Jan/Code and data")
data <- read.csv("movies.csv")
#SAMPLER PREREQUISITES
burn = 1000 #Burn in
k=10000 #Amount of iterations
N=nrow(data) #Total number of cases
n.chains=2 #Number of chains
dv = "gross" #Dependent variable
iv = c("budget","score","year","usa") #Independent variables
#Determining initial values for first run (withouth interaction)
init1<- c(1,-4,6,-3,3,100) #Initial values chain 1
init2<- c(3,5,-7,4,-1,10) #Initial values chain 2
inits <- c(init1,init2) #Combined initials values
#Determining initial values for second run (with interaction)
init11<- c(1,-4,6,-3,3,4,100) #Initial values chain 1
init22<- c(3,5,-7,4,-1,-1,10) #Initial values chain 2
inits2 <- c(init11,init22) #Combined initials values
#Centering predictors
data$budget <- data$budget - mean(data$budget) #Center predictor 1
data$score <- data$score - mean(data$score) #Center predictor 2
data$year <- data$year - mean(data$year) #Center predictor 3
data$usa <- data$usa - mean(data$usa)#Center predictor 4
#Creating a standardized dataset (this data set is used for a Gibbs/MH run, in order to obtain standardized coefficients for later use)
data_std <- data
sd.budget <- sd(data$budget)
sd.score <- sd(data$score)
sd.year <- sd(data$year)
sd.gross <- sd(data$gross)
sd.usa <- sd(data$usa)
data_std$gross <- data_std$gross - mean(data_std$gross) #Also centering the DV for the gibbs run with standardized data
for (j in 1:nrow(data_std)){ #For loop for dividing each value by it's standard deviation
data_std[j,"budget"] <- data_std[j,"budget"] / sd.budget
data_std[j,"score"] <- data_std[j,"score"] / sd.score
data_std[j,"year"] <- data_std[j,"year"] / sd.year
data_std[j,"gross"] <- data_std[j,"gross"] / sd.gross
data_std[j,"usa"] <- data_std[j,"usa"] / sd.usa
}
#################################################################
###################### I. SAMPLING ##############################
#################################################################
source("Gibs_no_int.R")
source("Gibs_int.R")
view(gibtropolis) #View source code Gibbs sampling WITHOUTH interaction
view(gibtropolis2.0) #View source code Gibbs sampling WITH interaction
#Running the gibbs samplers
set.seed(204)
x.wide <- gibtropolis(data, k=10000, dv=dv, iv=iv, inits=inits, n.chains = 2, burn = 1000) #Model without interaction
set.seed(204)
y.wide <- gibtropolis2.0(data, k=10000, dv=dv, iv=iv, inits=inits2, n.chains = 2, burn = 1000) #Model with interaction
set.seed(204)
y.std <- gibtropolis2.0(data_std, k=10000, dv=dv, iv=iv, inits=inits2, n.chains = 2, burn = 1000) #Model with standardized data (for std. coefficients)
#################################################################
########### II. CREATING LONGFORMAT DATA OBJECTS ################
#################################################################
#Longformat for model withouth interaction (model 1)
x.long <- matrix(nrow = (n.chains*(k-burn))+n.chains, ncol = ncol(x.wide) /2) #Creating a matrix new matrix for storing sample from posterior distributions
for (i in 1:6){ #Combining the chains for each parameter in a single collumn
x.long[,i] <- c(x.wide[,i],x.wide[,6+i])
}
x.long <- as.data.frame(x.long) #Saving as dataframe
#Longformat for model with interaction (model 2)
y.long <- matrix(nrow = (n.chains*(k-burn))+n.chains, ncol = ncol(y.wide) /2) #Creating a matrix new matrix for storing sample from posterior distributions
for (i in 1:7){ #Combining the chains for each parameter in a single collumn
y.long[,i] <- c(y.wide[,i],y.wide[,7+i])
}
y.long <- as.data.frame(y.long) #Saving as dataframe
#Longformat for model standardized model with interaction
y.std.long <- matrix(nrow = (n.chains*(k-burn))+n.chains, ncol = ncol(y.wide) /2) #Creating a matrix new matrix for storing sample from posterior distributions
for (i in 1:7){ #Combining the chains for each parameter in a single collumn
y.std.long[,i] <- c(y.std[,i],y.std[,7+i])
}
y.std.long <- as.data.frame(y.std.long) #Saving as dataframe
#################################################################
###################### III. CONVERGENCE #########################
#################################################################
source("Convergence.R")
view(history.plot) #View source code history plot
view(autocorr.plot) #View source autocorrelation plot
#Histrory plots. Model WITHOUTH interaction
grid.arrange(arrangeGrob(history.plot(x.wide,1,7,"Beta0 - Intercept"), #Display plots in grid
history.plot(x.wide,2,8, "Beta1 - Budget"),
history.plot(x.wide,3,9, "Beta2 - Score"),
history.plot(x.wide,4,10, "Beta3 - Year"),
history.plot(x.wide,5,11, "Beta4 - USA"),
history.plot(x.wide,6,12, "Residual Variance"),
ncol = 2))
#Histrory plots. Model WITH interaction
grid.arrange(arrangeGrob(history.plot(y.wide,1,8,"Beta0 - Intercept"), #Display plots in grid
history.plot(y.wide,2,9, "Beta1 - Budget"),
history.plot(y.wide,3,10, "Beta2 - Score"),
history.plot(y.wide,4,11, "Beta3 - Year"),
history.plot(y.wide,5,12, "Beta4 - USA"),
history.plot(y.wide,6,13, "Beta5 - Budget*Score"),
history.plot(y.wide,7,14, "Residual Variance"),
ncol = 2))
#Autocorrelation plots. Model WITHOUTH interaction
grid.arrange(arrangeGrob(autocorr.plot(x.wide,1,7,"Beta0 - Intercept"), #Plot autocorrelations
autocorr.plot(x.wide,2,8,"Beta1 - Budget"),
autocorr.plot(x.wide,3,9,"Beta2 - Rating"),
autocorr.plot(x.wide,4,10,"Beta3 - Year"),
autocorr.plot(x.wide,5,11,"Beta4 - USA"),
autocorr.plot(x.wide,6,12,"Residual Variance"),
ncol = 2))
#Autocorrelation plots. Model WITH interaction
grid.arrange(arrangeGrob(autocorr.plot(y.wide,1,8,"Beta0 - Intercept"), #Plot autocorrelations
autocorr.plot(y.wide,2,9,"Beta1 - Budget"),
autocorr.plot(y.wide,3,10,"Beta2 - Rating"),
autocorr.plot(y.wide,4,11,"Beta3 - Year"),
autocorr.plot(y.wide,5,12,"Beta4 - USA"),
autocorr.plot(y.wide,6,13,"Beta5 - Budget*Score"),
autocorr.plot(y.wide,7,14,"Residual Variance"),
ncol = 2))
#MARKOV CHAIN ERROR
#Model withouth interaction
(b1.1 <- sd(x.long[,1]) / sqrt((k*n.chains)-2*burn)) #B0
(b2.1 <- sd(x.long[,2]) / sqrt((k*n.chains)-2*burn))#B1
(b3.1 <- sd(x.long[,3]) / sqrt((k*n.chains)-2*burn)) #B2
(b4.1 <- sd(x.long[,4]) / sqrt((k*n.chains)-2*burn) )#B3
(b5.1 <- sd(x.long[,5]) / sqrt((k*n.chains)-2*burn)) #B4
(s2.1 <- sd(x.long[,6]) / sqrt((k*n.chains)-2*burn)) #Sigma-squared
#In percentage of standard deviation in sample
(b1.1 / sd(x.long[,1])) *100
(b2.1 / sd(x.long[,2])) *100
(b3.1 / sd(x.long[,3])) *100
(b4.1 / sd(x.long[,4])) *100
(b5.1 / sd(x.long[,5])) *100
(s2.1 / sd(x.long[,6])) *100
#Model with interaction
(b1.2 <- sd(y.long[,1]) / sqrt((k*n.chains)-2*burn)) #B0
(b2.2 <- sd(y.long[,2]) / sqrt((k*n.chains)-2*burn)) #B1
(b3.2 <- sd(y.long[,3]) / sqrt((k*n.chains)-2*burn)) #B2
(b4.2 <- sd(y.long[,4]) / sqrt((k*n.chains)-2*burn)) #B3
(b5.2 <- sd(y.long[,5]) / sqrt((k*n.chains)-2*burn)) #B4
(b6.2 <- sd(y.long[,6]) / sqrt((k*n.chains)-2*burn)) #B5
(s2.2 <- sd(y.long[,7]) / sqrt((k*n.chains)-2*burn)) #Sigma-squared
#In percentage of standard deviation in sample
(b1.2 / sd(y.long[,1])) *100
(b2.2 / sd(y.long[,2])) *100
(b3.2 / sd(y.long[,3])) *100
(b4.2 / sd(y.long[,4])) *100
(b5.2 / sd(y.long[,5])) *100
(b6.2 / sd(y.long[,6])) *100
(s2.2 / sd(y.long[,7])) *100
#ACCEPTANCE RATIO MH STEP (of both chains combined)
#Model withouth interaction
(length(unique(x.wide[,2])) / length(x.wide[,2]) + length(unique(x.wide[,8])) / length(x.wide[,8])) /n.chains
#Model with interaction
(length(unique(y.wide[,2])) / length(y.wide[,2]) + length(unique(y.wide[,9])) / length(y.wide[,9])) /n.chains
#################################################################
############### IV. POSTERIOR PREDICTIVE CHECK ##################
#################################################################
source("PPC.R")
source("PPC_int.R")
view(ppc) #View source code for ppc model withouth interaction
view(ppc2) #View source code for ppc model with interaction
ppc(x.wide, k-burn, data)
ppc2(y.wide, k-burn, data)
#################################################################
############################ V. DIC ############################
#################################################################
source("DIC_no_int.R")
source("DIC_int.R")
view(DIC) #View source code for the DIC model withouth interaction
view(DIC2) #View source code for the DIC model with interaction
DIC(x.long,data) #DIC model withouth interaction
DIC2(y.long,data) #DIC model with interaction
#################################################################
######################## VI. RESULTS ############################
#################################################################
source("Results.R")
view(posterior.hist)
#Displaying posterior histograms
grid.arrange(arrangeGrob(posterior.hist(y.long, 1,"B0 - Intercept"),
posterior.hist(y.long, 2,"B1 - Budget"),
posterior.hist(y.long, 3,"B2 - Score"),
posterior.hist(y.long, 4,"B3 - Year"),
posterior.hist(y.long, 5,"B4 - USA"),
posterior.hist(y.long, 6,"B5 - Budget * Score"),
posterior.hist(y.long, 7,"Residual Variance"),
ncol = 2))
#Calculating standardized coefficients from the standardized sampling run
mean(y.std.long[,2]) #Standardized coefficient budget
mean(y.std.long[,3]) #Standardized coefficient score
mean(y.std.long[,6]) #Standardized coefficient interaction
#################################################################
###################### VII. BAYES FACTOR ########################
#################################################################
source("Bayes_Factor.R")
view(BayesFactor)
set.seed(204)
BayesFactor(data)
|
## METHODOLOGY & RESEARCH DESIGN: QUANTITATIVE METHODS
## Session 5: BIVARIATE STATISTIC I
## March 19, 2020
######################################################################
# LET'S CALCULATE THE AVERAGE AGE OF THE PORTUGUESE POPULATION
######################################################################
# install these packages and load them
install.packages("foreign")
install.packages("Rmisc")
library("foreign")
library("Rmisc")
# we are going to use data from the Eurobarometer of September 2018
# the Eurobarometer file needs to be in your working directory [getwd(), setwd()]
eurobar<- read.dta("eu.sep18.dta")
attach(eurobar)
# mean for the variable d11 (age) and mean+CI
mean(d11)
CI(d11)
######################################################################
# LET'S RUN A CROSS-TAB
#####################################################################
# install this package and load it
install.packages("gmodels")
library("gmodels")
# run a cross-tab with gender (d10) and benefit from EU membership (qa16)
CrossTable(qa16, d10, format="SPSS", digit=1, prop.chisq =FALSE)
# run the same table with the chi-squared
CrossTable(qa16, d10, format="SPSS", digit=1, prop.chisq = FALSE,prop.r = FALSE, prop.t = FALSE, chisq= TRUE)
# run a cross-tab with gender (d10) and the importance of equality between men and women (qa14_1) + chi-squared
CrossTable(qa14_1, d10, format="SPSS", digit=1, prop.chisq = FALSE,prop.r = FALSE, prop.t = FALSE, chisq= TRUE)
| /Seminar in Quantitative Methods/session5_script.R | no_license | beatrizmaciel/seminarquantitativemethods | R | false | false | 1,548 | r | ## METHODOLOGY & RESEARCH DESIGN: QUANTITATIVE METHODS
## Session 5: BIVARIATE STATISTIC I
## March 19, 2020
######################################################################
# LET'S CALCULATE THE AVERAGE AGE OF THE PORTUGUESE POPULATION
######################################################################
# install these packages and load them
install.packages("foreign")
install.packages("Rmisc")
library("foreign")
library("Rmisc")
# we are going to use data from the Eurobarometer of September 2018
# the Eurobarometer file needs to be in your working directory [getwd(), setwd()]
eurobar<- read.dta("eu.sep18.dta")
attach(eurobar)
# mean for the variable d11 (age) and mean+CI
mean(d11)
CI(d11)
######################################################################
# LET'S RUN A CROSS-TAB
#####################################################################
# install this package and load it
install.packages("gmodels")
library("gmodels")
# run a cross-tab with gender (d10) and benefit from EU membership (qa16)
CrossTable(qa16, d10, format="SPSS", digit=1, prop.chisq =FALSE)
# run the same table with the chi-squared
CrossTable(qa16, d10, format="SPSS", digit=1, prop.chisq = FALSE,prop.r = FALSE, prop.t = FALSE, chisq= TRUE)
# run a cross-tab with gender (d10) and the importance of equality between men and women (qa14_1) + chi-squared
CrossTable(qa14_1, d10, format="SPSS", digit=1, prop.chisq = FALSE,prop.r = FALSE, prop.t = FALSE, chisq= TRUE)
|
# test-paper-workflow.R - Bill White - 4/27/17
#
# Test replicating the Bioinformatics paper simulated and real data analysis.
library(privateEC)
context("Paper Workflows")
test_that("run one workflow of a simulation plus an analysis step", {
  # Small problem size so the simulated workflow finishes quickly in tests.
  num.samples <- 100
  num.variables <- 100
  # Fraction of variables carrying signal.
  pct.signals <- 0.1
  # Update frequency scaled to the number of variables (here 10).
  upd.frq <- 0.1 * num.variables
  one.step.result <- paperSimWorkflow(n.samples = num.samples,
                                      n.variables = num.variables,
                                      pct.signals = pct.signals,
                                      update.freq = upd.frq,
                                      verbose = FALSE)
  # Contract: the workflow returns two components, and its run.results
  # component holds three entries.
  expect_equal(length(one.step.result), 2)
  expect_equal(length(one.step.result$run.results), 3)
})
test_that("run one workflow for a real data analysis", {
  # Load the dataset bundled with the package.
  data(rsfMRIcorrMDD)
  # ~100 variables for a test (keep only the trailing columns; the first
  # column of the slice presumably includes the phenotype -- see label below).
  test.mat <- rsfMRIcorrMDD[, 2900:ncol(rsfMRIcorrMDD)]
  real.result <- paperRealWorkflow(real.data = test.mat,
                                   label = "phenos",
                                   update.freq = 5,
                                   verbose = FALSE)
  # Same contract as the simulation workflow: two components, three runs.
  expect_equal(length(real.result), 2)
  expect_equal(length(real.result$run.results), 3)
})
| /tests/testthat/test-paper-workflow.R | no_license | saeid651/privateEC | R | false | false | 1,228 | r | # test-paper-workflow.R - Bill White - 4/27/17
#
# Test replicating the Bioinformatics paper simulated and real data analysis.
library(privateEC)
context("Paper Workflows")
test_that("run one workflow of a simulation plus an analysis step", {
num.samples <- 100
num.variables <- 100
pct.signals <- 0.1
upd.frq <- 0.1 * num.variables
one.step.result <- paperSimWorkflow(n.samples = num.samples,
n.variables = num.variables,
pct.signals = pct.signals,
update.freq = upd.frq,
verbose = FALSE)
expect_equal(length(one.step.result), 2)
expect_equal(length(one.step.result$run.results), 3)
})
test_that("run one workflow for a real data analysis", {
data(rsfMRIcorrMDD)
# ~100 variables for a test
test.mat <- rsfMRIcorrMDD[, 2900:ncol(rsfMRIcorrMDD)]
real.result <- paperRealWorkflow(real.data = test.mat,
label = "phenos",
update.freq = 5,
verbose = FALSE)
expect_equal(length(real.result), 2)
expect_equal(length(real.result$run.results), 3)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/setters.R
\name{nTotalEntries<-}
\alias{nTotalEntries<-}
\title{Set total number database entries}
\usage{
nTotalEntries(this) <- value
}
\arguments{
\item{this}{Object of class \code{MiRNANameConverter}}
\item{value}{An \code{integer} value}
}
\value{
A \code{MiRNANameConverter} object
}
\description{
This function sets the total number of entries contained
in the \code{mimat} table. The number is the sum of the entries of all
miRBase versions provided by the package.
}
\details{
The total number is evaluated and set in the object initialization.
}
\author{
Stefan Haunsberger
}
| /man/nTotalEntries-set.Rd | no_license | StefanHaunsberger/miRNAmeConverter | R | false | true | 665 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/setters.R
\name{nTotalEntries<-}
\alias{nTotalEntries<-}
\title{Set total number database entries}
\usage{
nTotalEntries(this) <- value
}
\arguments{
\item{this}{Object of class \code{MiRNANameConverter}}
\item{value}{An \code{integer} value}
}
\value{
A \code{MiRNANameConverter} object
}
\description{
This function sets the total number of entries contained
in the \code{mimat} table. The number is the sum of the entries of all
miRBase versions provided by the package.
}
\details{
The total number is evaluated and set in the object initialization.
}
\author{
Stefan Haunsberger
}
|
# Error testing
# Unarine Singo
# 6 January 2021
# import R functions ------------
source("R/plotter.R")
source("R/simulate.R")
source("R/segmentation.R")
source("R/simulate.R")
#source("R/stylisedFacts.R")
source("R/05_ICC.R")
source("R/NohAnzatsStateSimulation.R")
# libraries ---------------------
library('tidyverse')
library(readxl)
library(MASS)
library(fossil)
library(markovchain)
library(depmixS4)
library(mclust)
library(xts)
library(PerformanceAnalytics)
library(hash)
library(parallel)
library(foreach)
library(doParallel)
# ---------------------------------------------
# Setup ---------------------------------------
# ---------------------------------------------
# identify estimate parameters from real data using a hidden markov model
#helper functions
# Count the NA entries in a vector (used to drop non-surviving stocks).
naSums = function(x) {
  length(which(is.na(x)))
}
#set.seed(1)
# SnP 500 data clean -------------------------------------
allStocks = read_csv(file ="data/sandp500/all_stocks_5yr.csv")
allStocks = allStocks %>% dplyr::select(date, close, Name)
flatStocks = allStocks %>% spread(key = Name, value = close, fill = NA ) # explode matrix
survivorStocks = flatStocks %>% select_if(apply(flatStocks,naSums, MARGIN = 2) == 0) # removed stocks that did not trade in the whole period
# move this to a new document
#log returns
smaller = sample(2:400, size = 100)
GRet = survivorStocks[, c(smaller)]
GRet = diff(as.matrix(log(GRet)))
# find estimates
returns = rowMeans(GRet)
# ---------------------------------------------
# Simulate 2 state data -----------------------
# ---------------------------------------------
dimensions = 100
states = 4
No.iters = 1:5
intraClusterStength = seq( from = 0.1, to = 1,length.out = 10)
# setup --------------
hmm = depmix(returns ~ 1, family = gaussian(), nstates = states, data=data.frame(returns=returns))
hmmfit = fit(hmm, verbose = FALSE)
post_probs = posterior(hmmfit)
# Output both the true regimes and the
# posterior probabilities of the regimes
layout(1:3)
plot(100*cumsum(returns)+100, type ='l')
plot(returns, type ='l')
matplot(post_probs[,-1], type='l', main='Regime Posterior Probabilities', ylab='Probability')
# identified transition states
summary(hmmfit)
hmmTMatrix = matrix(getpars(hmmfit)[(nstates(hmmfit)+1):(nstates(hmmfit)^2+nstates(hmmfit))],
byrow=TRUE,nrow=nstates(hmmfit),ncol=nstates(hmmfit))
hmmStatePaths = posterior(hmmfit)$state
#State sample statistics
State1Returns = returns[hmmStatePaths==1]
State2Returns = returns[hmmStatePaths==2]
State3Returns = returns[hmmStatePaths==3]
State4Returns = returns[hmmStatePaths==4]
mean(State1Returns)
mean(State2Returns)
mean(State3Returns)
mean(State4Returns)
mean(returns)
sd(State2Returns)
sd(State1Returns)
sd(State3Returns)
sd(State4Returns)
sd(returns)
layout(1:1)
# simulate a fixed data set to identify a resonable gamma
DimData = list()
result = matrix(-99, nrow = length(No.iters), ncol = 10)
marketStates = c("1", "2", "3", "4")
byRow = TRUE
twoState = new("markovchain", states = marketStates, byrow = byRow, transitionMatrix = hmmTMatrix, name = "twoStateMarket")
param = list()
param[[1]] = list('mu' = mean(State1Returns),
'sigma' = sd(State1Returns))
param[[2]] = list('mu' = mean(State2Returns),
'sigma' = sd(State2Returns))
param[[3]] = list('mu' = mean(State3Returns),
'sigma' = sd(State3Returns))
param[[4]] = list('mu' = mean(State4Returns),
'sigma' = sd(State4Returns))
twoStateSequence = as(rmarkovchain(n = 600, object = twoState, t0 = '1'), 'numeric')
stocks = c(100)
#----------------------------------------
# ICC results - gamma 0, no penalisation 2 state market
#----------------------------------------
# using SnP based hmm
# 100 stocks
it = 1
stockSize = 100
intra = c(0.25, 0.5, 0.75, 1)
ICC.RandIndex.Scene1 = matrix(NA, nrow =length(intra) , ncol = it)
ICC.Mean.Scene1 = matrix(NA, nrow =length(intra)*4 , ncol = it) # store mean estimates
ICC.Std.Scene1 = matrix(NA, nrow =length(intra)*4 , ncol = it) #store sd estimates
Noh.Mean.Scene1 = matrix(NA, nrow =length(intra)*4 , ncol = it) # store mean estimates
Noh.Std.Scene1 = matrix(NA, nrow =length(intra)*4 , ncol = it) #store sd estimates
row.names(ICC.RandIndex.Scene1) = intra
row.names(ICC.Mean.Scene1) = c("0.25_1", "0.5_1", "0.75_1", "1_1", "0.25_2", "0.5_2", "0.75_2", "1_2", "0.25_3", "0.5_3", "0.75_3", "1_3", "0.25_4", "0.5_4", "0.75_4", "1_4")
row.names(ICC.Std.Scene1) = c("0.25_1", "0.5_1", "0.75_1", "1_1", "0.25_2", "0.5_2", "0.75_2", "1_2", "0.25_3", "0.5_3", "0.75_3", "1_3", "0.25_4", "0.5_4", "0.75_4", "1_4")
row.names(Noh.Mean.Scene1) = c("0.25_1", "0.5_1", "0.75_1", "1_1", "0.25_2", "0.5_2", "0.75_2", "1_2", "0.25_3", "0.5_3", "0.75_3", "1_3", "0.25_4", "0.5_4", "0.75_4", "1_4")
row.names(Noh.Std.Scene1) = c("0.25_1", "0.5_1", "0.75_1", "1_1", "0.25_2", "0.5_2", "0.75_2", "1_2", "0.25_3", "0.5_3", "0.75_3", "1_3", "0.25_4", "0.5_4", "0.75_4", "1_4")
dataNoh = NohAnzats.simulate(stateSequence = twoStateSequence, g = c(intra[i], intra[i], intra[i], intra[i]), clusters = 4, stocks = stockSize, gaussian = T, param)
plotter.series(colMeans((dataNoh$stockMatrix)),dataNoh$stateSeq
, title = paste('Simlated market returns'))
plotter.series(colMeans((dataNoh$stockMatrix)),dataNoh$stateSeq
, title = paste('Simlated market returns'), S0 = 100)
ICC.Output = ICC.cluster(returnsMatrix = dataNoh$stockMatrix, sparseMethod = 2, gamma = 0, K = 4, max.iters = 15)
State1Returns = colMeans((dataNoh$stockMatrix))[ICC.Output$OptimalPath==1]
State2Returns = colMeans((dataNoh$stockMatrix))[ICC.Output$OptimalPath==2]
State3Returns = colMeans((dataNoh$stockMatrix))[ICC.Output$OptimalPath==3]
State4Returns = colMeans((dataNoh$stockMatrix))[ICC.Output$OptimalPath==4]
ICC.RandIndex.Scene1[i,j] = adj.rand.index(dataNoh$stateSeq, ICC.Output$OptimalPath)
sparseMethod = 2
gamma = 0
K = 4
max.iters = 15
distanceFunction = 1
returnsMatrix = dataNoh$stockMatrix
trace(print)
Time = ncol(returnsMatrix)
optimalViterbi = NA
optimalStateNum = 0
iters = 0
ErrorRate = 0
max.it = max.iters
stop.crit = 0.00001
ds = NA
d.hist= c()
#initialise cluster parameters
states.est = ICC.shuffle(K = K, Time = Time)
theta.est = ICC.thetaEst(K = K, returns = returnsMatrix, stateSeq = states.est, sparse = sparseMethod)
#print( det(theta.est$precision[[1]]))
#print( det(theta.est$precision[[2]]))
#det(LoGo.solve(abs(cov(t(returnsMatrix[, which(states.est == 1)])))))
distance.est = ICC.distance(returns = returnsMatrix, dist = distanceFunction, theta = theta.est, K = K)
#plot(y = distance.est[1,], x = 1:Time, type='l', col = 'red', main = 'initialisation')
#lines(y = distance.est[2,], x = 1:Time, col = 'blue')
optimalViterbi = viterbi(D = t(distance.est), K=K, gamma=gamma)
states.est = optimalViterbi$Final_Path
states.est
# check if assingment is valid. if not start with random assingment
if (length(unique(states.est)) != K || sum(diff(states.est)!=0)<3 ) {
print(paste('Produced single state path at iteration', it))
states.est = ICC.shuffle(K = K, Time = Time)
}
theta.est = ICC.thetaEst(K = K, returns = returnsMatrix, stateSeq = states.est, sparse = 2)
distance.est = ICC.distance(returns = returnsMatrix, dist = distanceFunction, theta = theta.est, K = K)
d.hist =c(d.hist, -1*ICC.TotalDistance(distance.est, gamma = gamma, stateSeq = states.est))
plot(y = distance.est[1,], x = 1:Time, type='l', col = 'red', main = paste('iteration: ', it))
lines(y = distance.est[2,], x = 1:Time, col = 'blue')
if (nrow(distance.est) == 3 ){ lines(y = distance.est[3,], x = 1:Time, col = 'black')}
if (nrow(distance.est) == 4 ){ lines(y = distance.est[3,], x = 1:Time, col = 'yellow')}
if (nrow(distance.est) == 5 ){ lines(y = distance.est[3,], x = 1:Time, col = 'pink')}
| /tests/ErrorTesting.R | no_license | Una95Singo/MarketStates | R | false | false | 7,811 | r | # Error testing
# Unarine Singo
# 6 January 2021
# import R functions ------------
source("R/plotter.R")
source("R/simulate.R")
source("R/segmentation.R")
source("R/simulate.R")
#source("R/stylisedFacts.R")
source("R/05_ICC.R")
source("R/NohAnzatsStateSimulation.R")
# libraries ---------------------
library('tidyverse')
library(readxl)
library(MASS)
library(fossil)
library(markovchain)
library(depmixS4)
library(mclust)
library(xts)
library(PerformanceAnalytics)
library(hash)
library(parallel)
library(foreach)
library(doParallel)
# ---------------------------------------------
# Setup ---------------------------------------
# ---------------------------------------------
# identify estimate parameters from real data using a hidden markov model
#helper functions
# Count the NA entries in a vector (used to drop non-surviving stocks).
naSums = function(x) {
  length(which(is.na(x)))
}
#set.seed(1)
# SnP 500 data clean -------------------------------------
allStocks = read_csv(file ="data/sandp500/all_stocks_5yr.csv")
allStocks = allStocks %>% dplyr::select(date, close, Name)
flatStocks = allStocks %>% spread(key = Name, value = close, fill = NA ) # explode matrix
survivorStocks = flatStocks %>% select_if(apply(flatStocks,naSums, MARGIN = 2) == 0) # removed stocks that did not trade in the whole period
# move this to a new document
#log returns
smaller = sample(2:400, size = 100)
GRet = survivorStocks[, c(smaller)]
GRet = diff(as.matrix(log(GRet)))
# find estimates
returns = rowMeans(GRet)
# ---------------------------------------------
# Simulate 2 state data -----------------------
# ---------------------------------------------
dimensions = 100
states = 4
No.iters = 1:5
intraClusterStength = seq( from = 0.1, to = 1,length.out = 10)
# setup --------------
hmm = depmix(returns ~ 1, family = gaussian(), nstates = states, data=data.frame(returns=returns))
hmmfit = fit(hmm, verbose = FALSE)
post_probs = posterior(hmmfit)
# Output both the true regimes and the
# posterior probabilities of the regimes
layout(1:3)
plot(100*cumsum(returns)+100, type ='l')
plot(returns, type ='l')
matplot(post_probs[,-1], type='l', main='Regime Posterior Probabilities', ylab='Probability')
# identified transition states
summary(hmmfit)
hmmTMatrix = matrix(getpars(hmmfit)[(nstates(hmmfit)+1):(nstates(hmmfit)^2+nstates(hmmfit))],
byrow=TRUE,nrow=nstates(hmmfit),ncol=nstates(hmmfit))
hmmStatePaths = posterior(hmmfit)$state
#State sample statistics
State1Returns = returns[hmmStatePaths==1]
State2Returns = returns[hmmStatePaths==2]
State3Returns = returns[hmmStatePaths==3]
State4Returns = returns[hmmStatePaths==4]
mean(State1Returns)
mean(State2Returns)
mean(State3Returns)
mean(State4Returns)
mean(returns)
sd(State2Returns)
sd(State1Returns)
sd(State3Returns)
sd(State4Returns)
sd(returns)
layout(1:1)
# simulate a fixed data set to identify a resonable gamma
DimData = list()
result = matrix(-99, nrow = length(No.iters), ncol = 10)
marketStates = c("1", "2", "3", "4")
byRow = TRUE
twoState = new("markovchain", states = marketStates, byrow = byRow, transitionMatrix = hmmTMatrix, name = "twoStateMarket")
param = list()
param[[1]] = list('mu' = mean(State1Returns),
'sigma' = sd(State1Returns))
param[[2]] = list('mu' = mean(State2Returns),
'sigma' = sd(State2Returns))
param[[3]] = list('mu' = mean(State3Returns),
'sigma' = sd(State3Returns))
param[[4]] = list('mu' = mean(State4Returns),
'sigma' = sd(State4Returns))
twoStateSequence = as(rmarkovchain(n = 600, object = twoState, t0 = '1'), 'numeric')
stocks = c(100)
#----------------------------------------
# ICC results - gamma 0, no penalisation 2 state market
#----------------------------------------
# using SnP based hmm
# 100 stocks
it = 1
stockSize = 100
intra = c(0.25, 0.5, 0.75, 1)
ICC.RandIndex.Scene1 = matrix(NA, nrow =length(intra) , ncol = it)
ICC.Mean.Scene1 = matrix(NA, nrow =length(intra)*4 , ncol = it) # store mean estimates
ICC.Std.Scene1 = matrix(NA, nrow =length(intra)*4 , ncol = it) #store sd estimates
Noh.Mean.Scene1 = matrix(NA, nrow =length(intra)*4 , ncol = it) # store mean estimates
Noh.Std.Scene1 = matrix(NA, nrow =length(intra)*4 , ncol = it) #store sd estimates
row.names(ICC.RandIndex.Scene1) = intra
row.names(ICC.Mean.Scene1) = c("0.25_1", "0.5_1", "0.75_1", "1_1", "0.25_2", "0.5_2", "0.75_2", "1_2", "0.25_3", "0.5_3", "0.75_3", "1_3", "0.25_4", "0.5_4", "0.75_4", "1_4")
row.names(ICC.Std.Scene1) = c("0.25_1", "0.5_1", "0.75_1", "1_1", "0.25_2", "0.5_2", "0.75_2", "1_2", "0.25_3", "0.5_3", "0.75_3", "1_3", "0.25_4", "0.5_4", "0.75_4", "1_4")
row.names(Noh.Mean.Scene1) = c("0.25_1", "0.5_1", "0.75_1", "1_1", "0.25_2", "0.5_2", "0.75_2", "1_2", "0.25_3", "0.5_3", "0.75_3", "1_3", "0.25_4", "0.5_4", "0.75_4", "1_4")
row.names(Noh.Std.Scene1) = c("0.25_1", "0.5_1", "0.75_1", "1_1", "0.25_2", "0.5_2", "0.75_2", "1_2", "0.25_3", "0.5_3", "0.75_3", "1_3", "0.25_4", "0.5_4", "0.75_4", "1_4")
dataNoh = NohAnzats.simulate(stateSequence = twoStateSequence, g = c(intra[i], intra[i], intra[i], intra[i]), clusters = 4, stocks = stockSize, gaussian = T, param)
plotter.series(colMeans((dataNoh$stockMatrix)),dataNoh$stateSeq
, title = paste('Simlated market returns'))
plotter.series(colMeans((dataNoh$stockMatrix)),dataNoh$stateSeq
, title = paste('Simlated market returns'), S0 = 100)
ICC.Output = ICC.cluster(returnsMatrix = dataNoh$stockMatrix, sparseMethod = 2, gamma = 0, K = 4, max.iters = 15)
State1Returns = colMeans((dataNoh$stockMatrix))[ICC.Output$OptimalPath==1]
State2Returns = colMeans((dataNoh$stockMatrix))[ICC.Output$OptimalPath==2]
State3Returns = colMeans((dataNoh$stockMatrix))[ICC.Output$OptimalPath==3]
State4Returns = colMeans((dataNoh$stockMatrix))[ICC.Output$OptimalPath==4]
ICC.RandIndex.Scene1[i,j] = adj.rand.index(dataNoh$stateSeq, ICC.Output$OptimalPath)
sparseMethod = 2
gamma = 0
K = 4
max.iters = 15
distanceFunction = 1
returnsMatrix = dataNoh$stockMatrix
trace(print)
Time = ncol(returnsMatrix)
optimalViterbi = NA
optimalStateNum = 0
iters = 0
ErrorRate = 0
max.it = max.iters
stop.crit = 0.00001
ds = NA
d.hist= c()
#initialise cluster parameters
states.est = ICC.shuffle(K = K, Time = Time)
theta.est = ICC.thetaEst(K = K, returns = returnsMatrix, stateSeq = states.est, sparse = sparseMethod)
#print( det(theta.est$precision[[1]]))
#print( det(theta.est$precision[[2]]))
#det(LoGo.solve(abs(cov(t(returnsMatrix[, which(states.est == 1)])))))
distance.est = ICC.distance(returns = returnsMatrix, dist = distanceFunction, theta = theta.est, K = K)
#plot(y = distance.est[1,], x = 1:Time, type='l', col = 'red', main = 'initialisation')
#lines(y = distance.est[2,], x = 1:Time, col = 'blue')
optimalViterbi = viterbi(D = t(distance.est), K=K, gamma=gamma)
states.est = optimalViterbi$Final_Path
states.est
# check if assingment is valid. if not start with random assingment
if (length(unique(states.est)) != K || sum(diff(states.est)!=0)<3 ) {
print(paste('Produced single state path at iteration', it))
states.est = ICC.shuffle(K = K, Time = Time)
}
theta.est = ICC.thetaEst(K = K, returns = returnsMatrix, stateSeq = states.est, sparse = 2)
distance.est = ICC.distance(returns = returnsMatrix, dist = distanceFunction, theta = theta.est, K = K)
d.hist =c(d.hist, -1*ICC.TotalDistance(distance.est, gamma = gamma, stateSeq = states.est))
plot(y = distance.est[1,], x = 1:Time, type='l', col = 'red', main = paste('iteration: ', it))
lines(y = distance.est[2,], x = 1:Time, col = 'blue')
if (nrow(distance.est) == 3 ){ lines(y = distance.est[3,], x = 1:Time, col = 'black')}
if (nrow(distance.est) == 4 ){ lines(y = distance.est[3,], x = 1:Time, col = 'yellow')}
if (nrow(distance.est) == 5 ){ lines(y = distance.est[3,], x = 1:Time, col = 'pink')}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pnbd.R
\name{pnbd.PlotFrequencyInCalibration}
\alias{pnbd.PlotFrequencyInCalibration}
\title{Pareto/NBD Plot Frequency in Calibration Period}
\usage{
pnbd.PlotFrequencyInCalibration(
params,
cal.cbs,
censor,
hardie = TRUE,
plotZero = TRUE,
xlab = "Calibration period transactions",
ylab = "Customers",
title = "Frequency of Repeat Transactions"
)
}
\arguments{
\item{params}{Pareto/NBD parameters - a vector with r, alpha, s, and beta, in
that order. r and alpha are unobserved parameters for the NBD transaction
process. s and beta are unobserved parameters for the Pareto (exponential
gamma) dropout process.}
\item{cal.cbs}{calibration period CBS (customer by sufficient statistic). It
must contain columns for frequency ("x") and total time observed ("T.cal").}
\item{censor}{integer used to censor the data. See details.}
\item{hardie}{if TRUE, have \code{\link{pnbd.pmf}} use \code{\link{h2f1}}
instead of \code{\link[hypergeo]{hypergeo}}.}
\item{plotZero}{if FALSE, the histogram will exclude the zero bin.}
\item{xlab}{descriptive label for the x axis.}
\item{ylab}{descriptive label for the y axis.}
\item{title}{title placed on the top-center of the plot.}
}
\value{
Calibration period repeat transaction frequency comparison matrix
(actual vs. expected).
}
\description{
Plots a histogram and returns a matrix comparing the actual and expected
number of customers who made a certain number of repeat transactions in the
calibration period, binned according to calibration period frequencies.
}
\details{
This function requires a censor number, which cannot be higher than the
highest frequency in the calibration period CBS. The output matrix will have
(censor + 1) bins, starting at frequencies of 0 transactions and ending at a
bin representing calibration period frequencies at or greater than the censor
number. The plot may or may not include a bin for zero frequencies, depending
on the plotZero parameter.
}
\examples{
data(cdnowSummary)
cal.cbs <- cdnowSummary$cbs
# cal.cbs already has column names required by method
# parameters estimated using pnbd.EstimateParameters
est.params <- cdnowSummary$est.params
# the maximum censor number that can be used
max(cal.cbs[,"x"])
pnbd.PlotFrequencyInCalibration(params = est.params,
cal.cbs = cal.cbs,
censor = 7,
hardie = TRUE)
}
| /man/pnbd.PlotFrequencyInCalibration.Rd | no_license | cran/BTYD | R | false | true | 2,496 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pnbd.R
\name{pnbd.PlotFrequencyInCalibration}
\alias{pnbd.PlotFrequencyInCalibration}
\title{Pareto/NBD Plot Frequency in Calibration Period}
\usage{
pnbd.PlotFrequencyInCalibration(
params,
cal.cbs,
censor,
hardie = TRUE,
plotZero = TRUE,
xlab = "Calibration period transactions",
ylab = "Customers",
title = "Frequency of Repeat Transactions"
)
}
\arguments{
\item{params}{Pareto/NBD parameters - a vector with r, alpha, s, and beta, in
that order. r and alpha are unobserved parameters for the NBD transaction
process. s and beta are unobserved parameters for the Pareto (exponential
gamma) dropout process.}
\item{cal.cbs}{calibration period CBS (customer by sufficient statistic). It
must contain columns for frequency ("x") and total time observed ("T.cal").}
\item{censor}{integer used to censor the data. See details.}
\item{hardie}{if TRUE, have \code{\link{pnbd.pmf}} use \code{\link{h2f1}}
instead of \code{\link[hypergeo]{hypergeo}}.}
\item{plotZero}{if FALSE, the histogram will exclude the zero bin.}
\item{xlab}{descriptive label for the x axis.}
\item{ylab}{descriptive label for the y axis.}
\item{title}{title placed on the top-center of the plot.}
}
\value{
Calibration period repeat transaction frequency comparison matrix
(actual vs. expected).
}
\description{
Plots a histogram and returns a matrix comparing the actual and expected
number of customers who made a certain number of repeat transactions in the
calibration period, binned according to calibration period frequencies.
}
\details{
This function requires a censor number, which cannot be higher than the
highest frequency in the calibration period CBS. The output matrix will have
(censor + 1) bins, starting at frequencies of 0 transactions and ending at a
bin representing calibration period frequencies at or greater than the censor
number. The plot may or may not include a bin for zero frequencies, depending
on the plotZero parameter.
}
\examples{
data(cdnowSummary)
cal.cbs <- cdnowSummary$cbs
# cal.cbs already has column names required by method
# parameters estimated using pnbd.EstimateParameters
est.params <- cdnowSummary$est.params
# the maximum censor number that can be used
max(cal.cbs[,"x"])
pnbd.PlotFrequencyInCalibration(params = est.params,
cal.cbs = cal.cbs,
censor = 7,
hardie = TRUE)
}
|
\name{sdb}
\alias{sdb}
\docType{package}
\title{sdb: an R-MySQL interface}
\description{
\pkg{sdb} is a meta-package on top of RMySQL, RODBC and mysql CLI
}
\note{
\itemize{
\item Under Windows \pkg{sdb} needs \pkg{RODBC} and the \href{https://dev.mysql.com/downloads/connector/odbc}{ODBC connector}
\item Under Linux \pkg{sdb} needs \pkg{RMySQL}.
\item \pkg{sdb} has host = "scidb.mpio.orn.mpg.de" as default argument for several functions but host is not hard-wired.
}
}
\keyword{package}
\author{
Mihai Valcu \email{valcu@orn.mpg.de} }
\examples{
\dontrun{
# "Generic" connection; no database is chosen.
con = dbcon(user = "test", password = "test")
d = dbq(con, "select * from test.table1")
d
# Connection to the "test" database.
con = dbcon(user = "test", password = "test", database = "test")
d = dbq(con, "select * from table1")
d
# You can save your settings (and optionally, a default database) on disk.
saveCredentials(user = 'test', password = 'test', database = 'test')
# ... then you can run dbq without an explicit connection.
d = dbq(q = "select * from table1")
d
# Using mysql native interface and a saved connection.
#(for the moment only available for Linux).
qstr =c("SET @v1 = 1",
"SET @v2 = 'a'",
"SELECT * FROM table1
WHERE Column1 = @v1 and Column2 = @v2")
d = dbq(q = qstr, native = TRUE)
d
# remove the connection file if you like
removeCredentials()
}
}
| /man/000_sdb.Rd | no_license | alrutten/sdb | R | false | false | 1,589 | rd | \name{sdb}
\alias{sdb}
\docType{package}
\title{sdb: an R-MySQL interface}
\description{
\pkg{sdb} is a meta-package on top of RMySQL, RODBC and mysql CLI
}
\note{
\itemize{
\item Under Windows \pkg{sdb} needs \pkg{RODBC} and the \href{https://dev.mysql.com/downloads/connector/odbc}{ODBC connector}
\item Under Linux \pkg{sdb} needs \pkg{RMySQL}.
\item \pkg{sdb} has host = "scidb.mpio.orn.mpg.de" as default argument for several functions but host is not hard-wired.
}
}
\keyword{package}
\author{
Mihai Valcu \email{valcu@orn.mpg.de} }
\examples{
\dontrun{
# "Generic" connection; no database is chosen.
con = dbcon(user = "test", password = "test")
d = dbq(con, "select * from test.table1")
d
# Connection to the "test" database.
con = dbcon(user = "test", password = "test", database = "test")
d = dbq(con, "select * from table1")
d
# You can save your settings (and optionally, a default database) on disk.
saveCredentials(user = 'test', password = 'test', database = 'test')
# ... then you can run dbq without an explicit connection.
d = dbq(q = "select * from table1")
d
# Using mysql native interface and a saved connection.
#(for the moment only available for Linux).
qstr =c("SET @v1 = 1",
"SET @v2 = 'a'",
"SELECT * FROM table1
WHERE Column1 = @v1 and Column2 = @v2")
d = dbq(q = qstr, native = TRUE)
d
# remove the connection file if you like
removeCredentials()
}
}
|
### Cleaning outliers - values far outside the normal range
## Removing impossible values (sex: 3, score: 6)
outlier <- data.frame(sex = c(1, 2, 1, 3, 2, 1),
                      score = c(5, 4, 3, 4, 2, 6))
# Inspect for outliers
table(outlier$sex)
table(outlier$score)
# Treat invalid codes as missing - sex
# Assign NA when sex is 3
outlier$sex <- ifelse(outlier$sex == 3, NA, outlier$sex)
outlier
# Assign NA when score is outside 1~5
outlier$score <- ifelse(outlier$score > 5, NA, outlier$score)
outlier
## Analyse with missing values excluded
outlier %>%
  filter(!is.na(sex) & !is.na(score)) %>%
  group_by(sex) %>%
  summarise(mean_score = mean(score))
### Removing outliers - extreme values
# Pick a normal range and mark anything outside it as missing
# Logical judgement: adult body weight outside 40~150kg is extreme
# Statistical judgement: outside the top/bottom 0.3%, or beyond 1.5 IQR
# of a boxplot
# Use a boxplot to choose the extreme-value cutoffs
mpg <- as.data.frame(ggplot2::mpg)
boxplot(mpg$hwy)
# Boxplot summary statistics
boxplot(mpg$hwy)$stats
# Mark extremes as missing: assign NA when hwy is OUTSIDE 12~37.
# Bug fix: the original condition (hwy > 12 | hwy < 37) was TRUE for every
# observation and set the whole column to NA.
mpg$hwy <- ifelse(mpg$hwy < 12 | mpg$hwy > 37, NA, mpg$hwy)
table(is.na(mpg$hwy))
# Analyse with missing values excluded
mpg %>%
  group_by(drv) %>%
  summarise(mean_hwy = mean(hwy, na.rm = T))
### Try it yourself
| /Basic/day5-이상치 정제하기.R | no_license | qkqwof/R | R | false | false | 1,380 | r | ###이상치 정제하기 - 정상범주에서 크게 벗어난 값
##이상치 제거하기 - 존재할 수 없는 값(sex:3,score:6)
outlier <- data.frame(sex=c(1,2,1,3,2,1),
score=c(5,4,3,4,2,6))
#이상치 확인
table(outlier$sex)
table(outlier$score)
#결측 처리하기-sex
#sex가 3이면 NA할당
outlier$sex <- ifelse(outlier$sex==3,NA,outlier$sex)
outlier
#score가 1~5 아니면 NA 할당
outlier$score <- ifelse(outlier$score >5,NA,outlier$score)
outlier
##결측치 제외하고 분석
outlier %>%
filter(!is.na(sex) & !is.na(score)) %>%
group_by(sex) %>%
summarise(mean_score=mean(score))
###이상치 제거하기 - 극단적인 값
#정상 범위 기준 정해서 벗어나면 결측 처리
#논리적 판단: 성인 몸무게 40~150kg 벗어나면 극단치
#통계적 판단: 상하위 0.3% 극단치 또는 상자그림 1.5IQR 벗어나면 극단치
#상자그림으로 극단치 기준 정해서 제거하기
#상자그램 생성
mpg <- as.data.frame(ggplot2::mpg)
boxplot(mpg$hwy)
#상자그림 통계치 출력
boxplot(mpg$hwy)$stats #상자그림 통계치 출력
#결측치 처리
#12~37 벗어나면 NA 할당
mpg$hwy <- ifelse(mpg$hwy>12|mpg$hwy <37,NA,mpg$hwy)
table(is.na(mpg$hwy))
#결측치 제외하고 분석하기
mpg %>%
group_by(drv) %>%
summarise(mean_hwy=mean(hwy,na.rm=T))
###혼자서 해보기
|
## Generate a random graph (DAG) of Ng genes and their corresponding
## differential networks B_1 & B_2.
## @param Ng number of gene nodes
## @param e Expected number of edges per node
## @param d Expected ratio of differential edges per node (default 0.1)
## @param dag whether the sampled graphs must be acyclic (DAG) or not
require(igraph)
require(Matrix)
require(glmnet)
## Draw a random weighted gene network B1 and a perturbed "differential"
## copy B2 that shares most of its edges.
## @param Ng    number of genes (nodes)
## @param e     expected number of edges per node
## @param dag   if TRUE, reject any sampled edge that would create a cycle
## @param d     expected ratio of differential edges (changed in B2)
## @param Bmin,Bmax magnitude range for nonzero edge weights
## @param maxit cap on edge-sampling attempts per phase
## Returns list(B1, B2) of Ng x Ng matrices, or NULL when sampling hits
## maxit, the two networks are identical, or (non-DAG case) I - B is
## numerically singular; callers are expected to retry.
getrandDAG = function(Ng,
                      e,
                      dag = TRUE,
                      d = 0.1,
                      Bmin = 0.5,
                      Bmax = 1,
                      maxit = Ng * Ng) {
  B1 = matrix(0,
              nrow = Ng,
              ncol = Ng)
  Nc = Ng * Ng
  # total edge count drawn as Binomial(Ng^2, e / (Ng - 1))
  Ne = rbinom(1, Nc, e / (Ng - 1))
  ## iteration mark
  iter1 = 0
  while (sum(B1) < Ne & iter1 < maxit) {
    # NOTE(review): runif draws a fractional linear index that gets
    # truncated on assignment, so cell Nc is effectively never selected and
    # diagonal cells can be hit; sample.int would be the conventional
    # choice -- confirm before changing, as it alters the RNG stream.
    edge = runif(1, min = 1, max = Nc)
    B1[edge] = TRUE
    if (dag) {
      # keep the candidate edge only if the graph remains acyclic
      g = graph_from_adjacency_matrix(B1)
      B1[edge] = is.dag(g)
    }
    iter1 = iter1 + 1
  }
  # start B2 from B1, then delete / add edges to make the differential copy
  B2 = B1
  nn = which(B1 != 0)  # occupied cells of B1
  nz = which(B1 == 0)  # empty cells of B1
  Nd = ceiling(Ne * d)       # number of differential edges
  Ndf = rbinom(1, Nd, 0.5)   # how many of them are deletions from B1
  while (sum(abs(B1 - B2)) < Ndf) {
    edge = sample(nn, 1)
    B2[edge] = FALSE
  }
  iter2 = 0
  # top B2 back up to Ne edges with additions that are absent from B1
  while (sum(B2) < Ne & iter2 < maxit) {
    edge = sample(nz, 1)
    B2[edge] = TRUE
    if (dag) {
      g = graph_from_adjacency_matrix(B2)
      B2[edge] = is.dag(g)
    }
    iter2 = iter2 + 1
  }
  ne = which(B1 & B2)     # shared edges
  n1 = which(B1 & !(B2))  # edges only in B1
  n2 = which(!(B1) & B2)  # edges only in B2
  # shared edges get identical signed weights; unique edges are drawn apart
  B1[ne] = B2[ne] = runif(length(ne), min = Bmin, max = Bmax) * sample(c(-1, 1), length(ne), replace = T)
  B1[n1] = runif(length(n1), min = Bmin, max = Bmax) * sample(c(-1, 1), length(n1), replace = T)
  B2[n2] = runif(length(n2), min = Bmin, max = Bmax) * sample(c(-1, 1), length(n2), replace = T)
  if(iter1 < maxit & iter2 < maxit & any(B1 != B2)) {
    if(!dag) {
      # cyclic networks must still define a valid SEM: require det(I - B)
      # to be bounded away from zero for both conditions
      detIB1 = det(diag(Ng) - B1)
      detIB2 = det(diag(Ng) - B2)
      if(abs(detIB1) > 1e-6 & abs(detIB2) > 1e-6){
        list(B1 = B1, B2 = B2)
      } else {
        NULL
      }
    } else {
      list(B1 = B1, B2 = B2)
    }
  } else {
    NULL
  }
}
#' Generate random eQTL genotypes and the eQTL-to-gene assignment map.
#' @param N number of samples
#' @param Ng number of genes
#' @param Nk number of eQTLs; assumed to be a multiple of Ng so each gene
#'   receives Nk / Ng eQTLs
#' @return list(G = Ng x Nk 0/1 assignment matrix,
#'              X = Nk x N genotype matrix with entries in {1, 2, 3},
#'              sk = per-gene list of eQTL row indices)
getrandeQTL = function(N, Ng, Nk) {
  step = Nk / Ng
  # genotypes: round(2 * U(0, 1)) + 1 yields 1, 2 or 3
  X = round(2 * matrix(runif(Nk * N), nrow = Nk)) + 1
  # strided assignment: gene i owns eQTL rows i, i + Ng, i + 2*Ng, ...
  sk = lapply(seq_len(Ng), function(i) {
    seq(0, step - 1) * Ng + i
  })
  G = matrix(0,
             nrow = Ng,
             ncol = Nk)
  # build the indicator map with a plain loop instead of the original
  # `<<-` side effect inside lapply
  for (i in seq_len(Ng)) {
    G[i, sk[[i]]] = 1
  }
  list(G = G, X = X, sk = sk)
}
## randNetinit
## Randomly generate a pair of regulatory networks (B1, B2), retrying until
## getrandDAG produces a valid draw.
## @param Ng number of genes
## @param Nk number of eQTLs (unused here; kept for interface compatibility)
## @param r expected edge-density factor (edges per node = Ng * r)
## @param d expected ratio of differential edges
## @param ... passed through to getrandDAG (e.g. Bmin, Bmax, maxit)
## @param dag whether the networks must be acyclic. Bug fix: the original
##   read `dag` from the calling environment, which errored whenever no
##   global `dag` existed; it is now an explicit named-only argument placed
##   after `...` so positional callers are unaffected.
randNetinit = function(Ng = 10,
                       Nk = 10,
                       r = 0.3,
                       d = 0.1,
                       ...,
                       dag = TRUE) {
  B = NULL
  # getrandDAG returns NULL on failure; keep sampling until it succeeds
  while (is.null(B)) {
    B = getrandDAG(Ng,
                   e = Ng * r,
                   dag = dag,
                   d = d,
                   ...)
  }
  B
}
require(mvtnorm)
#' Simulate a pair of structural-equation-model data sets that share eQTLs
#' but differ in their gene networks.
#' @param N number of samples
#' @param Ng number of genes
#' @param Nk number of eQTLs
#' @param r expected edge-density factor (edges per node = Ng * r)
#' @param d expected ratio of differential edges between the two networks
#' @param dag whether the sampled networks must be acyclic
#' @param sigma2 scale multiplying the standard-normal error terms
#' @param B optional pre-drawn list(B1, B2); sampled via getrandDAG when NULL
#' @param ... passed through to getrandDAG
#' @return list(obs = observed data (Y1, Y2, X, sk),
#'              var = ground truth (sparse B1, B2, F and dimensions))
getrandsem = function(N = 200,
                      Ng = 10,
                      Nk = 10,
                      r = 0.3,
                      d = 0.1,
                      dag = TRUE,
                      sigma2 = 0.1,
                      B = NULL,
                      ...) {
  if (is.null(B)) {
    # retry until getrandDAG produces a usable network pair
    B = getrandDAG(Ng, e = Ng * r, dag = dag, d = d, ...)
    while (is.null(B)) {
      B = getrandDAG(Ng, e = Ng * r, dag = dag, d = d, ...)
    }
  }
  Q = getrandeQTL(N, Ng, Nk)
  F = Q[[1]]   # Ng x Nk eQTL-to-gene map
  X = Q[[2]]   # Nk x N genotype matrix
  sk = Q[[3]]  # per-gene eQTL index list
  # Bug fix: the noise scale is the `sigma2` parameter; the original
  # multiplied by an undefined `sigma`, silently resolving it from the
  # global environment (or erroring when absent). Callers that passed
  # `sigma = ...` already bound this parameter via partial matching.
  E1 = sigma2 * t(rmvnorm(N, mean = rep(0, Ng), sigma = diag(Ng)))
  E2 = sigma2 * t(rmvnorm(N, mean = rep(0, Ng), sigma = diag(Ng)))
  # structural equations: (I - B) Y = F X + E  =>  Y = (I - B)^{-1}(F X + E)
  Y1 = solve(diag(Ng) - B[[1]]) %*% (F %*% X + E1)
  Y2 = solve(diag(Ng) - B[[2]]) %*% (F %*% X + E2)
  list(
    obs = list(
      Y1 = Y1,
      Y2 = Y2,
      X = X,
      sk = sk
    ),
    var = list(
      B1 = Matrix(B[[1]], sparse = T),
      B2 = Matrix(B[[2]], sparse = T),
      F = Matrix(F, sparse = T),
      N = N,
      Ng = Ng,
      Nk = Nk
    )
  )
}
## data = getrandsem(N = 200, Ng = 30, Nk = 90, r = 0.1, d = 0.1, sigma = 1, dag = TRUE)
## datn = getrandsem(N = 200, Ng = 30, Nk = 90, r = 0.1, d = 0.1, sigma = 1, dag = FALSE)
## utility functions
# Subtract each row's mean from that row and return the TRANSPOSE:
# for a genes x samples input the result is samples x genes, because
# apply(..., 1, ...) stacks the centered rows as columns.
center = function(X) {
  demean_row = function(row) {
    row - mean(row)
  }
  apply(X, 1, demean_row)
}
## For every gene, extract the rows of the eQTL matrix X that belong to it.
## `data$obs$sk` holds the per-gene row-index vectors; the result is a list
## of single-gene submatrices (kept as matrices via drop = FALSE).
submatX = function(data) {
  lapply(data$obs$sk, function(rows) {
    data$obs$X[rows, , drop = FALSE]
  })
}
## Hat matrix of X: the orthogonal projection onto the column space of X,
## i.e. X (X^T X)^{-1} X^T. Requires X to have full column rank.
projection = function(X) {
  XtX = crossprod(X)
  X %*% solve(XtX, t(X))
}
## Hat matrix of X computed from a (pivoted LAPACK) QR decomposition:
## build an orthonormal basis Q for col(X) and return Q Q^T. Slower than
## the normal-equations version but numerically more stable.
projection.QR = function(X) {
  dec = qr.default(X, LAPACK = TRUE)
  basis = qr.qy(dec, diag(1, nrow = nrow(dec$qr), ncol = dec$rank))
  tcrossprod(basis)
}
## Centre the gene-expression matrix Y and every per-gene eQTL matrix in X
## (via the sibling `center`, which also transposes the result), returning
## the centred data together with the row means that were removed.
centralize = function(X, Y) {
  muX = lapply(X, rowMeans)
  muY = rowMeans(Y)
  list(X = lapply(X, center),
       Y = center(Y),
       muX = muX,
       muY = muY)
}
## Ridge (L2) regression estimate of the network B, the eQTL effects F and
## the residual variance sigma2: each gene's expression is regressed on all
## other genes after projecting out its own eQTL effects.
#' @param X list of per-gene eQTL matrices (as from submatX)
#' @param Y gene-expression matrix, genes x samples (centred internally)
#' @param rho L2 penalty on the network coefficients
#' @return list(B = M x M network estimate, F = per-gene eQTL coefficients,
#'   sigma2 = pooled residual variance, mu = per-gene intercepts)
#' @example
#' X = submatX(data)
#' Y = data$obs$Y1
#' B = constrained_L2reg(X, Y, rho = 0.1)
constrained_L2reg = function(X, Y, rho) {
  # gene number(M) & sample number(N)
  std = centralize(X, Y)
  X = std$X
  Y = std$Y
  meanX = std$muX
  meanY = std$muY
  # NOTE: centralize transposes, so Y is now samples x genes
  M = ncol(Y)
  N = nrow(Y)
  B = Matrix(0,
             nrow = M,
             ncol = M,
             sparse = T)
  f = list()
  err = 0
  for (i in 1:M) {
    Xi = X[[i]] ## n x sk
    # residual-maker: removes gene i's own eQTL contribution
    Pi = diag(N) - projection(Xi) ## n x n
    yi = Y[, i, drop = F] ## n x 1
    Yi = Y[,-i, drop = F] ## n x (p-1)
    ## ridge solution: (Y^TPY + rho)^{-1}Y^TPy
    bi = solve(crossprod(Yi, Pi %*% Yi) + rho * diag(M - 1)) %*% t(Yi) %*% Pi %*% yi
    ## bi = glmnet(Pi %*% Yi, Pi %*% yi, alpha = 0, lambda = rho)[["beta"]][, 1]
    B[i, -i] = bi
    # eQTL coefficients by OLS given the current network row
    f[[i]] = solve(crossprod(Xi)) %*% t(Xi) %*% (yi - Yi %*% bi)
    err = err + crossprod(yi - Yi %*% bi - Xi %*% f[[i]])
  }
  # pooled residual variance over all genes and samples
  sigma2 = err / (M * N - 1)
  # recover intercepts on the original (uncentred) scale
  mu = (diag(M) - B) %*% meanY - sapply(1:M, function(i) {
    meanX[[i]] %*% f[[i]]
  })
  list(
    B = as.matrix(B),
    F = f,
    sigma2 = sigma2,
    mu = mu
  )
}
## Choose the ridge penalty by cross-validation and return the residual
## variance of the refit on the full data.
#' @param nrho number of L2 penalty values tried (log-spaced over 1e-6..1e2)
#' @param ncv number of cross-validation folds
#' @return list(rho.opt = CV-optimal penalty, sigma2.opt = its sigma2)
#' @example
#' sigma2 = getsigma2_L2reg(X, Y, nrho = 15, ncv = 5)
getsigma2_L2reg = function(X, Y, nrho = 10, ncv = 5) {
  rho_factors = 10 ** (seq(-6, 2, length.out = nrho))
  N = ncol(Y)
  M = nrow(Y)
  cv.err = matrix(0, nrow = nrho, ncol = ncv)
  # random fold assignment per sample
  cv.fold = sample(seq(1, ncv), size = N, replace = T)
  irho = 1
  for (rho in rho_factors) {
    for (cv in 1:ncv) {
      ytrain = Y[, cv.fold != cv]
      xtrain = lapply(X, function(x) {
        x[, cv.fold != cv, drop = F]
      })
      ytest = Y[, cv.fold == cv]
      xtest = lapply(X, function(x) {
        x[, cv.fold == cv, drop = F]
      })
      fit = constrained_L2reg(xtrain, ytrain, rho)
      # per-gene eQTL contribution predicted on the held-out samples
      ftest = lapply(1:M, function(i) {
        crossprod(fit$F[[i]], xtest[[i]])
      })
      ftest = do.call(rbind, ftest)
      # squared Frobenius prediction error of the SEM on the test fold
      cv.err[irho, cv] = norm((diag(M) - fit$B) %*% ytest - ftest - fit$mu, type = "f") ^
        2
    }
    irho = irho + 1
  }
  cv.mean = rowMeans(cv.err)
  rho.min = rho_factors[which.min(cv.mean)]
  # refit on all data at the selected penalty
  fit = constrained_L2reg(X, Y, rho.min)
  list(rho.opt = rho.min, sigma2.opt = fit$sigma2)
}
##---------------------------------
# utility functions for SML-lasso #
##---------------------------------
## Factory for the component-wise SML objective in a single entry B[i, j]:
## f(x) = -N/2 * sigma2 * log((a0 - x)^2) - a1*x + a2*x^2/2 + lambda*w*|x|.
obj_cwiseSML = function(N, a0, a1, a2, lambda, w, sigma2) {
  function(x) {
    log_det_term = -N / 2 * sigma2 * log((a0 - x) ^ 2)
    quad_term = 1 / 2 * a2 * x ^ 2 - a1 * x
    penalty = lambda * w * abs(x)
    log_det_term + quad_term + penalty
  }
}
## Factory for the stationarity condition of the component-wise SML update.
## a0 = det(I - B) / cij + Bij (with c = 1). For a fixed sign condition t of
## B[i, j] (t = 1 if Bij > 0, t = -1 if Bij < 0, t = 0 if Bij = 0) the
## optimum solves the quadratic a*x^2 + b*x + c = 0 whose coefficients are
## returned here.
grad_cwiseSML = function(N, a0, a1, a2, lambda, w, sigma2) {
  function(t) {
    shift = lambda * w * t
    list(a = -a2,
         b = a1 + a2 * a0 - shift,
         c = N * sigma2 + (shift - a1) * a0)
  }
}
## Real roots of the quadratic a*x^2 + b*x + c = 0 via the discriminant.
## Returns list(n = number of real roots, x = root vector); x is NULL when
## there are no real roots.
poly2_solver = function(a, b, c) {
  disc = b ^ 2 - 4 * a * c
  if (disc > 0) {
    sq = sqrt(disc)
    list(n = 2, x = c((-b - sq) / (2 * a), (-b + sq) / (2 * a)))
  } else if (disc == 0) {
    list(n = 1, x = c(-b / (2 * a)))
  } else {
    list(n = 0, x = NULL)
  }
}
## Solve the sparse maximum-likelihood (SML) SEM problem with an adaptive
## lasso penalty by component-wise (coordinate) descent: closed-form updates
## of each network entry B[i, j] alternate with least-squares updates of the
## eQTL coefficients f[[i]].
#' @param B initial network estimate B0 (estimated by constrained_L2reg)
#' @param f initial eQTL coefficients F
#' @param Y gene expression, genes x samples (centred internally)
#' @param X list of per-gene eQTL matrices (as from submatX)
#' @param sigma2 residual variance estimated by constrained_L2reg
#' @param N number of samples; Ng number of genes
#' @param lambda lasso penalty coefficient
#' @param weighted if TRUE use adaptive-lasso weights 1 / |B0|
#' @param maxit maximum number of outer iterations
#' @param verbose >= 2 prints per-iteration convergence information
#' @example
#' X = submatX(data)
#' Y = data$obs$Y1
#' sigma2 = getsigma2_L2reg(X, Y, nrho = 20, ncv = 5)
#' param.init = constrained_L2reg(X, Y, rho = sigma2$rho.opt)
#' param.opt = sparse_maximum_likehood_cwise(B = param.init$B, f = param.init$F,
#'   Y = Y, X = X, sigma2 = param.init$sigma2[1], N = data$var$N,
#'   Ng = data$var$Ng, lambda = 15, maxit = 100)
sparse_maximum_likehood_cwise = function(B,
                                         f,
                                         Y,
                                         X,
                                         sigma2,
                                         N,
                                         Ng,
                                         lambda,
                                         weighted = TRUE,
                                         maxit = 100,
                                         verbose = 2) {
  ## data centralization (note: centralize also transposes Y to samples x genes)
  std = centralize(X, Y)
  X = std$X
  Y = std$Y
  meanX = std$muX
  meanY = std$muY
  ## update for eQTL coeffs: precompute OLS pieces so that
  ## f[[i]] = f0[[i]] - f1[[i]] %*% B[i, ] holds for any current B
  f0 = list()
  f1 = list()
  for (i in 1:Ng) {
    Xi = X[[i]] # n x sk
    yi = Y[, i, drop = F] # n x 1 (for specific gene i)
    Pi = solve(crossprod(Xi)) %*% t(Xi)
    f0[[i]] = Pi %*% yi
    f1[[i]] = Pi %*% Y # f[[i]] = f0[[i]] - f1[[i]] %*% B[i,]
  }
  ## update for gnet coeffs
  niter = 1
  ImB = diag(Ng) - B
  IBinv = solve(ImB)
  # adaptive-lasso weights; entries with B0 == 0 get weight Inf, which pins
  # those coefficients at zero in the soft-threshold updates
  wB = 1 / abs(B)
  while (niter <= maxit) {
    B.prev = B
    f.prev = f
    for (i in 1:Ng) {
      ## IBinv i column and j row -> c_{ij}
      ## c_{ij} / det(I - B) = (I - B)^{-1}_{j, i}
      ci = IBinv[, i]
      dbi = vector("numeric", Ng)
      for (j in 1:Ng) {
        ## update B[i, j] for i != j (the diagonal is structurally zero)
        if (i != j) {
          bij.prev = bij = B[i, j]
          wij = if (weighted) {
            wB[i, j]
          } else {
            1
          }
          mij = ci[j]
          ## i-th row of B
          bi = ImB[i, ]
          bi[j] = 0
          ## Yej is the j-th column of Y
          Yej = Y[, j, drop = F]
          a1 = crossprod(Y %*% bi - X[[i]] %*% f[[i]], Yej)
          a2 = crossprod(Yej)
          ## if mij == 0, cij == 0: the log-det term drops and the update
          ## reduces to a plain soft-threshold
          if (mij == 0) {
            if (abs(a1) > lambda * wij) {
              bij = sign(a1) * (abs(a1) - lambda * wij) / a2
            } else {
              bij = 0
            }
          } else {
            a0 = 1 / mij + bij.prev
            cond = list(1,-1, 0) # bij condition
            obj = obj_cwiseSML(N, a0, a1, a2, lambda, wij, sigma2)
            grad = grad_cwiseSML(N, a0, a1, a2, lambda, wij, sigma2)
            # solve the stationarity quadratic under each sign condition
            # and keep the candidate with the smallest objective value
            params = lapply(cond, function(t) {
              x = if (t != 0) {
                tmp = do.call(poly2_solver, grad(t))
                tmp$x[tmp$x * t > 0]
              } else {
                0
              }
              list(x = x)
            })
            params = unlist(params)
            objval = sapply(params, obj)
            mix = which.min(objval)
            bij = params[mix]
          }
          dbij = bij.prev - bij
          dbi[j] = dbij
          B[i, j] = bij
          # cheap rank-one correction of the tracked inverse column
          ci = ci / (1 + dbij * mij)
          ##IBinv = IBinv - IBinv[,i,drop = F] %*% IBinv[j,,drop = F] / (1/dbij + mij)
          ImB = diag(Ng) - B
        }
      } ## for(j in 1:Ng)
      ## Sherman-Morrison update of IBinv for the whole changed row:
      ## (ImB + ei^T %*% 1 %*% dbi)^{-1}
      IBinv = IBinv - IBinv[, i, drop = F] %*% dbi %*% IBinv / (1 + dbi %*% ImB[, i, drop = F])[1]
      f[[i]] = f0[[i]] - f1[[i]] %*% B[i, ]
    } ## for(i in 1:Ng)
    # relative change in B and f forms the convergence criterion
    Berr = norm(B - B.prev, type = "f") / (1 + norm(B.prev, type = "f"))
    Ferr = sum(sapply(1:Ng, function(i) {
      norm(f[[i]] - f.prev[[i]], type = "f")
    })) / (1 + sum(sapply(1:Ng, function(i) {
      norm(f.prev[[i]], type = "f")
    })))
    err = Berr + Ferr
    if (verbose >= 2) {
      cat(sprintf("SML: iteration = %d, error = %f\n", niter, err))
    }
    niter = niter + 1
    if (err < 1e-4 || niter > maxit) {
      # recover intercepts on the original (uncentred) scale
      mu = (diag(Ng) - B) %*% meanY - sapply(1:Ng, function(i) {
        meanX[[i]] %*% f[[i]]
      })
      B = Matrix(B, sparse = T)
      break
    }
  } ## while(niter <= maxit)
  list(
    B = B,
    f = f,
    mu = mu,
    niter = niter,
    err = err
  )
}
##---------------------------------
# utility functions for SML-CD #
##---------------------------------
## Objective value of the penalised SEM likelihood for one condition.
#' @param B gene network coefficients
#' @param f eQTL coefficients (list, one entry per gene)
#' @param Y gene expression (centralized), genes x samples
#' @param X eQTL quantities (centralized), list of per-gene matrices
#' @param mu per-gene intercepts
#' @param sigma2 estimated sigma2 in the log-likelihood
#' @param N number of samples
#' @param lambda lambda coefficient in the weighted lasso
#' @param weight adaptive-lasso weight matrix
#' @param detIB det(I - B), supplied by the caller
#' @param type "lang" = full penalised objective, "prim" = likelihood only,
#'   "err" = squared residual only
#' @description argmin -N / 2 * log(det(I - B))^2 + 1 / (2 * sigma2) * ||(I - B) %*% Y - F %*% X - mu||_F^2 + lambda * abs(weight * B)
sparse_likelihood = function(B,
                             f,
                             Y,
                             X,
                             mu,
                             sigma2,
                             N,
                             lambda,
                             weight,
                             detIB,
                             type = c("lang", "prim", "err")) {
  logdet = -N / 2 * log(detIB ^ 2)
  IBerr2 = 0
  # accumulate the squared structural residual gene by gene
  for (i in 1:length(f)) {
    err = Y[i, ] - B[i,-i] %*% Y[-i, ] - crossprod(f[[i]], X[[i]]) - mu[i]
    IBerr2 = IBerr2 + sum(err ^ 2)
  }
  if (match.arg(type) == "lang") {
    logdet + IBerr2 / (2 * sigma2) + lambda * sum(abs(weight * B))
  } else if (match.arg(type) == "prim") {
    logdet + IBerr2 / (2 * sigma2)
  } else {
    IBerr2
  }
}
#' @description gradient row-wise
#' @param c vector c = ci / det(I-B); (ng-1) x 1
## Gradient of the smooth part of the row-wise objective, returned as a
## closure of the row coefficients x:  N * c + (Yp %*% x - Hy^T) / sigma2,
## where c = ci / det(I-B) (see caller).
grad_rwise_SML = function(N, c, Yp, Hy, sigma2) {
  Hyt = t(Hy) # hoist the transpose out of the closure
  function(x) {
    quad = Yp %*% x - Hyt # gradient of the quadratic residual term
    N * c + quad / sigma2
  }
}
#' @description Lipschitz modulus, row-wise
#' @param o solve((I-B)[-i,] %*% t((I-B)[-i,]))
#' @param gs (I-B)[-i,-i]
#' @param si gi[,i]
#' @param Yp
## Lipschitz modulus for the row-wise proximal-gradient step.  A ridge
## stabilizer `lambda` is grown by factors of 10 until the modulus is
## non-negative.  (The candidate computation was duplicated before and inside
## the while loop in the original; it is factored into a helper here.)
#' @param N sample count; c2i squared cofactor norm; detIBi det of the
#'   reduced Gram matrix; maxEigenYp largest eigenvalue of Yp; Ng gene count
lips_rwise_SML = function(N,
                          o,
                          gs,
                          si,
                          c2i,
                          detIBi,
                          maxEigenYp,
                          sigma2,
                          Ng) {
  ogs = o %*% gs
  ImO = diag(Ng - 1) - crossprod(gs, ogs)
  sOg = crossprod(si, ogs)
  c = 1 - crossprod(si, o %*% si)
  ## candidate modulus for a given stabilizer value
  modulus = function(lambda) {
    ## NOTE(review): chol2inv() is applied to a plain (non-Cholesky) matrix,
    ## exactly as in the original formulation — confirm this is intended
    ## rather than chol2inv(chol(.)) or solve(.).
    x = -1 * tcrossprod(chol2inv(ImO + diag(Ng - 1) * lambda), sOg)
    N * c2i / (crossprod(x, ImO %*% x) + 2 * sOg %*% x + c) / (detIBi + 1e-6) +
      maxEigenYp / sigma2
  }
  lambda = 1e-6
  L = modulus(lambda)
  while (L < 0) {
    lambda = lambda * 10
    L = modulus(lambda)
  }
  L
}
#' @description proximal operator for lasso
#' argmin lambda * |w * x| + c / 2 * |x - u|_2^2
#' Soft-thresholding (proximal operator of the weighted L1 norm):
#'   argmin_x lambda * |w * x| + c/2 * ||x - u||_2^2
#' Elementwise: shrink u toward zero by lambda * w / c.
prox_lasso = function(lambda, c, u, w) {
  shift = lambda * w / c
  positive = pmax(u - shift, 0)
  negative = pmin(u + shift, 0)
  positive + negative
}
## solve SML problem by coordinate descent
## row-wise --> row-wise update B[i,]
#' @param sigma2 estimated from constrained_L2reg
#' @param B B0 initialization (Derived from ridge regression)
#' @param f F initialization
#' @example
#' X = submatX(data)
#' Y = data$obs$Y1
#' sigma2 = getsigma2_L2reg(X, Y, nrho = 20, ncv = 5)
#' param.init = constrained_L2reg(X, Y, rho = sigma2$rho.opt)
#' param.opt1 = sparse_maximum_likehood_bcd(B = param.init$B, f = param.init$F, Y = Y, X = X, sigma2 = param.init$sigma2[1], N = data$var$N, Ng = data$var$Ng, lambda = 15, maxit = 1000, rho = sigma2$rho.opt)
## Single-condition sparse maximum-likelihood SEM solver, block coordinate
## descent (BCD).  Objective (see sparse_likelihood):
##   -N/2 * log(det(I-B)^2) + ||(I-B)Y - FX||_F^2/(2*sigma2) + lambda*||wB*B||_1
## Each outer sweep takes one proximal-gradient step per row of B with a
## row-specific Lipschitz constant, then updates f[[i]] in closed form.
## Returns list(B (sparse Matrix), f, mu, niter, err, detIB).
## NOTE(review): the `weighted` flag is accepted but never read below; the
## adaptive weights wB = 1/|B| are always applied — confirm intent.
sparse_maximum_likehood_bcd = function(B,
                                       f,
                                       Y,
                                       X,
                                       sigma2,
                                       N,
                                       Ng,
                                       lambda,
                                       weighted = TRUE,
                                       maxit = 100,
                                       verbose = 2) {
  ## data centralization
  std = centralize(X, Y)
  X = std$X
  Y = std$Y
  meanX = std$muX
  meanY = std$muY
  ## update for eQTL row-wise (specific for genes)
  ## Precompute, per gene i, the projection quantities that are fixed for the
  ## whole optimization (hat-matrix residual projections of Y on X[[i]]).
  f0 = list()
  f1 = list()
  Yp = list()
  Hy = list()
  Yp.maxEigen = list()
  for (i in 1:Ng) {
    Xi = X[[i]] # n x sk
    yi = Y[, i, drop = F] # n x 1 (for specific gene i)
    Yi = Y[, -i] # n x (ng-1) (for specific gene i)
    Pi = solve(crossprod(Xi)) %*% t(Xi)
    f0[[i]] = Pi %*% yi # f0 \in sk x 1; f1 \in sk x (ng - 1)
    f1[[i]] = Pi %*% Yi # f[[i]] = f0[[i]] - f1[[i]] %*% bi | bi = B[i,-i]
    Hi = diag(N) - Xi %*% Pi # n x n projection matrix
    Yp[[i]] = t(Yi) %*% Hi %*% Yi # (ng-1) x (ng-1)
    Hy[[i]] = t(yi) %*% Hi %*% Yi # 1 x (ng-1)
    ## maximized eigen-value for Yp
    Yp.maxEigen[[i]] = eigen(Yp[[i]])$values[1]
  }
  ## update for gnet row-wise
  niter = 1
  ImB = diag(Ng) - B
  IBinv = solve(ImB)
  detIB = det(ImB)
  wB = 1 / abs(B) # adaptive-lasso weights from the initializer
  while (niter <= maxit) {
    B.prev = B
    f.prev = f
    for (i in 1:Ng) {
      ## -N*sigma2*log(det(I-B)^2) + \sum_{i=1}^{Ng} bi^T %*% Yp %*% bi - Hy %*% bi
      ## ci / det(I-B); (ng-1) x 1
      ci = IBinv[-i, i, drop = F]
      bi = t(B[i,-i, drop = F])
      grad = grad_rwise_SML(N, ci, Yp[[i]], Hy[[i]], sigma2[1])
      ## Lipschitz for row-i
      oi = solve(tcrossprod(ImB[-i,]))
      deti = det(tcrossprod(ImB[-i,]))
      gii = ImB[-i, -i]
      si = ImB[-i, i, drop = F]
      c2i = sum((ci * detIB) ^ 2)
      gi = grad(bi)
      Li = lips_rwise_SML(N, oi, gii, si, c2i, deti, Yp.maxEigen[[i]], sigma2, Ng)
      ## proximal operator for lasso
      ## argmin(lambda * w * |x| + Li/2||x - ui||_2^2)
      ui = bi - gi / Li[1]
      wBi = wB[i,-i]
      B[i, -i] = prox_lasso(lambda, Li[1], ui[, 1], wBi)
      dbi = B.prev[i,] - B[i,]
      ImB = diag(Ng) - B
      ## update det(I-B) & (I-B)^{-1}
      ## rank-one determinant update (matrix determinant lemma)
      detIB = (ImB[i,] %*% IBinv[, i, drop = F])[1] * detIB
      # IBinv = IBinv - IBinv[, i, drop = F] %*% dbi %*% IBinv / (1 + dbi %*% ImB[, i, drop = F])[1]
      ## exact re-inversion each row (the cheaper Sherman-Morrison update is
      ## left commented out above)
      IBinv = solve(ImB)
      f[[i]] = f0[[i]] - f1[[i]] %*% B[i, -i]
    }
    ## relative change of B and f between sweeps.
    ## NOTE(review): denominators lack the "+ 1" guard used in the iPALM
    ## variant, so an all-zero B.prev / f.prev would divide by zero — confirm.
    Berr = norm(B - B.prev, type = "f") / norm(B.prev, type = "f")
    Ferr = sum(sapply(1:Ng, function(i) {
      norm(f[[i]] - f.prev[[i]], type = "f")
    })) / sum(sapply(1:Ng, function(i) {
      norm(f.prev[[i]], type = "f")
    }))
    err = Berr + Ferr
    if (verbose >= 2) {
      cat(sprintf("SML: iteration = %d, error = %f\n", niter, err))
    }
    niter = niter + 1
    if (err < 1e-4 || niter > maxit) {
      ## recover the intercept on the original (uncentered) scale
      mu = (diag(Ng) - B) %*% meanY - sapply(1:Ng, function(i) {
        meanX[[i]] %*% f[[i]]
      })
      B = Matrix(B, sparse = T)
      break
    }
  } ## while (niter <= maxit)
  list(
    B = B,
    f = f,
    mu = mu,
    niter = niter,
    err = err,
    detIB = detIB
  )
}
## inertial PALM
# utility functions
## Inertial (momentum) parameter schedules for iPALM.
#' @param opts schedule name: "cont" = constant `init`, "lin" = (k-1)/(k+2)
#' @param init the constant used by the "cont" schedule
#' @return a function of the iteration counter k
inertial_pars = function(opts = c("cont", "lin"),
                         init = 0) {
  ## match.arg() validates `opts` and resolves the vector default to its
  ## first entry; previously switch() errored on the length-2 default.
  opts = match.arg(opts)
  switch(
    opts,
    "cont" = function(k) {
      init
    },
    "lin" = function(k) {
      (k - 1) / (k + 2)
    }
  )
}
## solve SML problem by coordinate descent
## row-wise --> row-wise update B[i,]
#' @param sigma2 estimated from constrained_L2reg
#' @param B B0 initialization (Derived from ridge regression)
#' @param f F initialization
#' @param rho stable for lipschitz calculation(deprecated)
#' @example
#' X = submatX(data)
#' Y = data$obs$Y1
#' sigma2 = getsigma2_L2reg(X, Y, nrho = 20, ncv = 5)
#' param.init = constrained_L2reg(X, Y, rho = sigma2$rho.opt)
#' param.opt2 = sparse_maximum_likehood_iPALM(B = param.init$B, f = param.init$F, Y = Y, X = X, sigma2 = param.init$sigma2[1], N = data$var$N, Ng = data$var$Ng, lambda = 15, maxit = 200)
## Single-condition sparse maximum-likelihood SEM solver with inertial PALM:
## like sparse_maximum_likehood_bcd, but each row's gradient is evaluated at
## an extrapolated (momentum) point B.inert, sigma2 is re-estimated every
## sweep via sigma2_sml, and (I-B)^{-1} is maintained with a rank-one
## Sherman-Morrison update instead of a full solve.
## NOTE(review): the `weighted` flag is accepted but never read; wB = 1/|B|
## is always applied — confirm intent.
sparse_maximum_likehood_iPALM = function(B,
                                         f,
                                         Y,
                                         X,
                                         sigma2,
                                         N,
                                         Ng,
                                         lambda,
                                         weighted = TRUE,
                                         inertial = inertial_pars("lin"),
                                         maxit = 100,
                                         verbose = 2,
                                         threshold = 1e-4) {
  ## data centralization
  std = centralize(X, Y)
  X = std$X
  Y = std$Y
  meanX = std$muX
  meanY = std$muY
  ## update for eQTL row-wise (specific for genes)
  ## Per-gene projection quantities, fixed over the optimization.
  f0 = list()
  f1 = list()
  Yp = list()
  Hy = list()
  Yp.maxEigen = list()
  for (i in 1:Ng) {
    Xi = X[[i]] # n x sk
    yi = Y[, i, drop = F] # n x 1 (for specific gene i)
    Yi = Y[, -i] # n x (ng-1) (for specific gene i)
    Pi = solve(crossprod(Xi)) %*% t(Xi)
    f0[[i]] = Pi %*% yi # f0 \in sk x 1; f1 \in sk x (ng - 1)
    f1[[i]] = Pi %*% Yi # f[[i]] = f0[[i]] - f1[[i]] %*% bi | bi = B[i,-i]
    Hi = diag(N) - Xi %*% Pi # n x n projection matrix
    Yp[[i]] = t(Yi) %*% Hi %*% Yi # (ng-1) x (ng-1)
    Hy[[i]] = t(yi) %*% Hi %*% Yi # 1 x (ng-1)
    ## maximized eigen-value for Yp
    Yp.maxEigen[[i]] = eigen(Yp[[i]])$values[1]
  }
  ## update for gnet row-wise
  niter = 1
  ImB = diag(Ng) - B
  IBinv = solve(ImB)
  detIB = det(ImB)
  wB = 1 / abs(B)
  ## two-step history of B for the inertial extrapolation
  B.prevs = list(B, B)
  while (niter <= maxit) {
    inert.pars = inertial(niter)
    ## extrapolated point: B_k + beta_k * (B_k - B_{k-1})
    B.inert = B.prevs[[2]] + inert.pars * (B.prevs[[2]] - B.prevs[[1]])
    f.prev = f
    for (i in 1:Ng) {
      ## -N*sigma2*log(det(I-B)^2) + \sum_{i=1}^{Ng} bi^T %*% Yp %*% bi - Hy %*% bi
      ## ci / det(I-B); (ng-1) x 1
      ci = IBinv[-i, i, drop = F]
      bi = t(B.inert[i,-i, drop = F]) # gradient is taken at the inertial point
      grad = grad_rwise_SML(N, ci, Yp[[i]], Hy[[i]], sigma2[1])
      ## Lipschitz for row-i
      oi = solve(tcrossprod(ImB[-i,]))
      deti = det(tcrossprod(ImB[-i,]))
      gii = ImB[-i, -i]
      si = ImB[-i, i, drop = F]
      c2i = sum((ci * detIB) ^ 2)
      gi = grad(bi)
      Li = lips_rwise_SML(N, oi, gii, si, c2i, deti, Yp.maxEigen[[i]], sigma2, Ng)
      ## proximal operator for lasso
      ## argmin(lambda * w * |x| + Li/2||x - ui||_2^2)
      ui = bi - gi / Li[1]
      wBi = wB[i,-i]
      B[i, -i] = prox_lasso(lambda, Li[1], ui[, 1], wBi)
      dbi = B.prevs[[2]][i,] - B[i,]
      ImB = diag(Ng) - B
      ## update det(I-B) & (I-B)^{-1}
      ## rank-one determinant update, then Sherman-Morrison inverse update
      detIB = (ImB[i,] %*% IBinv[, i, drop = F])[1] * detIB
      IBinv = IBinv - IBinv[, i, drop = F] %*% dbi %*% IBinv / (1 + dbi %*% IBinv[, i, drop = F])[1]
      f[[i]] = f0[[i]] - f1[[i]] %*% B[i, -i]
    }
    ## re-estimate the noise variance from the current fit
    sigma2 = sigma2_sml(X, Y, B, f, Ng, N)
    ## relative change in B and f, guarded with "+ 1" against zero norms
    Berr = norm(B - B.prevs[[2]], type = "f") / (1 + norm(B.prevs[[2]], type = "f"))
    Ferr = sum(sapply(1:Ng, function(i) {
      norm(f[[i]] - f.prev[[i]], type = "f")
    })) / (1 + sum(sapply(1:Ng, function(i) {
      norm(f.prev[[i]], type = "f")
    })))
    err = Berr + Ferr
    if (verbose >= 2) {
      cat(sprintf("SML: iteration = %d, error = %f\n", niter, err))
    }
    niter = niter + 1
    ## shift the two-step history window
    B.prevs = list(B.prevs[[2]], B)
    if (err < threshold || niter > maxit) {
      ## recover the intercept on the original (uncentered) scale
      mu = (diag(Ng) - B) %*% meanY - sapply(1:Ng, function(i) {
        meanX[[i]] %*% f[[i]]
      })
      break
    }
  } ## while (niter <= maxit)
  list(
    B = B,
    f = f,
    mu = mu,
    sigma2 = sigma2,
    niter = niter,
    err = err,
    detIB = detIB
  )
}
###### fused lasso ########
## centeralized Ys (gene expression) and Xs (eQTL quantitive)
#' @example
#' Ys = data$obs[c("Y1", "Y2")]
#' Xs = data$obs$X
## Center each condition's eQTL (Xs) and expression (Ys) matrices row-wise,
## returning the centered data together with the removed row means.
centralize_mult = function(Xs, Ys) {
  muX = lapply(Xs, rowMeans)
  muY = lapply(Ys, rowMeans)
  list(
    X = lapply(Xs, center),
    Y = lapply(Ys, center),
    muX = muX,
    muY = muY
  )
}
## ridge regression for estimate sigma2 initialization in gene expression
#' @param M number of gene
#' @param N number of sample
#' @example
#' M = data$var$Ng
#' N = data$var$N
#' B = constrained_L2reg_multi(Xs, Ys, sigma2$rho.opt, M, N)
## Ridge (L2-constrained) regression fit for each of K conditions, pooling
## the per-condition residual variance into a single sigma2 estimate.
#' @param X per-gene eQTL design matrices (shared across conditions)
#' @param Ys list of gene-expression matrices, one per condition
#' @param rho L2 penalty coefficient
#' @param M number of genes; N number of samples
#' @return list(B, F, sigma2, mu) with one entry per condition in B/F/mu
constrained_L2reg_multi = function(X, Ys, rho, M, N) {
  K = length(Ys)
  ## preallocate per-condition containers (avoids growing lists in the loop);
  ## the internal name Fs avoids shadowing the FALSE shorthand F
  B = vector("list", K)
  Fs = vector("list", K)
  mu = vector("list", K)
  err = 0
  df = 0
  for (i in seq_len(K)) {
    fit = constrained_L2reg(X, Ys[[i]], rho)
    B[[i]] = as.matrix(fit$B)
    Fs[[i]] = fit$F
    mu[[i]] = fit$mu
    ## pool sum-of-squares and degrees of freedom across conditions
    err = err + fit$sigma2 * (N * M - 1)
    df = df + (N * M - 1)
  }
  sigma2 = err / df
  list(
    B = B,
    F = Fs,
    sigma2 = sigma2,
    mu = mu
  )
}
## cross-validation on ridge regression to estimate sigma2
#' @param nrho number of L2 penalty's coefficient
#' @param ncv number of cross-validation
#' @example
#' Ys = data$obs[c("Y1", "Y2")]
#' Xs = submatX(data)
#' M = data$var$Ng
#' N = data$var$N
#' sigma2 = getsigma2_L2reg_multi(Xs, Ys, nrho = 20, M = M, N = N)
## Cross-validated selection of the ridge penalty for the multi-condition
## model; returns the optimal rho, the pooled sigma2 at that rho, and the
## CV error curve.
#' @param X per-gene eQTL design matrices
#' @param Ys list of gene-expression matrices (one per condition)
#' @param nrho number of penalty candidates, log-spaced in [1e-6, 1e2]
#' @param ncv number of cross-validation folds
#' @param M number of genes; N number of samples
getsigma2_L2reg_multi = function(X,
                                 Ys,
                                 nrho = 10,
                                 ncv = 5,
                                 M,
                                 N) {
  rho_factors = 10 ^ (seq(-6, 2, length.out = nrho)) # "^" is the idiomatic power operator
  cv.err = matrix(0, nrow = nrho, ncol = ncv)
  ## random fold assignment per sample
  cv.fold = sample(seq(1, ncv), size = N, replace = T)
  for (irho in seq_len(nrho)) { # indexed loop replaces the manual irho counter
    rho = rho_factors[irho]
    for (cv in 1:ncv) {
      ytrain = lapply(Ys, function(y) {
        y[, cv.fold != cv, drop = F]
      })
      xtrain = lapply(X, function(x) {
        x[, cv.fold != cv, drop = F]
      })
      ytest = lapply(Ys, function(y) {
        y[, cv.fold == cv, drop = F]
      })
      xtest = lapply(X, function(x) {
        x[, cv.fold == cv, drop = F]
      })
      Ntrain = sum(cv.fold != cv)
      fit = constrained_L2reg_multi(xtrain, ytrain, rho, M, Ntrain)
      for (k in seq_along(Ys)) {
        ## held-out prediction error under condition k
        ftest = lapply(1:M, function(i) {
          crossprod(fit$F[[k]][[i]], xtest[[i]])
        })
        ftest = do.call(rbind, ftest)
        cv.err[irho, cv] = cv.err[irho, cv] +
          norm((diag(M) - fit$B[[k]]) %*% ytest[[k]] - ftest - fit$mu[[k]], type = "f")
      }
    }
  }
  cv.mean = rowMeans(cv.err)
  rho.min = rho_factors[which.min(cv.mean)]
  ## refit on the full data at the selected penalty
  fit = constrained_L2reg_multi(X, Ys, rho.min, M, N)
  list(
    rho.opt = rho.min,
    sigma2.opt = fit$sigma2[1],
    cv.ram = list(rho = rho_factors, cvm = cv.mean)
  )
}
##---------------------------------------
# utility functions for SML-fused_lasso #
##---------------------------------------
#' @param lambda lasso penalty
#' @param rho fused lasso penalty
#' @param w weight for lasso term
#' @param r weight for fused lasso term
#' @param c ci / det(B)
#' @param b bij[1,...,K]
## Componentwise objective for the two-condition fused-lasso SML update.
## Returns a closure obj(x, y), where x and y are candidate values of B[i,j]
## under conditions 1 and 2.  The log-det term of condition k appears only
## when its cofactor c[k] is nonzero (when c[k] == 0, B[i,j] does not affect
## det(I - B^k)); the four explicit branches of the original are collapsed
## into conditional terms with identical values.
#' @param N per-condition sample sizes (length 2)
#' @param c cofactor ratios mij = ci/det(I-B) per condition
#' @param b current bij values per condition
#' @param a1 linear coefficients of the residual term; a2 quadratic ones
#' @param lambda lasso penalty; rho fused-lasso penalty
#' @param w lasso weights; r fused-lasso weight; sigma2 noise variance
obj_multiSML = function(N, c, b, a1, a2, lambda, rho, w, r, sigma2) {
  a0 = 1 / c + b # a0[k] is finite (and used) only when c[k] != 0
  ## smooth + penalty part common to every branch of the original
  base = function(x, y) {
    -a1[1] * x - a1[2] * y +
      1 / 2 * (a2[1] * x ^ 2 + a2[2] * y ^ 2) +
      lambda * (w[1] * abs(x) + w[2] * abs(y)) +
      rho * r * (abs(x - y))
  }
  function(x, y) {
    logdet = 0
    if (c[1] != 0) {
      logdet = logdet - N[1] / 2 * log((a0[1] - x) ^ 2)
    }
    if (c[2] != 0) {
      logdet = logdet - N[2] / 2 * log((a0[2] - y) ^ 2)
    }
    sigma2 * logdet + base(x, y)
  }
}
## Stationarity conditions for the two-condition fused-lasso SML component
## update.  Returns a closure grad(t), where t = c(sign(x), sign(y),
## sign(x - y)) encodes the assumed sign pattern of (B1[i,j], B2[i,j]).
## Each returned entry is tagged with a class consumed by grad_solver():
##   "value" -> trivial/linear case (NOTE(review): grad_solver() discards the
##              stored numeric value and returns root 0 — confirm intent),
##   "grad2" -> coefficients (a, b, c) of a quadratic a*x^2 + b*x + c = 0,
##   "grad3" -> coefficients (a, b, c, d) of a cubic.
## The four outer branches distinguish which conditions contribute a log-det
## term (c[k] == 0 means B[i,j] does not affect det(I - B^k)).  Inside each
## branch: dxy != 0 solves x and y separately; dxy == 0 solves the fused
## x == y case with tau = shared sign.
grad_multiSML = function(N, c, b, a1, a2, lambda, rho, w, r, sigma2) {
  a0 = 1 / c + b # a0[k] is finite (and used) only when c[k] != 0
  if (c[1] == 0 & c[2] == 0) {
    ## neither condition has a log-det term: purely linear stationarity
    function(t) {
      xt = t[1]
      yt = t[2]
      dxy = t[3]
      if (dxy != 0) {
        x = if (xt != 0) {
          structure((a1[1] - lambda * w[1] * xt - rho * r * dxy) / a2[1], class = "value")
        } else {
          structure(0, class = "value")
        }
        y = if (yt != 0) {
          structure((a1[2] - lambda * w[2] * yt + rho * r * dxy) / a2[2], class = "value")
        } else {
          structure(0, class = "value")
        }
        list(x = x, y = y)
      } else {
        ## fused case: x == y with combined weights
        tau = xt
        wxy = w[1] + w[2]
        xy = if (tau != 0) {
          structure((a1[1] + a1[2] - lambda * wxy * tau) / (a2[1] + a2[2]), class = "value")
        } else {
          structure(0, class = "value")
        }
        list(xy = xy)
      }
    }
  } else if (c[1] != 0 & c[2] == 0) {
    ## only condition 1 contributes a log-det term: x is a quadratic root,
    ## y stays linear
    function(t) {
      xt = t[1]
      yt = t[2]
      dxy = t[3]
      if (dxy != 0) {
        x = if (xt != 0) {
          structure(
            list(
              a = -a2[1],
              b = a1[1] + a2[1] * a0[1] - lambda * w[1] * xt - rho * r * dxy,
              c = N[1] * sigma2 + (lambda * w[1] * xt + rho * r * dxy - a1[1]) * a0[1]
            ),
            class = "grad2"
          )
        } else {
          structure(0, class = "value")
        }
        y = if (yt != 0) {
          structure((a1[2] - lambda * w[2] * yt + rho * r * dxy) / a2[2], class = "value")
        } else {
          structure(0, class = "value")
        }
        list(x = x, y = y)
      } else {
        tau = xt
        wxy = w[1] + w[2]
        xy = if (tau != 0) {
          structure(list(
            a = -(a2[1] + a2[2]),
            b = (a1[1] + a1[2]) + (a2[1] + a2[2]) * a0[1] - lambda * wxy * tau,
            c = N[1] * sigma2 + (lambda * wxy * tau - a1[1] - a1[2]) * a0[1]
          ),
          class = "grad2")
        } else {
          structure(0, class = "value")
        }
        list(xy = xy)
      }
    }
  } else if (c[1] == 0 & c[2] != 0) {
    ## mirror case: only condition 2 contributes a log-det term
    function(t) {
      xt = t[1]
      yt = t[2]
      dxy = t[3]
      if (dxy != 0) {
        x = if (xt != 0) {
          structure((a1[1] - lambda * w[1] * xt - rho * r * dxy) / a2[1], class = "value")
        } else {
          structure(0, class = "value")
        }
        y = if (yt != 0) {
          structure(
            list(
              a = -a2[2],
              b = a1[2] + a2[2] * a0[2] - lambda * w[2] * yt + rho * r * dxy,
              c = N[2] * sigma2 + (lambda * w[2] * yt - rho * r * dxy - a1[2]) * a0[2]
            ),
            class = "grad2"
          )
        } else {
          structure(0, class = "value")
        }
        list(x = x, y = y)
      } else {
        tau = xt
        wxy = w[1] + w[2]
        xy = if (tau != 0) {
          ## NOTE(review): uses N[1] here where the symmetric branch above
          ## uses N[1] as well — confirm N[2] was not intended for this case
          structure(list(
            a = -(a2[1] + a2[2]),
            b = (a1[1] + a1[2]) + (a2[1] + a2[2]) * a0[2] - lambda * wxy * tau,
            c = N[1] * sigma2 + (lambda * wxy * tau - a1[1] - a1[2]) * a0[2]
          ),
          class = "grad2")
        } else {
          structure(0, class = "value")
        }
        list(xy = xy)
      }
    }
  } else {
    ## both conditions contribute log-det terms: x and y are quadratic roots
    ## separately; the fused case becomes a cubic
    function(t) {
      xt = t[1]
      yt = t[2]
      dxy = t[3]
      if (dxy != 0) {
        x = if (xt != 0) {
          structure(
            list(
              a = -a2[1],
              b = a1[1] + a2[1] * a0[1] - lambda * w[1] * xt - rho * r * dxy,
              c = N[1] * sigma2 + (lambda * w[1] * xt + rho * r * dxy - a1[1]) * a0[1]
            ),
            class = "grad2"
          )
        } else {
          structure(0, class = "value")
        }
        y = if (yt != 0) {
          structure(
            list(
              a = -a2[2],
              b = a1[2] + a2[2] * a0[2] - lambda * w[2] * yt + rho * r * dxy,
              c = N[2] * sigma2 + (lambda * w[2] * yt - rho * r * dxy - a1[2]) * a0[2]
            ),
            class = "grad2"
          )
        } else {
          structure(0, class = "value")
        }
        list(x = x, y = y)
      } else {
        tau = xt
        wxy = w[1] + w[2]
        xy = if (tau != 0) {
          structure(
            list(
              a = a2[1] + a2[2],
              b = lambda * wxy * tau - (a1[1] + a1[2]) - (a2[1] + a2[2]) * (a0[1] + a0[2]),
              c = (a1[1] + a1[2] - lambda * wxy * tau) * (a0[1] + a0[2]) + (a2[1] + a2[2]) * a0[1] * a0[2] - (N[1] + N[2]) * sigma2,
              d = (N[2] * a0[1] + N[1] * a0[2]) * sigma2 + (lambda * wxy * tau - (a1[1] + a1[2])) * a0[1] * a0[2]
            ),
            class = "grad3"
          )
        } else {
          structure(0, class = "value")
        }
        list(xy = xy)
      }
    }
  }
}
## Real roots of the general cubic a*x^3 + b*x^2 + c*x + d = 0.
## Normalizes to monic form, delegates to solver3P_, and snaps roots with
## magnitude below `eps` to exactly zero.
poly3_solver = function(a, b, c, d, eps = 1e-6) {
  roots = solver3P_(b / a, c / a, d / a)
  keep = abs(roots$x) > eps
  roots$x = roots$x * keep
  roots
}
# x^3 + ax^2 + bx + c = 0
## Solve the monic cubic x^3 + a*x^2 + b*x + c = 0 for its real roots using
## the trigonometric (Viete) method when all three roots are real, and the
## Cardano-style radical form otherwise.
#' @return list(n, x): n = number of real roots reported, x = those roots
#'   (unsorted).  n == 2 flags the near-degenerate case where the complex
#'   pair is numerically real.
solver3P_ = function(a, b, c) {
  a2 = a ^ 2
  q = (a2 - 3 * b) / 9
  r = (a * (2 * a2 - 9 * b) + 27 * c) / 54
  r2 = r ^ 2
  q3 = q ^ 3
  if (r2 <= q3) {
    ## three real roots: trigonometric formula
    t = r / sqrt(q3)
    ## clamp into [-1, 1] to protect acos() from rounding error
    if (t < -1) {
      t = -1
    }
    if (t > 1) {
      t = 1
    }
    t = acos(t)
    a = a / 3
    q = -2 * sqrt(q)
    list(n = 3, x = c(q * cos(t / 3) - a, q * cos((t + 2 * pi) / 3) - a, q * cos((t - 2 * pi) / 3) - a))
  } else {
    ## one real root plus a complex-conjugate pair.
    ## (Removed a stray literal `4` that sat here as a dead expression, and
    ## two dead assignments to an unused `Im` variable.)
    A = -(abs(r) + sqrt(r2 - q3)) ^ (1 / 3)
    if (r < 0) {
      A = -A
    }
    B = if (A == 0) {
      0
    } else {
      q / A
    }
    a = a / 3
    Re = -(A + B) / 2 - a # real part of the complex pair
    if (abs(Re) <= 1e-6) {
      ## complex pair is numerically real: report it as a second root
      list(n = 2, x = c(A + B - a, Re))
    } else {
      list(n = 1, x = c(A + B - a))
    }
  }
}
### call function for grad list
## Dispatch stored stationarity descriptors `g` (from grad_multiSML) to the
## matching polynomial solver and keep candidate roots consistent with the
## sign pattern `t` = c(sign(x), sign(y), sign(x - y)).
#' @param g list of descriptors classed "value", "grad2" or "grad3"
#' @param t sign condition vector
#' @return list of candidate list(x =, y =) pairs matching `t`
grad_solver = function(g, t) {
  gsolver_ = function(g) {
    ## inherits() replaces class(g) == "..." (robust to multi-class objects)
    if (inherits(g, "value")) {
      ## NOTE(review): the numeric stored in a "value" descriptor is
      ## discarded and the root 0 is returned, as in the original — confirm
      ## the analytic value was not meant to be propagated here.
      list(n = 1, x = 0)
    } else if (inherits(g, "grad2")) {
      do.call(poly2_solver, g)
    } else {
      do.call(poly3_solver, g)
    }
  }
  xt = t[1]
  yt = t[2]
  dxy = t[3]
  res = list()
  if (dxy != 0) {
    ## x and y differ: solve each coordinate, keep sign-consistent pairs
    cand.x = gsolver_(g[["x"]])
    cand.x = cand.x[["x"]][sign(cand.x[["x"]]) == xt]
    cand.y = gsolver_(g[["y"]])
    cand.y = cand.y[["x"]][sign(cand.y[["x"]]) == yt]
    i = 1
    for (x in cand.x) {
      for (y in cand.y) {
        if (sign(x - y) == dxy) {
          res[[i]] = list(x = x, y = y)
          i = i + 1
        }
      }
    }
  } else {
    ## fused case x == y: a single shared value
    cand.xy = gsolver_(g[["xy"]])
    cand.xy = cand.xy[["x"]][sign(cand.xy[["x"]]) == xt]
    i = 1
    for (xy in cand.xy) {
      res[[i]] = list(x = xy, y = xy)
      i = i + 1
    }
  }
  res
}
## solve SML problem by component-wise update
#' @param lambda lasso penalty
#' @param rho fused lasso penalty
#' @example
#' M = data$var$Ng
#' N = data$var$N
#' Ys = data$obs[c("Y1", "Y2")]
#' Xs = submatX(data)
#' sigma2 = getsigma2_L2reg_multi(Xs, Ys, nrho = 20, M = M, N = N)
#' params.init = constrained_L2reg_multi(Xs, Ys, sigma2$rho.opt, M, N)
#' params.opt = multiSML_cwise(Bs = params.init$B, fs = params.init$F, Ys = Ys, Xs = Xs,
#' sigma2 = params.init$sigma2[1], Ng = data$var$Ng,
#' lambda = 0.1, rho = 0.1, maxit = 1000)
## Two-condition sparse maximum-likelihood SEM with lasso + fused-lasso
## penalties, solved by componentwise (coordinate) updates: every
## off-diagonal entry B[[k]][i, j] is updated exactly by enumerating the 13
## feasible sign patterns of (x, y, x - y), solving each pattern's
## stationarity condition (grad_multiSML / grad_solver) and keeping the
## candidate with the smallest objective (obj_multiSML).
## Returns list(B (sparse), f, mu, niter, err).
multiSML_cwise = function(Bs,
                          fs,
                          Ys,
                          Xs,
                          sigma2,
                          Ng,
                          lambda,
                          rho,
                          weighted = TRUE,
                          wBs = inverse(Bs),
                          rB = flinv(Bs),
                          maxit = 100,
                          threshold = 1e-4,
                          verbose = 2) {
  std = centralize_mult(Xs, Ys)
  Xs = std$X
  Ys = std$Y
  meanXs = std$muX
  meanYs = std$muY
  K = length(Ys) # number of conditions = 2
  if (verbose == 2) {
    cat(sprintf("conditions must be restricted to 2.. K = %d\n", K))
  }
  ## update for eQTL coeffs
  ## Precompute, per gene and condition, the closed-form eQTL pieces:
  ## f = f0 - f1 %*% B[i,]
  f0 = vector("list", K)
  f1 = vector("list", K)
  for (i in 1:Ng) {
    Xi = Xs[[i]]
    Pi = solve(crossprod(Xi)) %*% t(Xi)
    for (k in 1:K) {
      yi_k = Ys[[k]][, i, drop = F] # n x 1 for gene i
      f0[[k]][[i]] = Pi %*% yi_k
      f1[[k]][[i]] = Pi %*% Ys[[k]] # f = f0 - f1 %*% B[i,]
    }
  }
  ## update for gnet coeffs
  niter = 1
  Ns = sapply(Ys, nrow)
  ImBs = lapply(Bs, function(B) {
    diag(Ng) - B
  })
  IBsinv = lapply(ImBs, solve)
  while (niter <= maxit) {
    Bs.prev = Bs
    fs.prev = fs
    for (i in 1:Ng) {
      ## cofactor column of (I-B)^{-1} for row i, per condition
      ci = lapply(IBsinv, function(IBi) {
        IBi[, i]
      })
      ## accumulated row changes, used conceptually for the rank-one inverse
      ## update (currently recomputed exactly below)
      dbi = lapply(1:K, function(k) {
        vector("numeric", Ng)
      })
      for (j in 1:Ng) {
        ## update B[[k]][i,j] for i != j
        if (i != j) {
          bij.prev = bij = sapply(Bs, function(B)
            (B[i, j]))
          ## adaptive lasso / fused weights (1 when weighted == FALSE)
          wij = sapply(wBs, function(w) {
            if (weighted) {
              w[i, j]
            } else {
              1
            }
          })
          rij = if (weighted) {
            rB[i, j]
          } else {
            1
          }
          mij = sapply(ci, function(c) {
            c[j]
          })
          ## row i of (I-B) with the j-th entry zeroed out
          bi = lapply(ImBs, function(ImB) {
            bi_k = ImB[i, ]
            bi_k[j] = 0
            bi_k
          })
          ## j-th column of Ys
          Yej = lapply(Ys, function(Y) {
            Y[, j, drop = F]
          })
          ## linear and quadratic coefficients of the residual in bij
          a1 = sapply(1:K, function(k) {
            crossprod(Ys[[k]] %*% bi[[k]] - Xs[[i]] %*% fs[[k]][[i]], Yej[[k]])
          })
          a2 = sapply(1:K, function(k) {
            crossprod(Yej[[k]])
          })
          ## a0 = 1/mij + bij.prev
          ## all feasible sign patterns of (sign(x), sign(y), sign(x - y))
          cond = list(
            c(1, 1, 1),
            c(1,-1, 1),
            c(1, 0, 1),
            c(-1,-1, 1),
            c(0,-1, 1),
            c(1, 1,-1),
            c(0, 1,-1),
            c(-1,-1,-1),
            c(-1, 0,-1),
            c(-1, 1,-1),
            c(1, 1, 0),
            c(-1,-1, 0),
            c(0, 0, 0)
          )
          obj = obj_multiSML(Ns, mij, bij.prev, a1, a2, lambda, rho, wij, rij, sigma2)
          grad = grad_multiSML(Ns, mij, bij.prev, a1, a2, lambda, rho, wij, rij, sigma2)
          ## collect stationary candidates over all sign patterns ...
          params = list()
          for (t in cond) {
            cand.grad = grad(t)
            params = c(params, grad_solver(cand.grad, t))
          }
          ## ... and keep the minimizer of the exact objective
          objval = sapply(params, function(args) {
            do.call(obj, args)
          })
          mix = which.min(objval)
          bij = unlist(params[[mix]])
          dbij = bij.prev - bij
          for (k in 1:K) {
            dbi[[k]][j] = dbij[k]
            Bs[[k]][i, j] = bij[k]
            ## rank-one rescaling of the cofactor column after the change
            ci[[k]] = ci[[k]] / (1 + dbij[k] * mij[k])
            ImBs[[k]] = diag(Ng) - Bs[[k]]
          }
        }
      } ## for(j in 1:Ng)
      ## (ImB + ei^T %*% dbi)^{-1}
      for (k in 1:K) {
        ## IBsinv[[k]] = IBsinv[[k]] - IBsinv[[k]][, i, drop = F] %*% dbi[[k]] %*% IBsinv[[k]] / (1 + dbi[[k]] %*% IBsinv[[k]][, i, drop = F])[1]
        ## exact re-inversion (the rank-one update is left commented out above)
        IBsinv[[k]] = solve(ImBs[[k]])
        fs[[k]][[i]] = f0[[k]][[i]] - f1[[k]][[i]] %*% Bs[[k]][i,]
      }
    } ## for(i in 1:Ng)
    ## relative change of B and f across the sweep.
    ## NOTE(review): denominators lack the "+ 1" guard used elsewhere — an
    ## all-zero Bs.prev would divide by zero; confirm.
    Berr = sum(sapply(1:K, function(k) {
      norm(Bs[[k]] - Bs.prev[[k]], type = "f") / norm(Bs.prev[[k]], type = "f")
    }))
    Ferr = sum(sapply(1:K, function(k) {
      sum(sapply(1:Ng, function(i) {
        norm(fs[[k]][[i]] - fs.prev[[k]][[i]], type = "f")
      })) / sum(sapply(1:Ng, function(i) {
        norm(fs.prev[[k]][[i]], type = "f")
      }))
    }))
    err = Berr + Ferr
    cat(sprintf("SML: iteration = %d, error = %f\n", niter, err))
    niter = niter + 1
    if (err < threshold || niter > maxit || is.nan(err)) {
      ## recover intercepts on the original scale, sparsify and stop
      mu = lapply(1:K, function(k) {
        (diag(Ng) - Bs[[k]]) %*% meanYs[[k]] - sapply(1:Ng, function(i) {
          meanXs[[i]] %*% fs[[k]][[i]]
        })
      })
      Bs = lapply(Bs, Matrix, sparse = T)
      break
    }
  } ## while(niter <= maxit)
  list(
    B = Bs,
    f = fs,
    mu = mu,
    niter = niter,
    err = err
  )
}
## utility functions for multiple conditions
#' @param Bs multiple gene regulatory networks (list)
#' @param fs multiple eQTL coefficient (list)
#' @param Ys multiple gene expression matrix (list)
#' @param Xs multiple eQTLs quantitive (list)
#' @param Ng number of genes
#' @param lambda lambda coefficient in lasso penalty term
#' @param rho rho coefficient in fused penalty term
#' @param type type of likelihood:
#' o objective function
#' c cross validation function
#' e independent error function
## Total squared residual of the fitted multi-condition SEM on centered data:
## sum over conditions k and genes i of
##   || y_i^k - B^k[i,-i] %*% Y^k[-i,] - f_i^k' X_i ||^2.
## (`Ns` is accepted for interface parity with SML_logLik but not used.)
SML_error = function(Xs, Ys, Bs, fs, Ng, Ns, K) {
  std = centralize_mult(Xs, Ys)
  X = lapply(std$X, t)
  Y = lapply(std$Y, t)
  err = 0
  for (k in 1:K) {
    for (i in 1:Ng) {
      resid = Y[[k]][i, , drop = F] -
        Bs[[k]][i, -i, drop = F] %*% Y[[k]][-i, , drop = F] -
        crossprod(fs[[k]][[i]], X[[i]])
      err = err + tcrossprod(resid)
    }
  }
  err
}
## Negative log-likelihood (up to constants) of the multi-condition SEM on
## centered data:
##   -sum_k Ns[k]/2 * log(det(I-B^k)^2) + RSS/(2*sigma2)
##   + Ng * sum(Ns)/2 * log(sigma2)
SML_logLik = function(Xs, Ys, Bs, fs, Ng, Ns, K, detIBs, sigma2) {
  std = centralize_mult(Xs, Ys)
  X = lapply(std$X, t)
  Y = lapply(std$Y, t)
  Ls = 0
  err = 0
  for (k in 1:K) {
    Ls = Ls - Ns[k] / 2 * log(detIBs[k] ^ 2)
    for (i in 1:Ng) {
      resid = Y[[k]][i, , drop = F] -
        Bs[[k]][i, -i, drop = F] %*% Y[[k]][-i, , drop = F] -
        crossprod(fs[[k]][[i]], X[[i]])
      err = err + tcrossprod(resid)
    }
  }
  Ls + err / (2 * sigma2) + Ng * sum(Ns) / 2 * log(sigma2)
}
#' @description proximal operator for fused lasso
#' argmin lambda * |w1 * x| + lambda * |w2 * y| + rho * r * |y - x| + c/2 (|x - u1|_2^2 + |y - u_2|_2^2)
#' @param lambda lasso parameter
#' @param rho fused lasso parameter
#' @note FLSA algorithm is used in this step
#' Proximal operator of the two-point fused lasso penalty (FLSA):
#'   argmin_{x,y} lambda*|w1*x| + lambda*|w2*y| + rho*r*|x - y|
#'               + c/2 * (|x - u1|^2 + |y - u2|^2)
#' Step 1 fuses: entries whose gap fits within the fusion band are set to
#' their midpoint, the rest are shifted toward each other.  Step 2
#' soft-thresholds: fused entries with the averaged weight, free ones with
#' their own weight.
prox_flsa = function(lambda, rho, c, us, ws, r) {
  gap = us[[1]] - us[[2]]
  ## fusion test uses the raw rho; the shift uses the capped value (as in
  ## the original ordering)
  fused = (abs(gap) <= 2 * rho * r / c)
  free = 1 - fused
  rho.capped = min(rho, 1e16)
  mid = (us[[1]] + us[[2]]) / 2
  shift = sign(gap) * rho.capped * r / c
  x = lapply(list(us[[1]] - shift, us[[2]] + shift), function(moved) {
    as.numeric(mid * fused + moved * free)
  })
  w.mean = (ws[[1]] + ws[[2]]) / 2
  soft = function(u, w) {
    pmax(u - lambda * w / c, 0) + pmin(u + lambda * w / c, 0)
  }
  lapply(1:2, function(i) {
    soft(x[[i]], w.mean) * fused + soft(x[[i]], ws[[i]]) * free
  })
}
## sigma2 estimation from SEM logLikihood function
## Pooled noise-variance estimate from the SEM residuals:
##   sigma2 = RSS / (Ng * sum(Ns)),
## computed on the data exactly as given (no centering).  Returns a scalar.
sigma2_sem = function(Xs, Ys, B, f, Ng, Ns, K) {
  X = lapply(Xs, t)
  Y = lapply(Ys, t)
  rss = 0
  for (k in 1:K) {
    Yk = Y[[k]]
    for (i in 1:Ng) {
      resid = Yk[i, , drop = F] -
        B[[k]][i, -i, drop = F] %*% Yk[-i, , drop = F] -
        crossprod(f[[k]][[i]], X[[i]])
      rss = rss + tcrossprod(resid)
    }
  }
  (rss / (Ng * sum(Ns)))[1]
}
## solve SML problem by block coordinate descent
#' @param lambda lambda hyper-parameter for lasso term
#' @param rho fused lasso hyper-parameter for fused lasso term
#' @param gamma invertible matrix stablize parameter gamma
#' M = data$var$Ng
#' N = data$var$N
#' Ys = data$obs[c("Y1", "Y2")]
#' Xs = submatX(data)
#' sigma2 = getsigma2_L2reg_multi(Xs, Ys, nrho = 20, M = M, N = N)
#' params.init = constrained_L2reg_multi(Xs, Ys, sigma2$rho.opt, M, N)
#' params.opt2 = multiSML_bcd(Bs = params.init$B, fs = params.init$F, Ys = Ys, Xs = Xs,
#' sigma2 = params.init$sigma2[1], Ng = data$var$Ng,
#' lambda = 0.1, rho = 0.1, maxit = 500, gamma = sigma2$rho.opt)
## Two-condition sparse maximum-likelihood SEM with lasso + fused-lasso
## penalties, solved by block (row-wise) coordinate descent: each row of both
## networks gets one joint proximal step via the FLSA operator (prox_flsa),
## with a shared Lipschitz constant; sigma2 and the penalized objective are
## refreshed after every sweep.
## BUG FIX: both logLik(...) calls previously omitted the `Ns` argument
## (7 args for an 8-parameter function), so `K` bound to `Ns` positionally
## and `K` was missing — a guaranteed runtime error.  `Ns` is now passed.
#' @return list(B (sparse), f, mu, sigma2, niter, err, detIB)
multiSML_bcd = function(Bs,
                        fs,
                        Ys,
                        Xs,
                        sigma2,
                        Ng,
                        lambda,
                        rho,
                        weighted = TRUE,
                        maxit = 100,
                        threshold = 1e-3,
                        verbose = 2) {
  std = centralize_mult(Xs, Ys)
  Xs = std$X
  Ys = std$Y
  meanXs = std$muX
  meanYs = std$muY
  K = length(Ys) # number of conditions = 2
  if (verbose == 2) {
    cat(sprintf("conditions must be restricted to 2.. K = %d\n", K))
  }
  ## update for eQTL row-wise (specific for genes)
  ## Per-gene, per-condition projection quantities, fixed over the run.
  f0 = vector("list", K)
  f1 = vector("list", K)
  Yp = vector("list", K)
  Hy = vector("list", K)
  Yp.maxEigen = vector("list", K)
  Ns = sapply(Ys, nrow)
  for (i in 1:Ng) {
    Xi = Xs[[i]]
    Pi = solve(crossprod(Xi)) %*% t(Xi)
    for (k in 1:K) {
      # specific condition
      yi_k = Ys[[k]][, i, drop = F] # n[k] x 1 for gene i
      Yi_k = Ys[[k]][, -i] # n[k] x (ng-1) (for specific gene i)
      f0[[k]][[i]] = Pi %*% yi_k
      f1[[k]][[i]] = Pi %*% Yi_k # f[[k]][[i]] = f0[[k]][[i]] - f1[[k]][[i]] %*% bi^k | bi^k = B[[k]][i,-i]
      Hi_k = diag(Ns[k]) - Xi %*% Pi # n[k] x n[k] projection matrix
      Yp[[k]][[i]] = t(Yi_k) %*% Hi_k %*% Yi_k # (ng - 1) x (ng - 1)
      Hy[[k]][[i]] = t(yi_k) %*% Hi_k %*% Yi_k # 1 x (ng - 1)
      ## maximized eigen-value for Yp
      Yp.maxEigen[[k]][[i]] = eigen(Yp[[k]][[i]])$values[1]
    }
  }
  ## update for gnet row-wise
  niter = 1
  ImBs = lapply(Bs, function(B) {
    diag(Ng) - B
  })
  detIBs = sapply(ImBs, det)
  IBsinv = lapply(ImBs, solve)
  ## adaptive lasso and fused-lasso weights (rB assumes exactly 2 conditions)
  wBs = lapply(Bs, function(B) {
    1 / abs(B)
  })
  rB = 1 / abs(Bs[[1]] - Bs[[2]])
  ## penalized objective for the stopping test (fix: pass Ns before K)
  Ls = logLik(detIBs, Bs, wBs, rB, lambda, rho, Ns, K) + Ng * sum(Ns) / 2 * log(sigma2)
  while (niter <= maxit) {
    Bs.prev = Bs
    fs.prev = fs
    Ls.prev = Ls
    for (i in 1:Ng) {
      ## sum(-Ns[k] * sigma2 * log(det(I-B[[k]])^2)) + \sum_{i=1}^{Ng} bi^T %*% Yp %*% bi - Hy %*% bi)
      ci = lapply(IBsinv, function(IBi) {
        # ci / det(I - B); from (I - B)^{-1}
        IBi[-i, i, drop = F]
      })
      bi = lapply(Bs, function(B) {
        t(B[i, -i, drop = F])
      })
      gi = lapply(1:K, function(k) {
        grad = grad_rwise_SML(Ns[k], ci[[k]], Yp[[k]][[i]], Hy[[k]][[i]], sigma2[1])
        grad(bi[[k]])
      })
      ## Lipschitz moduli for row-i: use the max over conditions so one step
      ## size is valid for both networks
      Lis = sapply(1:K, function(k) {
        gtg = tcrossprod(ImBs[[k]][-i,])
        oi = chol2inv(gtg)
        deti = det(gtg)
        gii = ImBs[[k]][-i, -i]
        si = ImBs[[k]][-i, i, drop = F]
        c2i = sum((ci[[k]] * detIBs[k]) ^ 2)
        lips_rwise_SML(Ns[k],
                       oi,
                       gii,
                       si,
                       c2i,
                       deti,
                       Yp.maxEigen[[k]][[i]],
                       sigma2,
                       Ng)[1]
      })
      Li = max(Lis)
      ui = lapply(1:K, function(k) {
        bi[[k]] - gi[[k]] / Li
      })
      wBi = lapply(wBs, function(wB) {
        wB[i, -i]
      })
      rBi = rB[i, -i]
      ## joint fused-lasso proximal step for both conditions' row i
      xi = prox_flsa(lambda, rho, Li, ui, wBi, rBi)
      for (k in 1:K) {
        Bs[[k]][i, -i] = xi[[k]]
        dbi = Bs.prev[[k]][i,] - Bs[[k]][i,]
        ## Sherman-Morrison inverse update, then rank-one determinant update
        IBsinv[[k]] = IBsinv[[k]] - IBsinv[[k]][, i, drop = F] %*% dbi %*% IBsinv[[k]] / (1 + dbi %*% IBsinv[[k]][, i, drop = F])[1]
        ImBs[[k]] = diag(Ng) - Bs[[k]]
        detIBs[k] = (ImBs[[k]][i,] %*% IBsinv[[k]][, i, drop = F])[1] * detIBs[k]
        ## IBsinv[[k]] = solve(ImBs[[k]])
        fs[[k]][[i]] = f0[[k]][[i]] - f1[[k]][[i]] %*% Bs[[k]][i, -i]
      }
    } # row-wise update
    ## relative change in B and f.
    ## NOTE(review): denominators lack the "+ 1" guard used in the iPALM
    ## variant — an all-zero Bs.prev would divide by zero; confirm.
    Berr = sum(sapply(1:K, function(k) {
      norm(Bs[[k]] - Bs.prev[[k]], type = "f") / norm(Bs.prev[[k]], type = "f")
    }))
    Ferr = sum(sapply(1:K, function(k) {
      sum(sapply(1:Ng, function(i) {
        norm(fs[[k]][[i]] - fs.prev[[k]][[i]], type = "f")
      })) / sum(sapply(1:Ng, function(i) {
        norm(fs.prev[[k]][[i]], type = "f")
      }))
    }))
    err = Berr + Ferr
    sigma2 = sigma2_sem(Xs, Ys, Bs, fs, Ng, Ns, K)
    ## penalized objective after the sweep (fix: pass Ns before K)
    Ls = logLik(detIBs, Bs, wBs, rB, lambda, rho, Ns, K) + Ng * sum(Ns) / 2 * log(sigma2)
    Lerr = abs(Ls.prev - Ls) / (1 + abs(Ls.prev))
    if (verbose >= 2) {
      cat(
        sprintf(
          "SML: iteration = %d, error = %f, logLik = %f, sigma2 = %f\n",
          niter,
          err,
          Ls,
          sigma2
        )
      )
    }
    niter = niter + 1
    if ((err <= threshold & Lerr <= threshold) || niter > maxit) {
      ## recover intercepts on the original scale, sparsify and stop
      mu = lapply(1:K, function(k) {
        (diag(Ng) - Bs[[k]]) %*% meanYs[[k]] - sapply(1:Ng, function(i) {
          meanXs[[i]] %*% fs[[k]][[i]]
        })
      })
      sigma2 = sigma2_sem(Xs, Ys, Bs, fs, Ng, Ns, K)
      Bs = lapply(Bs, Matrix, sparse = T)
      break
    }
  } # while(niter <= maxit)
  list(
    B = Bs,
    f = fs,
    mu = mu,
    sigma2 = sigma2,
    niter = niter,
    err = err,
    detIB = detIBs
  )
}
## stepwise update sigma2
## Total squared residual (RSS) across all conditions and genes, on the data
## exactly as given (no centering).  (`Ns` is accepted for interface parity
## with sigma2_sem but not used.)
#' @return 1x1 matrix holding the total RSS
err2_sem = function(Xs, Ys, B, f, Ng, Ns, K) {
  X = lapply(Xs, t)
  Y = lapply(Ys, t)
  err2 = 0
  for (k in 1:K) {
    ## bug fix: the loop bound was the undefined global `M`; it must be the
    ## `Ng` parameter (number of genes), as in sigma2_sem
    for (i in 1:Ng) {
      Xi = X[[i]] # sk x N
      bi = B[[k]][i, -i, drop = F] # 1 x (ng-1)
      yi = Y[[k]][i, , drop = F] # 1 x N
      Yi = Y[[k]][-i, , drop = F] # (ng-1) x N
      fi = f[[k]][[i]] # sk x 1
      err2 = err2 + tcrossprod(yi - bi %*% Yi - crossprod(fi, Xi))
    }
  }
  err2
}
## utility function for the objective function
## B-dependent terms of the penalized objective:
##   -sum_k Ns[k]/2 * log(det(I-B^k)^2)
##   + lambda * weighted L1 + rho * weighted fused penalty,
## with diagonals excluded from both penalties.  The fused term assumes
## exactly two conditions.  NOTE: this masks stats::logLik in this file.
logLik = function(detIBs, Bs, ws, r, lambda, rho, Ns, K) {
  rho.capped = min(rho, 1e16) # guard against Inf fused weights
  fused = rho.capped * r * abs(Bs[[2]] - Bs[[1]])
  sparse = 0
  det.part = 0
  for (k in 1:K) {
    sparse = sparse + lambda * (ws[[k]] * abs(Bs[[k]]))
    det.part = det.part - Ns[k] / 2 * log(detIBs[k] ^ 2)
  }
  diag(sparse) = 0
  diag(fused) = 0
  det.part + sum(sparse) + sum(fused)
}
## inverse
## Elementwise adaptive-lasso weights 1/|B| for each condition's network.
inverse = function(Bs) {
  lapply(Bs, function(B) abs(B) ^ (-1))
}
## invone
## Unweighted (all-ones) lasso weight matrices, with Inf on the diagonal so
## self-loops are never selected.
invone = function(Bs) {
  lapply(Bs, function(B) {
    weights = array(1, dim = dim(B))
    diag(weights) = Inf
    weights
  })
}
## flinv
## Adaptive fused-lasso weights: elementwise 1/|B1 - B2| (Inf where the two
## networks agree).
flinv = function(Bs) {
  abs(Bs[[1]] - Bs[[2]]) ^ (-1)
}
## flone
## Unweighted fused-lasso weight matrix: all ones with an Inf diagonal.
## Dimensions come from nrow(Bs[[1]]) and ncol(Bs[[2]]), as in the original
## (the two networks are assumed to share dimensions).
flone = function(Bs) {
  ones = matrix(1, nrow = nrow(Bs[[1]]), ncol = ncol(Bs[[2]]))
  diag(ones) = Inf
  ones
}
## solve SML problem by block coordinate descent by backtracking inert-PALM
#' @param lambda lambda hyper-parameter for lasso term
#' @param rho fused lasso hyper-parameter for fused lasso term
#' @param gamma invertible matrix stablize parameter gamma
#' params.opt4 = multiSML_iPALM(Bs = params.init$B, fs = params.init$F, Ys = Ys, Xs = Xs,
#' sigma2 = params.init$sigma2[1], Ng = data$var$Ng,
#' lambda = 0.1, rho = 0.1, maxit = 500)
multiSML_iPALM = function(Bs,
fs,
Ys,
Xs,
sigma2,
Ng,
lambda,
rho,
wBs = inverse(Bs),
rB = flinv(Bs),
maxit = 100,
acc = TRUE,
inertial = inertial_pars("lin"),
threshold = 1e-3,
sparse = FALSE,
use.strict = TRUE,
verbose = 2) {
std = centralize_mult(Xs, Ys)
Xs = std$X
Ys = std$Y
meanXs = std$muX
meanYs = std$muY
K = length(Ys) # number of conditions = 2
if (verbose == 2) {
cat(sprintf("conditions must be restricted to 2.. K = %d\n", K))
}
## update for eQTL row-wise (specific for genes)
f0 = vector("list", K)
f1 = vector("list", K)
Yp = vector("list", K)
Hy = vector("list", K)
Yp.maxEigen = vector("list", K)
Ns = sapply(Ys, nrow)
for (i in 1:Ng) {
Xi = Xs[[i]]
Pi = solve(crossprod(Xi)) %*% t(Xi)
for (k in 1:K) {
# specific condition
yi_k = Ys[[k]][, i, drop = F] # n[k] x 1 for gene i
Yi_k = Ys[[k]][, -i] # n[k] x (ng-1) (for specific gene i)
f0[[k]][[i]] = Pi %*% yi_k
f1[[k]][[i]] = Pi %*% Yi_k # f[[k]][[i]] = f0[[k]][[i]] - f1[[k]][[i]] %*% bi^k | bi^k = B[[k]][i,-i]
Hi_k = diag(Ns[k]) - Xi %*% Pi # n[k] x n[k] projection matrix
Yp[[k]][[i]] = t(Yi_k) %*% Hi_k %*% Yi_k # (ng - 1) x (ng - 1)
Hy[[k]][[i]] = t(yi_k) %*% Hi_k %*% Yi_k # 1 x (ng - 1)
## maximized eigen-value for Yp
Yp.maxEigen[[k]][[i]] = eigen(Yp[[k]][[i]])$values[1]
}
}
## update for gnet row-wise
niter = 1
ImBs = lapply(Bs, function(B) {
diag(Ng) - B
})
detIBs = sapply(ImBs, det)
IBsinv = lapply(ImBs, solve)
Ls = logLik(detIBs, Bs, wBs, rB, lambda, rho, Ns, K) + N * sum(Ns) / 2 * log(sigma2)
## history
Bs.prevs = list(Bs, Bs)
inert = acc
while (niter <= maxit) {
inert.pars = inertial(niter)
Bs.inert = if (inert) {
lapply(1:K, function(k) {
Bs.prevs[[2]][[k]] + inert.pars * (Bs.prevs[[2]][[k]] - Bs.prevs[[1]][[k]])
})
} else {
Bs
}
fs.prev = fs
Ls.prev = Ls
for (i in 1:Ng) {
## sum(-Ns[k] * sigma2 * log(det(I-B[[k]])^2)) + \sum_{i=1}^{Ng} bi^T %*% Yp %*% bi - Hy %*% bi)
ci = lapply(IBsinv, function(IBi) {
# ci / det(I - B); from (I - B)^{-1}
IBi[-i, i, drop = F]
})
bi = lapply(Bs.inert, function(B.inert) {
t(B.inert[i, -i, drop = F])
})
gi = lapply(1:K, function(k) {
grad = grad_rwise_SML(Ns[k], ci[[k]], Yp[[k]][[i]], Hy[[k]][[i]], sigma2[1])
grad(bi[[k]])
})
## Lipschitz moduli for row-i
Lis = sapply(1:K, function(k) {
gtg = tcrossprod(ImBs[[k]][-i,])
oi = chol2inv(gtg)
deti = crossprod(IBsinv[[k]][,i])[1] * (detIBs[k]^2)
gii = ImBs[[k]][-i, -i]
si = ImBs[[k]][-i, i, drop = F]
c2i = sum((ci[[k]] * detIBs[k]) ^ 2)
Li = lips_rwise_SML(Ns[k],
oi,
gii,
si,
c2i,
deti,
Yp.maxEigen[[k]][[i]],
sigma2,
Ng)[1]
Li
})
Li = max(Lis)
Li = (1 + 2 * inert.pars) * Li / (2 * (1 - inert.pars))
detZero = TRUE
cl = 1
while(detZero) {
ui = lapply(1:K, function(k) {
bi[[k]] - gi[[k]] / (cl * Li)
})
wBi = lapply(wBs, function(wB) {
wB[i, -i]
})
rBi = rB[i, -i]
xi = prox_flsa(lambda, rho, Li, ui, wBi, rBi)
dIBu = sapply(1:K, function(k) {
IBsinv[[k]][i, i] - (t(xi[[k]]) %*% IBsinv[[k]][-i, i, drop = F])[1]
})
cl = cl * 2
detZero = any(dIBu == 0)
}
for (k in 1:K) {
Bs[[k]][i, -i] = xi[[k]]
ImBs[[k]] = diag(Ng) - Bs[[k]]
detIBs[k] = (ImBs[[k]][i,] %*% IBsinv[[k]][, i, drop = F])[1] * detIBs[k]
# detIBs[k] = det(ImBs[[k]])
dbi = Bs.prevs[[2]][[k]][i,] - Bs[[k]][i,]
# IBsinv[[k]] = solve(ImBs[[k]])
IBsinv[[k]] = IBsinv[[k]] - IBsinv[[k]][, i, drop = F] %*% dbi %*% IBsinv[[k]] / (1 + dbi %*% IBsinv[[k]][, i, drop = F])[1]
fs[[k]][[i]] = f0[[k]][[i]] - f1[[k]][[i]] %*% Bs[[k]][i, -i]
}
# sigma2 = sigma2_sem(Xs, Ys, Bs, fs, Ng, Ns, K)
} # row-wise update
Berr = sum(sapply(1:K, function(k) {
norm(Bs[[k]] - Bs.prevs[[2]][[k]], type = "f") / (1 + norm(Bs.prevs[[2]][[k]], type = "f"))
}))
Ferr = sum(sapply(1:K, function(k) {
sum(sapply(1:Ng, function(i) {
norm(fs[[k]][[i]] - fs.prev[[k]][[i]], type = "f")
})) / (1 + sum(sapply(1:Ng, function(i) {
norm(fs.prev[[k]][[i]], type = "f")
})))
}))
err = Berr + Ferr
sigma2 = sigma2_sem(Xs, Ys, Bs, fs, Ng, Ns, K)
Ls = logLik(detIBs, Bs, wBs, rB, lambda, rho, Ns, K) + Ng * sum(Ns) / 2 * log(sigma2)
Lerr = abs(Ls.prev - Ls) / (1 + abs(Ls.prev))
# inert = ifelse(Lerr < 1e-6, FALSE, acc)
if (verbose >= 2) {
cat(
sprintf(
"SML: iteration = %d, error = %f, logLik = %f, sigma2 = %f, inert=%s\n",
niter,
err,
Ls,
sigma2,
inert
)
)
}
niter = niter + 1
Bs.prevs = list(Bs.prevs[[2]], Bs)
opt.cond = if (use.strict) {
(err < threshold && Lerr < threshold)
} else {
(err < threshold || Lerr < threshold)
}
if (opt.cond || niter > maxit || is.nan(err)) {
mu = lapply(1:K, function(k) {
(diag(Ng) - Bs[[k]]) %*% meanYs[[k]] - sapply(1:Ng, function(i) {
meanXs[[i]] %*% fs[[k]][[i]]
})
})
sigma2 = sigma2_sem(Xs, Ys, Bs, fs, Ng, Ns, K)
if (sparse) {
Bs = lapply(Bs, Matrix, sparse = T)
}
break
}
} # while(niter <= maxit)
list(
B = Bs,
f = fs,
mu = mu,
sigma2 = sigma2,
niter = niter,
err = err,
detIB = detIBs
)
}
## cross-validation and EBIC for hyper-parameter tuning
## lambda max can be estimated
#' @description get_lambda.max
#' @example
#' lamax = get_lambda.max(params.init$B, Ys, Xs, Ng)
#' @description get_lambda.max
#' Estimate the smallest lasso penalty that zeroes the whole network:
#' the largest (weighted) absolute likelihood gradient at B = 0 after
#' regressing the per-gene eQTL effects out of the expression data.
#' @param Bs warm-start networks (used only to build the adaptive weights)
#' @param Ys list of K expression matrices; Xs per-gene genotype matrices
#' @param Ng number of genes
#' @param weighted use adaptive (inverse-|B|) weights instead of ones
#' @return scalar lambda.max
#' @example
#' lamax = get_lambda.max(params.init$B, Ys, Xs, Ng)
get_lambda.max = function(Bs, Ys, Xs, Ng, weighted = TRUE) {
  std = centralize_mult(Xs, Ys)
  Xs = std$X ## N x sk
  Ys = std$Y ## N x p
  K = length(Ys)
  Ns = sapply(Ys, nrow)
  R = vector("list", K) ## per-condition residual matrices, Ng x N
  w = if (weighted) {
    inverse(Bs)
  } else {
    invone(Bs)
  }
  for (k in 1:K) {
    R[[k]] = matrix(0, nrow = Ng, ncol = Ns[k])
  }
  for (i in 1:Ng) {
    Xi = Xs[[i]]
    # FIX: reuse the projector Pi below; the original recomputed
    # solve(crossprod(Xi)) %*% t(Xi) from scratch for every condition k.
    Pi = solve(crossprod(Xi)) %*% t(Xi)
    for (k in 1:K) {
      yi = Ys[[k]][, i, drop = F] # n x 1
      fi = Pi %*% yi              # OLS eQTL coefficients for gene i
      Xf = Xi %*% fi              # fitted eQTL contribution, n x 1
      R[[k]][i, ] = yi - Xf
    }
  }
  # Pooled residual variance over all genes and samples.
  err = 0
  for (k in 1:K) {
    err = err + norm(R[[k]], type = "f") ^ 2
  }
  sigma2 = err / (Ng * sum(Ns))
  Ry = vector("list", K)
  for (k in 1:K) {
    Ry[[k]] = R[[k]] %*% Ys[[k]]
    Ry[[k]] = abs(Ry[[k]] / sigma2) / w[[k]]
  }
  max(sapply(Ry, max))
}
## cross-validation and EBIC for hyper-parameter tuning
## rho max can be estimated, rho is the fused lasso
## regularized hyper parameter
#' @description get_rho.max
#' @example
#' rhomax = get_rho.max(params.init$B, params.init$F, Ys, Xs, params.init$sigma2[1], data$var$Ng)
#' @description get_rho.max
#' Estimate the smallest fused-lasso penalty rho that collapses both
#' conditions onto a single network: fit with rho = Inf (no lasso), then
#' take the largest weighted absolute gradient of the shared solution.
#' @example
#' rhomax = get_rho.max(params.init$B, params.init$F, Ys, Xs, params.init$sigma2[1], data$var$Ng)
get_rho.max = function(Bs, fs, Ys, Xs, sigma2, Ng, weighted = TRUE) {
  if (weighted) {
    wBs = inverse(Bs)
    rB = flinv(Bs)
  } else {
    wBs = invone(Bs)
    rB = flone(Bs)
  }
  # Fully-fused fit: rho = Inf ties the two networks together.
  fused.fit = multiSML_iPALM(
    Bs,
    fs,
    Ys,
    Xs,
    sigma2,
    Ng,
    lambda = 0,
    rho = Inf,
    wBs = wBs,
    rB = rB,
    maxit = 2000,
    threshold = 1e-4,
    use.strict = F,
    sparse = T,
    verbose = 1
  )
  # Weights are built from the ORIGINAL Bs, before it is overwritten below.
  weight.rho = if (weighted) {
    flinv(Bs)
  } else {
    flone(Bs)
  }
  Bs = fused.fit$B[[1]]   # the shared network (conditions coincide)
  fs = fused.fit$f
  sigma2 = fused.fit$sigma2
  std = centralize_mult(Xs, Ys)
  Xs = std$X ## Ng (n x sk)
  Ys = std$Y ## n x ng
  K = length(Ys)
  Ns = sapply(Ys, nrow)
  Dx = vector("list", K)
  for (k in 1:K) {
    # Gradient of the log-likelihood at the shared solution:
    # -Ns[k] (I - B)^{-T} + ((I - B) Y'Y - F X Y) / sigma2
    gramY = crossprod(Ys[[k]]) ## Y %*% t(Y)
    fittedFX = matrix(0, nrow = Ng, ncol = Ns[k]) ## F %*% X (p x n)
    for (i in 1:Ng) {
      fittedFX[i, ] = as.numeric(Xs[[i]] %*% fs[[k]][[i]])
    }
    detTerm = -Ns[k] * t(solve(diag(Ng) - Bs))
    Dx[[k]] = abs(detTerm + ((diag(Ng) - Bs) %*% gramY - fittedFX %*% Ys[[k]]) / sigma2) / weight.rho
    diag(Dx[[k]]) = -Inf  # self-loops are never penalized
  }
  max(c(max(Dx[[1]]), max(Dx[[2]])))
}
## cross validation for hyper-parameter tuning
#' @description 5-fold cross-validation
#' @param dyn dynamic updated rho.max by given lambda
#' @example
#' cv.params = cv_multiSML(Bs = params.init$B, fs = params.init$F, Ys = Ys, Xs = Xs, sigma2 = params.init$sigma2[1], Ng = data$var$Ng, nlambda = 20, nrho = 20)
## 5-fold cross-validation of multiSML_iPALM over a (rho, lambda) grid.
## For every grid point, accumulates per-fold held-out prediction error
## and log-likelihood; optimLambda_cv consumes the result.
#' @param Bs,fs warm-start network / eQTL coefficients (e.g. ridge init)
#' @param Ys list of K expression matrices (genes x samples in columns)
#' @param Xs genotype matrices shared across conditions
#' @param sigma2 initial noise-variance estimate
#' @param Ng number of genes
#' @param nlambda,nrho grid sizes for the lasso / fused-lasso penalties
#' @return list(opt.hyperparams, cverrs, loglik), each indexed by grid point
cv_multiSML = function(Bs,
                       fs,
                       Ys,
                       Xs,
                       sigma2,
                       Ng,
                       nlambda = 20,
                       nrho = 20,
                       threshold = 1e-4,
                       verbose = 1) {
  # Penalty grids: log-spaced, decreasing from the data-driven maxima.
  lambda.max = get_lambda.max(Bs, Ys, Xs, Ng)
  lambda.factors = 10 ^ seq(0, -4, length.out = nlambda) * lambda.max
  wBs = inverse(Bs)
  rB = flinv(Bs)
  rho.max = get_rho.max(Bs, fs, Ys, Xs, sigma2, Ng)
  rho.factors = 10 ^ seq(0, -4, length.out = nrho) * rho.max
  ncv = 5
  Ns = sapply(Ys, ncol)  # samples are in columns here
  K = length(Ys)
  Ytrain = vector("list", ncv)
  Xtrain = vector("list", ncv)
  Ytest = vector("list", ncv)
  Xtest = vector("list", ncv)
  # One fold assignment shared by all conditions: assumes every condition
  # has Ns[1] matched sample columns -- TODO confirm with callers.
  cv.fold = sample(seq(1, ncv), size = Ns[1], replace = T)
  for (i in 1:ncv) {
    Ytrain[[i]] = lapply(Ys, function(y) {
      y[, cv.fold != i, drop = F]
    })
    Xtrain[[i]] = lapply(Xs, function(x) {
      x[, cv.fold != i, drop = F]
    })
    Ytest[[i]] = lapply(Ys, function(y) {
      y[, cv.fold == i, drop = F]
    })
    Xtest[[i]] = lapply(Xs, function(x) {
      x[, cv.fold == i, drop = F]
    })
  }
  cverrs = vector("list", nrho * nlambda)
  cvlls = vector("list", nrho * nlambda)
  hyper.params = list()
  for (cv in 1:ncv) {
    params.opt = list()
    Nc = sapply(Ytest[[cv]], ncol)
    ix = 1
    for (rho in rho.factors) {
      for (lambda in lambda.factors) {
        cat(sprintf("lambda = %4f, rho = %4f, foldid = %d\n", lambda, rho, cv))
        # Warm start: the first lambda of each rho row (ix %% nlambda == 1,
        # assuming nlambda > 1) starts cold from the supplied (Bs, fs);
        # subsequent grid points reuse the previous solution.
        if (ix %% nlambda == 1) {
          params.opt[[ix]] = multiSML_iPALM(
            Bs,
            fs,
            Ytrain[[cv]],
            Xtrain[[cv]],
            sigma2,
            Ng,
            lambda,
            rho,
            wBs,
            rB,
            maxit = 1000,
            threshold = threshold,
            acc = TRUE,
            sparse = FALSE,
            use.strict = FALSE,
            verbose = verbose
          )
        } else {
          params.opt[[ix]] = multiSML_iPALM(
            params.opt[[ix - 1]]$B,
            params.opt[[ix - 1]]$f,
            Ytrain[[cv]],
            Xtrain[[cv]],
            params.opt[[ix - 1]]$sigma2,
            Ng,
            lambda,
            rho,
            wBs,
            rB,
            maxit = 1000,
            threshold = threshold,
            acc = TRUE,
            sparse = FALSE,
            use.strict = FALSE,
            verbose = verbose
          )
        }
        # Held-out likelihood and prediction error for this grid point.
        loglik = SML_logLik(
          Xtest[[cv]],
          Ytest[[cv]],
          params.opt[[ix]]$B,
          params.opt[[ix]]$f,
          Ng,
          Nc,
          K,
          params.opt[[ix]]$detIB,
          params.opt[[ix]]$sigma2
        )[1]
        err = SML_error(Xtest[[cv]],
                        Ytest[[cv]],
                        params.opt[[ix]]$B,
                        params.opt[[ix]]$f,
                        Ng,
                        Nc,
                        K)[1]
        cverrs[[ix]] = c(cverrs[[ix]], err)
        cvlls[[ix]] = c(cvlls[[ix]], loglik)
        if (cv == 1) {
          # record the grid once (identical for every fold)
          hyper.params[[ix]] = c(lambda, rho)
        }
        ix = ix + 1
      }
    }
  }
  list(opt.hyperparams = hyper.params,
       cverrs = cverrs,
       loglik = cvlls)
}
## utility function
## Arrange cross-validation means on the lambda x rho grid and draw a
## plotly surface (log10 error surface, or raw log-likelihood surface).
#' @param cvparams output of cv_multiSML
#' @param type which CV statistic to plot: "err" or "loglik"
#' @return list(lambda, rho, cvm, surf)
cvsurface = function(cvparams, type = c("err", "loglik")) {
  # FIX: `type` defaults to a length-2 vector; `if (type == "err")` on it
  # is an error in R >= 4.2. match.arg() collapses it to a single choice.
  type = match.arg(type)
  cvm = if (type == "err") {
    cvparams$cverrs
  } else {
    cvparams$loglik
  }
  cvfuns = data.frame(
    lambda = sapply(cvparams$opt.hyperparams, `[`, 1),
    rho = sapply(cvparams$opt.hyperparams, `[`, 2),
    cvmean = sapply(cvm, mean),
    cvsd = sapply(cvm, sd)
  )
  lambda = sort(unique(cvfuns$lambda))
  rho = sort(unique(cvfuns$rho))
  cvmean = matrix(nrow = length(lambda), ncol = length(rho))
  # Fill the grid matrix directly (replaces the apply() + `<<-` trick).
  for (r in seq_len(nrow(cvfuns))) {
    row.ix = which(lambda == cvfuns$lambda[r])
    col.ix = which(rho == cvfuns$rho[r])
    cvmean[row.ix, col.ix] = cvfuns$cvmean[r]
  }
  require(plotly)
  if (type == "err") {
    surface = plot_ly(x = log10(lambda),
                      y = log10(rho),
                      z = log10(cvmean)) %>% add_surface()
  } else {
    surface = plot_ly(x = log10(lambda),
                      y = log10(rho),
                      z = cvmean) %>% add_surface()
  }
  list(
    lambda = lambda,
    rho = rho,
    cvm = cvmean,
    surf = surface
  )
}
## utility functions
## pick lambda
## Pick (lambda, rho) from cross-validation results, either at the CV
## minimum (se = FALSE) or by the one-standard-error rule (se = TRUE).
#' @param cvparams output of cv_multiSML
#' @param type CV statistic to minimize: "err" or "loglik"
#' @param se apply the 1-SE rule instead of taking the exact minimum
#' @param fused.sparse prefer the sparsest fused solution (largest rho)
#'   within 1 SE; otherwise prefer the sparsest lasso solution (largest lambda)
#' @return list(lambda, rho)
optimLambda_cv = function(cvparams, type = c("err", "loglik"), se = TRUE, fused.sparse = TRUE) {
  # FIX: comparing the length-2 default with `==` inside `if` is an error
  # in R >= 4.2; match.arg() picks a single choice.
  type = match.arg(type)
  cvm = if (type == "err") {
    cvparams$cverrs
  } else {
    cvparams$loglik
  }
  cvfuns = data.frame(
    lambda = sapply(cvparams$opt.hyperparams, `[`, 1),
    rho = sapply(cvparams$opt.hyperparams, `[`, 2),
    cvmean = sapply(cvm, mean),
    cvsd = sapply(cvm, sd)
  )
  cv.min = which.min(cvfuns$cvmean)
  cv.1se = cvfuns[cv.min, 3] + cvfuns[cv.min, 4]
  cvfun.1se = cvfuns[cvfuns$cvmean <= cv.1se, c(1, 2, 3)]
  if (fused.sparse) {
    rho.1se = max(cvfun.1se[, 2])
    # FIX: which.min over the rho-filtered subset must index that same
    # subset; the original indexed the unfiltered lambda vector.
    sub = cvfun.1se[cvfun.1se[, 2] == rho.1se, , drop = FALSE]
    lambda.1se = sub[which.min(sub[, 3]), 1]
  } else {
    lambda.1se = max(cvfun.1se[, 1])
    # FIX: same subset/index mismatch as above, for rho.
    sub = cvfun.1se[cvfun.1se[, 1] == lambda.1se, , drop = FALSE]
    rho.1se = sub[which.min(sub[, 3]), 2]
  }
  if (se) {
    list(lambda = lambda.1se, rho = rho.1se)
  } else {
    list(lambda = cvfuns[cv.min, 1], rho = cvfuns[cv.min, 2])
  }
}
## optimLambda_eBIC
## Select the (lambda, rho) pair that minimizes the extended BIC.
#' @param eBICparams list(opt.hyperparams = list of c(lambda, rho), eBIC = numeric)
#' @return list(lambda, rho) at the eBIC minimum
optimLambda_eBIC = function(eBICparams) {
  lambdas = sapply(eBICparams$opt.hyperparams, `[`, 1)
  rhos = sapply(eBICparams$opt.hyperparams, `[`, 2)
  best = which.min(eBICparams$eBIC)
  list(lambda = lambdas[best], rho = rhos[best])
}
## optimGamma_eBIC
## Sweep the eBIC mixing parameter gamma over 0, 0.1, ..., 1 and record the
## optimal (lambda, rho) plus the minimum eBIC value at each gamma.
#' @param BICparams list(opt.hyperparams, BIC, extend) from eBIC_multSML
#' @return list(gamma, optimLambda, eBIC)
optimGamma_eBIC = function(BICparams) {
  gammas = seq(0, 1, by = 0.1)
  optim = vector("list", length(gammas))
  eBIC = vector("numeric", length(gammas))
  for (g in seq_along(gammas)) {
    # extended BIC = BIC + gamma * extension term
    scores = BICparams$BIC + gammas[g] * BICparams$extend
    optim[[g]] = optimLambda_eBIC(list(opt.hyperparams = BICparams$opt.hyperparams,
                                       eBIC = scores))
    eBIC[g] = min(scores)
  }
  list(
    gamma = gammas,
    optimLambda = optim,
    eBIC = eBIC
  )
}
#' extended BIC
#' @param gamma [0,1]
#' extended BIC path over the (rho, lambda) grid for multiSML_iPALM
#' @param Bs,fs warm-start network / eQTL coefficients
#' @param Ys list of K expression matrices (samples in columns)
#' @param Xs genotype matrices
#' @param sigma2 initial noise variance; Ng number of genes
#' @param Nk number of eQTLs (enters the degrees of freedom)
#' @param gamma [0,1] mixing is applied downstream (optimGamma_eBIC)
#' @return list(opt.hyperparams, BIC, extend); eBIC = BIC + gamma * extend
eBIC_multSML = function(Bs,
                        fs,
                        Ys,
                        Xs,
                        sigma2,
                        Ng,
                        Nk,
                        nlambda = 20,
                        nrho = 20,
                        verbose = 1) {
  # Penalty grids: log-spaced down from the data-driven maxima.
  lambda.max = get_lambda.max(Bs, Ys, Xs, Ng)
  lambda.factors = 10 ^ seq(0, -4, length.out = nlambda) * lambda.max
  wBs = inverse(Bs)
  rB = flinv(Bs)
  rho.max = get_rho.max(Bs, fs, Ys, Xs, sigma2, Ng)
  rho.factors = 10 ^ seq(0, -4, length.out = nrho) * rho.max
  # (FIX: removed unused local `ncv = 5` -- eBIC needs no CV folds.)
  Ns = sapply(Ys, ncol)
  K = length(Ys)
  hyper.params = vector("list", nrho * nlambda)
  params.prev = NULL
  BIC = vector("numeric", nrho * nlambda)
  extend = vector("numeric", nrho * nlambda)
  ix = 1
  for (rho in rho.factors) {
    for (lambda in lambda.factors) {
      cat(sprintf("lambda = %f, rho = %f\n", lambda, rho))
      # Warm start within each rho row; cold start at its first lambda.
      if (ix %% nlambda == 1) {
        params.opt = multiSML_iPALM(
          Bs,
          fs,
          Ys,
          Xs,
          sigma2,
          Ng,
          lambda,
          rho,
          wBs,
          rB,
          maxit = 1000,
          threshold = 1e-4,
          acc = TRUE,
          sparse = FALSE,
          verbose = verbose
        )
      } else {
        params.opt = multiSML_iPALM(
          params.prev$B,
          params.prev$f,
          Ys,
          Xs,
          params.prev$sigma2,
          Ng,
          lambda,
          rho,
          wBs,
          rB,
          maxit = 1000,
          threshold = 1e-4,
          acc = TRUE,
          sparse = FALSE,
          verbose = verbose
        )
      }
      logLik = SML_logLik(
        Xs,
        Ys,
        params.opt$B,
        params.opt$f,
        Ng,
        Ns,
        K,
        params.opt$detIB,
        params.opt$sigma2
      )[1]
      # Degrees of freedom: nonzero edges in both networks, counting edges
      # that are identical (and nonzero) across conditions only once, plus
      # 2 * Nk eQTL effects.
      df = sum(params.opt$B[[1]] != 0) +
        sum(params.opt$B[[2]] != 0) -
        sum(params.opt$B[[2]] == params.opt$B[[1]] &
              params.opt$B[[1]] != 0) + 2 * Nk
      BIC[ix] = 2 * logLik + df * log(sum(Ns))
      extend[ix] = 2 * log(choose(2 * (Ng ^ 2 - Ng + Nk), df))
      hyper.params[[ix]] = c(lambda, rho)
      params.prev = params.opt
      ix = ix + 1
    }
  }
  list(opt.hyperparams = hyper.params,
       BIC = BIC,
       extend = extend)
}
## utility function
## Arrange eBIC values on the lambda x rho grid and draw a log-surface.
#' @param eBICparams output of eBIC_multSML
#' @param gamma eBIC extension weight in [0, 1]
#' @return list(lambda, rho, ebics, surf)
eBICsurface = function(eBICparams, gamma = 0) {
  ebicfuns = data.frame(
    lambda = sapply(eBICparams$opt.hyperparams, `[`, 1),
    rho = sapply(eBICparams$opt.hyperparams, `[`, 2),
    eBIC = eBICparams$BIC + gamma * eBICparams$extend
  )
  lambda = sort(unique(ebicfuns$lambda))
  rho = sort(unique(ebicfuns$rho))
  eBIC = matrix(nrow = length(lambda), ncol = length(rho))
  # Scatter each grid point into the matrix by its (lambda, rho) position.
  for (r in seq_len(nrow(ebicfuns))) {
    row.ix = which(lambda == ebicfuns$lambda[r])
    col.ix = which(rho == ebicfuns$rho[r])
    eBIC[row.ix, col.ix] = ebicfuns$eBIC[r]
  }
  require(plotly)
  surface = plot_ly(x = log(lambda),
                    y = log(rho),
                    z = log(eBIC)) %>% add_surface()
  list(
    lambda = lambda,
    rho = rho,
    ebics = eBIC,
    surf = surface
  )
}
## eQTLs and gene regulatory networks all differ from each other under
## different conditions
#' @description getdiffeQTL
#' @param N number of sample
#' @param Ng number of gene
#' @param k number of eQTL
#' @param d differential ratio = 0.1
#' @description getdiffeQTL
#' Simulate genotype matrices for two conditions plus the gene-to-eQTL map.
#' Gene i owns markers seq(0, step - 1) * Ng + i where step = Nk / Ng;
#' condition 2 re-draws a fraction d of markers in every sample.
#' @param N number of samples
#' @param Ng number of genes
#' @param Nk number of eQTLs (assumed a multiple of Ng)
#' @param d differential ratio, default 0.1
#' @return list(G = Ng x Nk indicator map, X = two Nk x N genotype
#'   matrices coded 1/2/3, sk = per-gene marker index sets)
getdiffeQTL = function(N, Ng, Nk, d = 0.1) {
  step = Nk / Ng
  X = vector("list", 2)
  # condition-1 genotypes: uniform draws rounded onto {1, 2, 3}
  X[[1]] = round(2 * matrix(runif(Nk * N), nrow = Nk)) + 1
  # condition 2: per sample, resample d * Nk randomly chosen markers
  # (same RNG call order as the original column-wise apply)
  Nd = Nk * d
  X2 = X[[1]]
  for (j in seq_len(N)) {
    redraw = sample(1:Nk, Nd, replace = FALSE)
    X2[redraw, j] = round(2 * runif(Nd)) + 1
  }
  X[[2]] = X2
  # indicator matrix G and the per-gene index sets it encodes
  G = matrix(0,
             nrow = Ng,
             ncol = Nk)
  ix = vector("list", Ng)
  for (i in seq_len(Ng)) {
    s = seq(0, step - 1) * Ng + i
    G[i, s] = 1
    ix[[i]] = s
  }
  list(G = G, X = X, sk = ix)
}
#' @description getdiffsem
#' @details eQTL measurement for different condition are generated with proportional difference.
#' @param f difference proportion of each gene's eQTL measurement, such as SNP
#' @description getdiffsem
#' Simulate a two-condition SEM: Y_k = (I - B_k)^{-1} (F X_k + E_k) with
#' condition-specific networks (getrandDAG) and genotypes (getdiffeQTL).
#' @details eQTL measurements across conditions differ by proportion f.
#' @param r edge density factor (e = Ng * r edges)
#' @param d differential ratio of network edges
#' @param f difference proportion of each gene's eQTL measurement
#' @param sigma noise standard deviation scale
getdiffsem = function(N = 200,
                      Ng = 10,
                      Nk = 10,
                      r = 0.3,
                      d = 0.1,
                      f = 0.1,
                      dag = TRUE,
                      sigma = 0.1) {
  B = getrandDAG(Ng, e = Ng * r, dag = dag, d = d)
  Q = getdiffeQTL(N, Ng, Nk, f)
  # local name Fmat avoids shadowing the FALSE alias `F`
  Fmat = Q[[1]]
  X = Q[[2]]
  sk = Q[[3]]
  # i.i.d. Gaussian noise, one Ng x N matrix per condition
  noise1 = sigma * t(rmvnorm(N, mean = rep(0, Ng), sigma = diag(Ng)))
  noise2 = sigma * t(rmvnorm(N, mean = rep(0, Ng), sigma = diag(Ng)))
  Y1 = solve(diag(Ng) - B[[1]]) %*% (Fmat %*% X[[1]] + noise1)
  Y2 = solve(diag(Ng) - B[[2]]) %*% (Fmat %*% X[[2]] + noise2)
  list(
    obs = list(
      Y1 = Y1,
      Y2 = Y2,
      X1 = X[[1]],
      X2 = X[[2]],
      sk = sk
    ),
    var = list(
      B1 = Matrix(B[[1]], sparse = T),
      B2 = Matrix(B[[2]], sparse = T),
      F = Matrix(Fmat, sparse = T),
      N = N,
      Ng = Ng,
      Nk = Nk
    )
  )
}
## data = getdiffsem(N = 200, Ng = 30, Nk = 90, r = 0.1, d = 0.1, f = 0.1, sigma = 1, dag = TRUE)
#' @description build submatrix of eQTL observations for the subset of corresponding genes
#' @description build, for each condition, the per-gene genotype submatrices
#' by slicing X1/X2 on the eQTL index sets stored in data$obs$sk
#' @param data a simulation object (see getdiffsem)
#' @return list(X1, X2), each a list of Ng row-sliced matrices
submatXs = function(data) {
  index.sets = data$obs$sk
  lapply(list(X1 = data$obs$X1, X2 = data$obs$X2), function(geno) {
    lapply(index.sets, function(rows) {
      geno[rows, , drop = FALSE]
    })
  })
}
## centralized for multiple Ys and Xs
#' @description generalized centralization of Ys and Xs
#' Xs -> n x sk
#' Ys -> n x ng
#' @example
#' Ys = data$obs[c("Y1", "Y2")]
#' Xs = submatXs(data)
#' @description generalized centralization of Ys and Xs
#' Records per-row means and then centers every matrix via the project's
#' `center` helper. Xs is a list over conditions of per-gene matrices;
#' Ys is a list over conditions of expression matrices.
#' @example
#' Ys = data$obs[c("Y1", "Y2")]
#' Xs = submatXs(data)
centralize_gen = function(Xs, Ys) {
  muX = lapply(Xs, function(cond) lapply(cond, rowMeans))
  muY = lapply(Ys, rowMeans)
  Xc = lapply(Xs, function(cond) lapply(cond, center))
  Yc = lapply(Ys, center)
  list(X = Xc,
       Y = Yc,
       muX = muX,
       muY = muY)
}
## ridge regression for estimate sigma2 initialization
## on different gene expression and different eQTLs
#' @param M number of gene
#' @param N number of sample
#' @example
#' M = data$var$Ng
#' N = data$var$N
#' params.init = constrained_L2reg_gen(Xs, Ys, sigma2$rho.opt, M, N)
## Ridge-regression initialization for the generalized (per-condition
## eQTL) setup: fit each condition separately and pool the residual
## variance across all fits.
#' @param rho L2 penalty coefficient
#' @param M number of genes
#' @param N vector of per-condition sample sizes
#' @return list(B, F, sigma2, mu) with one entry per condition
constrained_L2reg_gen = function(Xs, Ys, rho, M, N) {
  K = length(Ys)
  B = vector("list", K)
  Fs = vector("list", K)
  mu = vector("list", K)
  err = 0
  df = 0
  for (k in seq_len(K)) {
    fit = constrained_L2reg(Xs[[k]], Ys[[k]], rho)
    B[[k]] = as.matrix(fit$B)
    Fs[[k]] = fit$F
    mu[[k]] = fit$mu
    # accumulate variance weighted by this condition's degrees of freedom
    err = err + fit$sigma2 * (N[k] * M - 1)
    df = df + (N[k] * M - 1)
  }
  list(
    B = B,
    F = Fs,
    sigma2 = err / df,
    mu = mu
  )
}
## generalized cross-validation on ridge regression to estimate sigma2
## on different gene expression and different eQTLs
#' @param nrho number of L2 penalty's coefficient
#' @param ncv number of cross-validation
#' @example
#' Ys = data$obs[c("Y1", "Y2")]
#' Xs = submatXs(data)
#' M = data$var$Ng
#' N = data$var$N
#' sigma2 = getsigma2_L2reg_gen(Xs, Ys, nrho = 20, M = M, N = N)
## Choose the ridge penalty by ncv-fold cross-validation for the
## generalized setup, then refit on all data to get the initial sigma2.
#' @param Xs list over conditions of per-gene genotype matrices
#' @param Ys list over conditions of expression matrices (samples in columns)
#' @param nrho number of L2 penalty's coefficient on the 10^[-6, 2] grid
#' @param ncv number of cross-validation folds
#' @param M number of genes
#' @param N vector of per-condition sample sizes
#' @return list(rho.opt, sigma2.opt, cv.ram)
getsigma2_L2reg_gen = function(Xs,
                               Ys,
                               nrho = 10,
                               ncv = 5,
                               M,
                               N) {
  rho_factors = 10 ** (seq(-6, 2, length.out = nrho))
  cv.err = matrix(0, nrow = nrho, ncol = ncv)
  # Independent fold assignment per condition. The literal 1:2 below
  # hard-codes the two-condition design used throughout this file.
  cv.fold = list()
  cv.fold[[1]] = sample(seq(1, ncv), size = N[1], replace = T)
  cv.fold[[2]] = sample(seq(1, ncv), size = N[2], replace = T)
  irho = 1
  for (rho in rho_factors) {
    for (cv in 1:ncv) {
      ytrain = lapply(1:2, function(ix) {
        Ys[[ix]][, cv.fold[[ix]] != cv, drop = F]
      })
      xtrain = lapply(1:2, function(ix) {
        lapply(Xs[[ix]], function(x) {
          x[, cv.fold[[ix]] != cv, drop = F]
        })
      })
      ytest = lapply(1:2, function(ix) {
        Ys[[ix]][, cv.fold[[ix]] == cv, drop = F]
      })
      xtest = lapply(1:2, function(ix) {
        lapply(Xs[[ix]], function(x) {
          x[, cv.fold[[ix]] == cv, drop = F]
        })
      })
      Ntrain = sapply(cv.fold, function(f){ sum(f != cv) })
      fit = constrained_L2reg_gen(xtrain, ytrain, rho, M, Ntrain)
      for (k in 1:length(Ys)) {
        # predicted eQTL contribution for each gene on the held-out fold
        ftest = lapply(1:M, function(i) {
          crossprod(fit$F[[k]][[i]], xtest[[k]][[i]])
        })
        ftest = do.call(rbind, ftest)
        # structural-equation residual; NOTE(review): the Frobenius norm is
        # accumulated unsquared here -- confirm that is intentional.
        cv.err[irho, cv] = cv.err[irho, cv] + norm((diag(M) - fit$B[[k]]) %*% ytest[[k]] - ftest - fit$mu[[k]], type = "f")
      }
    }
    irho = irho + 1
  }
  cv.mean = rowMeans(cv.err)
  rho.min = rho_factors[which.min(cv.mean)]
  # refit on the full data at the selected penalty
  fit = constrained_L2reg_gen(Xs, Ys, rho.min, M, N)
  list(
    rho.opt = rho.min,
    sigma2.opt = fit$sigma2[1],
    cv.ram = list(rho = rho_factors, cvm = cv.mean)
  )
}
## solve SML problem by component-wise update with generalized configuration
## different gene expression and different eQTLs
#' @param lambda lasso penalty
#' @param rho fused lasso penalty
#' @example
#' M = data$var$Ng
#' N = data$var$N
#' Ys = data$obs[c("Y1", "Y2")]
#' Xs = submatX(data)
#' sigma2 = getsigma2_L2reg_multi(Xs, Ys, nrho = 20, M = M, N = N)
#' params.init = constrained_L2reg_multi(Xs, Ys, sigma2$rho.opt, M, N)
#' params.opt = genSML_cwise(Bs = params.opt4$B, fs = params.opt4$f, Ys = Ys, Xs = Xs,
#' sigma2 = params.init$sigma2[1], Ng = data$var$Ng,
#' wBs = inverse(params.init$B), rB = flinv(params.init$B),
#' lambda = 10, rho = 40, maxit = 1000)
## Solve the generalized fused-lasso SML problem by component-wise
## (single-edge) coordinate descent. For each edge (i, j) the 1-D problem
## is minimized in closed form over the 13 sign patterns of the fused
## lasso, using rank-one updates of (I - B)^{-1} columns.
#' @param lambda lasso penalty; rho fused-lasso penalty
#' @param weighted use adaptive weights (wBs, rB) or unit weights
#' @param Ys list of 2 expression matrices; Xs per-condition per-gene
#'   genotype matrices (see submatXs)
#' @return list(B, f, mu, niter, err); B entries are sparse Matrix objects
genSML_cwise = function(Bs,
                        fs,
                        Ys,
                        Xs,
                        sigma2,
                        Ng,
                        lambda,
                        rho,
                        weighted = TRUE,
                        wBs = inverse(Bs),
                        rB = flinv(Bs),
                        maxit = 100,
                        threshold = 1e-4,
                        verbose = 2) {
  std = centralize_gen(Xs, Ys)
  Xs = std$X
  Ys = std$Y
  meanXs = std$muX
  meanYs = std$muY
  K = length(Ys) # number of conditions = 2
  if (verbose == 2) {
    cat(sprintf("conditions must be restricted to 2.. K = %d\n", K))
  }
  ## Precompute per-gene OLS pieces so the eQTL coefficients are a linear
  ## function of the network row: f = f0 - f1 %*% B[i, ]
  f0 = vector("list", K)
  f1 = vector("list", K)
  for (i in 1:Ng) {
    for (k in 1:K) {
      Xi = Xs[[k]][[i]]
      Pi = solve(crossprod(Xi)) %*% t(Xi)
      yi_k = Ys[[k]][, i, drop = F] # n x 1 for gene i
      f0[[k]][[i]] = Pi %*% yi_k
      f1[[k]][[i]] = Pi %*% Ys[[k]] # f = f0 - f1 %*% B[i,]
    }
  }
  ## update for gnet coeffs
  niter = 1
  Ns = sapply(Ys, nrow)
  ImBs = lapply(Bs, function(B) {
    diag(Ng) - B
  })
  IBsinv = lapply(ImBs, solve)
  while (niter <= maxit) {
    Bs.prev = Bs
    fs.prev = fs
    for (i in 1:Ng) {
      # i-th column of each (I - B)^{-1}; kept consistent with Bs via the
      # rank-one scaling below as edges in row i change
      ci = lapply(IBsinv, function(IBi) {
        IBi[, i]
      })
      dbi = lapply(1:K, function(k) {
        vector("numeric", Ng)
      })
      for (j in 1:Ng) {
        ## update B[[k]][i,j] for i != j (no self-loops)
        if (i != j) {
          bij.prev = bij = sapply(Bs, function(B)
            (B[i, j]))
          wij = sapply(wBs, function(w) {
            if (weighted) {
              w[i, j]
            } else {
              1
            }
          })
          rij = if (weighted) {
            rB[i, j]
          } else {
            1
          }
          # mij: sensitivity of det(I - B) to edge (i, j)
          mij = sapply(ci, function(c) {
            c[j]
          })
          # row i of (I - B) with the j-th entry zeroed out
          bi = lapply(ImBs, function(ImB) {
            bi_k = ImB[i,]
            bi_k[j] = 0
            bi_k
          })
          ## j-th column of Ys
          Yej = lapply(Ys, function(Y) {
            Y[, j, drop = F]
          })
          # linear and quadratic coefficients of the 1-D objective in bij
          a1 = sapply(1:K, function(k) {
            crossprod(Ys[[k]] %*% bi[[k]] - Xs[[k]][[i]] %*% fs[[k]][[i]], Yej[[k]])
          })
          a2 = sapply(1:K, function(k) {
            crossprod(Yej[[k]])
          })
          ## a0 = 1/mij + bij.prev
          # The 13 sign patterns (sign(b1), sign(b2), sign(b1 - b2)) the
          # fused-lasso subgradient can take; each yields a candidate.
          cond = list(
            c(1, 1, 1),
            c(1, -1, 1),
            c(1, 0, 1),
            c(-1, -1, 1),
            c(0, -1, 1),
            c(1, 1, -1),
            c(0, 1, -1),
            c(-1, -1, -1),
            c(-1, 0, -1),
            c(-1, 1, -1),
            c(1, 1, 0),
            c(-1, -1, 0),
            c(0, 0, 0)
          )
          obj = obj_multiSML(Ns, mij, bij.prev, a1, a2, lambda, rho, wij, rij, sigma2)
          grad = grad_multiSML(Ns, mij, bij.prev, a1, a2, lambda, rho, wij, rij, sigma2)
          params = list()
          for (t in cond) {
            cand.grad = grad(t)
            params = c(params, grad_solver(cand.grad, t))
          }
          # pick the candidate with the smallest objective value
          objval = sapply(params, function(args) {
            do.call(obj, args)
          })
          mix = which.min(objval)
          bij = unlist(params[[mix]])
          dbij = bij.prev - bij
          for (k in 1:K) {
            dbi[[k]][j] = dbij[k]
            Bs[[k]][i, j] = bij[k]
            # rank-one rescale keeps ci in sync with the updated edge
            ci[[k]] = ci[[k]] / (1 + dbij[k] * mij[k])
            ImBs[[k]] = diag(Ng) - Bs[[k]]
          }
        }
      } ## for(j in 1:Ng)
      ## (ImB + ei^T %*% dbi)^{-1}
      for (k in 1:K) {
        ## IBsinv[[k]] = IBsinv[[k]] - IBsinv[[k]][, i, drop = F] %*% dbi[[k]] %*% IBsinv[[k]] / (1 + dbi[[k]] %*% IBsinv[[k]][, i, drop = F])[1]
        # full re-inversion after each row (the rank-one alternative above
        # is kept commented out)
        IBsinv[[k]] = solve(ImBs[[k]])
        fs[[k]][[i]] = f0[[k]][[i]] - f1[[k]][[i]] %*% Bs[[k]][i, ]
      }
    } ## for(i in 1:Ng)
    # relative change of networks and eQTL coefficients
    Berr = sum(sapply(1:K, function(k) {
      norm(Bs[[k]] - Bs.prev[[k]], type = "f") / norm(Bs.prev[[k]], type = "f")
    }))
    Ferr = sum(sapply(1:K, function(k) {
      sum(sapply(1:Ng, function(i) {
        norm(fs[[k]][[i]] - fs.prev[[k]][[i]], type = "f")
      })) / sum(sapply(1:Ng, function(i) {
        norm(fs.prev[[k]][[i]], type = "f")
      }))
    }))
    err = Berr + Ferr
    cat(sprintf("SML: iteration = %d, error = %f\n", niter, err))
    niter = niter + 1
    if (err < threshold || niter > maxit || is.nan(err)) {
      # NOTE(review): `mu` is only defined on this branch; the final list()
      # relies on the loop always exiting through it.
      mu = lapply(1:K, function(k) {
        (diag(Ng) - Bs[[k]]) %*% meanYs[[k]] - sapply(1:Ng, function(i) {
          meanXs[[k]][[i]] %*% fs[[k]][[i]]
        })
      })
      Bs = lapply(Bs, Matrix, sparse = T)
      break
    }
  } ## while(niter <= maxit)
  list(
    B = Bs,
    f = fs,
    mu = mu,
    niter = niter,
    err = err
  )
}
## Pooled residual variance of the generalized SEM: for every condition k
## and gene i, accumulate || y_i - B[i,-i] Y_{-i} - f_i' X_i ||^2 and
## normalize by Ng * sum(Ns) total observations.
#' @param Xs list over conditions of per-gene matrices (samples x eQTLs)
#' @param Ys list over conditions of matrices (samples x genes)
#' @return scalar sigma2 estimate
sigma2_gen = function(Xs, Ys, B, f, Ng, Ns, K) {
  # transpose once so genes/eQTLs index rows and samples index columns
  Xt = lapply(Xs, function(cond) lapply(cond, t))
  Yt = lapply(Ys, t)
  rss = 0
  for (k in seq_len(K)) {
    for (i in seq_len(Ng)) {
      resid = Yt[[k]][i, , drop = FALSE] -
        B[[k]][i, -i, drop = FALSE] %*% Yt[[k]][-i, , drop = FALSE] -
        crossprod(f[[k]][[i]], Xt[[k]][[i]])
      rss = rss + tcrossprod(resid)
    }
  }
  (rss / (Ng * sum(Ns)))[1]
}
## solve SML problem by block coordinate descent by backtracking inert-PALM on
## different gene expression and different eQTLs
#' @param lambda lambda hyper-parameter for lasso term
#' @param rho fused lasso hyper-parameter for fused lasso term
#' @param gamma invertible-matrix stabilization parameter gamma
#' params.init = constrained_L2reg_gen(Xs, Ys, sigma2$rho.opt, M, N)
#' params.opt = genSML_iPALM(Bs = params.init$B, fs = params.init$F, Ys = Ys, Xs = Xs,
#' sigma2 = params.init$sigma2[1], Ng = data$var$Ng,
#' lambda = 0.1, rho = 0.1, maxit = 500)
## Solve the generalized fused-lasso SML problem by row-wise block
## coordinate descent with inertial PALM acceleration.
#' @param lambda lambda hyper-parameter for lasso term
#' @param rho fused lasso hyper-parameter for fused lasso term
#' @param acc enable the inertial (accelerated) step
#' @param inertial schedule for the inertial parameter
#' @param use.strict require both err AND likelihood convergence
#' @return list(B, f, mu, sigma2, niter, err, detIB)
genSML_iPALM = function(Bs,
                        fs,
                        Ys,
                        Xs,
                        sigma2,
                        Ng,
                        lambda,
                        rho,
                        wBs = inverse(Bs),
                        rB = flinv(Bs),
                        maxit = 100,
                        acc = TRUE,
                        inertial = inertial_pars("lin"),
                        threshold = 1e-3,
                        sparse = FALSE,
                        use.strict = TRUE,
                        verbose = 2) {
  std = centralize_gen(Xs, Ys)
  Xs = std$X
  Ys = std$Y
  meanXs = std$muX
  meanYs = std$muY
  K = length(Ys) # number of conditions = 2
  if (verbose == 2) {
    cat(sprintf("conditions must be restricted to 2.. K = %d\n", K))
  }
  ## Precompute, per gene and condition: the OLS pieces making the eQTL
  ## coefficients linear in the network row, the projected Gram matrices
  ## used by the row-wise gradient, and their largest eigenvalues.
  f0 = vector("list", K)
  f1 = vector("list", K)
  Yp = vector("list", K)
  Hy = vector("list", K)
  Yp.maxEigen = vector("list", K)
  Ns = sapply(Ys, nrow)
  for (i in 1:Ng) {
    for (k in 1:K) {
      Xi = Xs[[k]][[i]]
      Pi = solve(crossprod(Xi)) %*% t(Xi)
      # specific condition
      yi_k = Ys[[k]][, i, drop = F] # n[k] x 1 for gene i
      Yi_k = Ys[[k]][,-i] # n[k] x (ng-1) (for specific gene i)
      f0[[k]][[i]] = Pi %*% yi_k
      f1[[k]][[i]] = Pi %*% Yi_k # f[[k]][[i]] = f0[[k]][[i]] - f1[[k]][[i]] %*% bi^k | bi^k = B[[k]][i,-i]
      Hi_k = diag(Ns[k]) - Xi %*% Pi # n[k] x n[k] projection matrix
      Yp[[k]][[i]] = t(Yi_k) %*% Hi_k %*% Yi_k # (ng - 1) x (ng - 1)
      Hy[[k]][[i]] = t(yi_k) %*% Hi_k %*% Yi_k # 1 x (ng - 1)
      ## maximized eigen-value for Yp
      Yp.maxEigen[[k]][[i]] = eigen(Yp[[k]][[i]])$values[1]
    }
  }
  ## update for gnet row-wise
  niter = 1
  ImBs = lapply(Bs, function(B) {
    diag(Ng) - B
  })
  detIBs = sapply(ImBs, det)
  IBsinv = lapply(ImBs, solve)
  # FIX: the log-variance term used an undefined free variable `N`;
  # the recomputation at the bottom of the loop correctly uses `Ng`.
  Ls = logLik(detIBs, Bs, wBs, rB, lambda, rho, Ns, K) + Ng * sum(Ns) / 2 * log(sigma2)
  ## history (two previous iterates, needed for the inertial step)
  Bs.prevs = list(Bs, Bs)
  inert = acc
  while (niter <= maxit) {
    inert.pars = inertial(niter)
    # extrapolated point: B + beta * (B - B.prev)
    Bs.inert = if (inert) {
      lapply(1:K, function(k) {
        Bs.prevs[[2]][[k]] + inert.pars * (Bs.prevs[[2]][[k]] - Bs.prevs[[1]][[k]])
      })
    } else {
      Bs
    }
    fs.prev = fs
    Ls.prev = Ls
    for (i in 1:Ng) {
      ## sum(-Ns[k] * sigma2 * log(det(I-B[[k]])^2)) + \sum_{i=1}^{Ng} bi^T %*% Yp %*% bi - Hy %*% bi)
      ci = lapply(IBsinv, function(IBi) {
        # ci / det(I - B); from (I - B)^{-1}
        IBi[-i, i, drop = F]
      })
      bi = lapply(Bs.inert, function(B.inert) {
        t(B.inert[i,-i, drop = F])
      })
      gi = lapply(1:K, function(k) {
        grad = grad_rwise_SML(Ns[k], ci[[k]], Yp[[k]][[i]], Hy[[k]][[i]], sigma2[1])
        grad(bi[[k]])
      })
      ## Lipschitz moduli for row-i
      Lis = sapply(1:K, function(k) {
        gtg = tcrossprod(ImBs[[k]][-i, ])
        oi = chol2inv(gtg)
        deti = det(gtg)
        gii = ImBs[[k]][-i,-i]
        si = ImBs[[k]][-i, i, drop = F]
        c2i = sum((ci[[k]] * detIBs[k]) ^ 2)
        lips_rwise_SML(Ns[k],
                       oi,
                       gii,
                       si,
                       c2i,
                       deti,
                       Yp.maxEigen[[k]][[i]],
                       sigma2,
                       Ng)[1]
      })
      Li = max(Lis)
      # inertial correction of the step size
      Li = (1 + 2 * inert.pars) * Li / (2 * (1 - inert.pars))
      # forward (gradient) step, then proximal fused-lasso step
      ui = lapply(1:K, function(k) {
        bi[[k]] - gi[[k]] / Li
      })
      wBi = lapply(wBs, function(wB) {
        wB[i,-i]
      })
      rBi = rB[i,-i]
      xi = prox_flsa(lambda, rho, Li, ui, wBi, rBi)
      for (k in 1:K) {
        Bs[[k]][i,-i] = xi[[k]]
        ImBs[[k]] = diag(Ng) - Bs[[k]]
        # rank-one determinant and inverse updates after changing row i
        detIBs[k] = (ImBs[[k]][i, ] %*% IBsinv[[k]][, i, drop = F])[1] * detIBs[k]
        dbi = Bs.prevs[[2]][[k]][i, ] - Bs[[k]][i, ]
        IBsinv[[k]] = IBsinv[[k]] - IBsinv[[k]][, i, drop = F] %*% dbi %*% IBsinv[[k]] / (1 + dbi %*% IBsinv[[k]][, i, drop = F])[1]
        fs[[k]][[i]] = f0[[k]][[i]] - f1[[k]][[i]] %*% Bs[[k]][i,-i]
      }
    } # row-wise update
    # relative change of networks and eQTL coefficients
    Berr = sum(sapply(1:K, function(k) {
      norm(Bs[[k]] - Bs.prevs[[2]][[k]], type = "f") / (1 + norm(Bs.prevs[[2]][[k]], type = "f"))
    }))
    Ferr = sum(sapply(1:K, function(k) {
      sum(sapply(1:Ng, function(i) {
        norm(fs[[k]][[i]] - fs.prev[[k]][[i]], type = "f")
      })) / (1 + sum(sapply(1:Ng, function(i) {
        norm(fs.prev[[k]][[i]], type = "f")
      })))
    }))
    err = Berr + Ferr
    sigma2 = sigma2_gen(Xs, Ys, Bs, fs, Ng, Ns, K)
    Ls = logLik(detIBs, Bs, wBs, rB, lambda, rho, Ns, K) + Ng * sum(Ns) / 2 * log(sigma2)
    Lerr = abs(Ls.prev - Ls) / (1 + abs(Ls.prev))
    # inert = ifelse(Lerr < 1e-8, FALSE, acc)
    if (verbose >= 2) {
      cat(
        sprintf(
          "SML: iteration = %d, error = %f, logLik = %f, sigma2 = %f, inert = %s\n",
          niter,
          err,
          Ls,
          sigma2,
          inert
        )
      )
    }
    niter = niter + 1
    Bs.prevs = list(Bs.prevs[[2]], Bs)
    opt.cond = if (use.strict) {
      (err < threshold && Lerr < threshold)
    } else {
      (err < threshold || Lerr < threshold)
    }
    if (opt.cond || niter > maxit || is.nan(err)) {
      # recover intercepts on the original (uncentered) scale
      mu = lapply(1:K, function(k) {
        (diag(Ng) - Bs[[k]]) %*% meanYs[[k]] - sapply(1:Ng, function(i) {
          meanXs[[k]][[i]] %*% fs[[k]][[i]]
        })
      })
      sigma2 = sigma2_gen(Xs, Ys, Bs, fs, Ng, Ns, K)
      if (sparse) {
        Bs = lapply(Bs, Matrix, sparse = T)
      }
      break
    }
  } # while(niter <= maxit)
  list(
    B = Bs,
    f = fs,
    mu = mu,
    sigma2 = sigma2,
    niter = niter,
    err = err,
    detIB = detIBs
  )
}
######################
# comparable method
######################
## cross validation for hyper-parameter tuning
#' @description 5-fold cross-validation
#' @param dyn dynamic updated rho.max by given lambda
#' @example
#' cv.params = cv_multiSML(Bs = params.init$B, fs = params.init$F, Ys = Ys, Xs = Xs, sigma2 = params.init$sigma2[1], Ng = data$var$Ng, nlambda = 20, nrho = 20)
## 5-fold cross-validation of genSML_iPALM over a (rho, lambda) grid,
## for the generalized (per-condition eQTL) setup with independent fold
## assignments per condition.
#' @param Bs,fs warm-start network / eQTL coefficients
#' @param Ys,Xs per-condition expression and per-gene genotype data
#' @param nlambda,nrho grid sizes; weighted use adaptive penalty weights
#' @return list(opt.hyperparams, cverrs, loglik)
cv_genSML = function(Bs,
                     fs,
                     Ys,
                     Xs,
                     sigma2,
                     Ng,
                     nlambda = 20,
                     nrho = 20,
                     weighted = TRUE,
                     threshold = 1e-4,
                     use.strict = FALSE,
                     verbose = 1) {
  lambda.max = gen_lambda.max(Bs, Ys, Xs, Ng, weighted)
  lambda.factors = 10 ^ seq(0,-5, length.out = nlambda) * lambda.max
  if (weighted) {
    wBs = inverse(Bs)
    rB = flinv(Bs)
  } else {
    wBs = invone(Bs)
    rB = flone(Bs)
  }
  rho.max = gen_rho.max(Bs, fs, Ys, Xs, sigma2, Ng, weighted)
  rho.factors = 10 ^ seq(0,-5, length.out = nrho) * rho.max
  ncv = 5
  Ns = sapply(Ys, ncol)
  K = length(Ys)
  Ytrain = vector("list", ncv)
  Xtrain = vector("list", ncv)
  Ytest = vector("list", ncv)
  Xtest = vector("list", ncv)
  # independent fold labels per condition (1:2 hard-codes K = 2)
  cv.fold = list()
  cv.fold[[1]] = sample(seq(1, ncv), size = Ns[1], replace = T)
  cv.fold[[2]] = sample(seq(1, ncv), size = Ns[2], replace = T)
  for (i in 1:ncv) {
    Ytrain[[i]] = lapply(1:2, function(ix) {
      Ys[[ix]][, cv.fold[[ix]] != i, drop = F]
    })
    Xtrain[[i]] = lapply(1:2, function(ix) {
      lapply(Xs[[ix]], function(x) {
        x[, cv.fold[[ix]] != i, drop = F]
      })
    })
    Ytest[[i]] = lapply(1:2, function(ix) {
      Ys[[ix]][, cv.fold[[ix]] == i, drop = F]
    })
    Xtest[[i]] = lapply(1:2, function(ix) {
      lapply(Xs[[ix]], function(x) {
        x[, cv.fold[[ix]] == i, drop = F]
      })
    })
  }
  cverrs = vector("list", nrho * nlambda)
  cvlls = vector("list", nrho * nlambda)
  hyper.params = list()
  for (cv in 1:ncv) {
    params.opt = list()
    Nc = sapply(Ytest[[cv]], ncol)
    ix = 1
    for (rho in rho.factors) {
      for (lambda in lambda.factors) {
        cat(sprintf("lambda = %4f, rho = %4f, foldid = %d\n", lambda, rho, cv))
        # warm start within each rho row; cold start at its first lambda
        if (ix %% nlambda == 1) {
          params.opt[[ix]] = genSML_iPALM(
            Bs,
            fs,
            Ytrain[[cv]],
            Xtrain[[cv]],
            sigma2,
            Ng,
            lambda,
            rho,
            wBs,
            rB,
            maxit = 1000,
            threshold = threshold,
            acc = TRUE,
            sparse = FALSE,
            use.strict = use.strict,
            verbose = verbose
          )
        } else {
          params.opt[[ix]] = genSML_iPALM(
            params.opt[[ix - 1]]$B,
            params.opt[[ix - 1]]$f,
            Ytrain[[cv]],
            Xtrain[[cv]],
            params.opt[[ix - 1]]$sigma2,
            Ng,
            lambda,
            rho,
            wBs,
            rB,
            maxit = 1000,
            threshold = threshold,
            acc = TRUE,
            sparse = FALSE,
            use.strict = use.strict,
            verbose = verbose
          )
        }
        loglik = gen_logLik(
          Xtest[[cv]],
          Ytest[[cv]],
          params.opt[[ix]]$B,
          params.opt[[ix]]$f,
          Ng,
          Nc,
          K,
          params.opt[[ix]]$detIB,
          params.opt[[ix]]$sigma2
        )[1]
        err = gen_error(Xtest[[cv]],
                        Ytest[[cv]],
                        params.opt[[ix]]$B,
                        params.opt[[ix]]$f,
                        Ng,
                        Nc,
                        K)[1]
        cverrs[[ix]] = c(cverrs[[ix]], err)
        cvlls[[ix]] = c(cvlls[[ix]], loglik)
        if (cv == 1) {
          hyper.params[[ix]] = c(lambda, rho)
        }
        ix = ix + 1
      }
    }
  }
  # FIX: the return list read `loglik = rbind, cvlls`, which stored the
  # `rbind` function under $loglik and appended cvlls unnamed; cv_multiSML
  # shows the intended shape.
  list(opt.hyperparams = hyper.params,
       cverrs = cverrs,
       loglik = cvlls)
}
## lambda.max estimate for the generalized setup: largest weighted absolute
## gradient at B = 0 after regressing out the per-condition eQTL effects.
#' @param weighted use adaptive (inverse-|B|) weights instead of ones
#' @return scalar lambda.max
gen_lambda.max = function(Bs, Ys, Xs, Ng, weighted = TRUE) {
  std = centralize_gen(Xs, Ys)
  Xs = std$X ## N x sk
  Ys = std$Y ## N x p
  K = length(Ys)
  Ns = sapply(Ys, nrow)
  R = vector("list", K) ## per-condition residual matrices, Ng x N
  w = if (weighted) {
    inverse(Bs)
  } else {
    invone(Bs)
  }
  for (k in 1:K) {
    R[[k]] = matrix(0, nrow = Ng, ncol = Ns[k])
  }
  for (i in 1:Ng) {
    for (k in 1:K) {
      Xi = Xs[[k]][[i]]
      # FIX: reuse the projector Pi; the original computed it and then
      # recomputed solve(crossprod(Xi)) %*% t(Xi) for fi anyway.
      Pi = solve(crossprod(Xi)) %*% t(Xi)
      yi = Ys[[k]][, i, drop = F] # n x 1
      fi = Pi %*% yi
      Xf = Xi %*% fi # n x 1
      R[[k]][i, ] = yi - Xf
    }
  }
  err = 0
  for (k in 1:K) {
    err = err + norm(R[[k]], type = "f") ^ 2
  }
  sigma2 = err / (Ng * sum(Ns))
  Ry = vector("list", K)
  for (k in 1:K) {
    Ry[[k]] = R[[k]] %*% Ys[[k]]
    # NOTE(review): subtracts Ns[[k]], unlike get_lambda.max -- presumably
    # accounts for the det term in this parameterization; confirm.
    Ry[[k]] = abs(Ry[[k]] / sigma2 - Ns[[k]]) / w[[k]]
  }
  max(sapply(Ry, max))
}
## cross-validation and EBIC for hyper-parameter tuning
## rho max can be estimated, rho is the fused lasso
## regularized hyper parameter
#' @description get_rho.max
#' @example
#' rhomax = get_rho.max(params.init$B, params.init$F, Ys, Xs, params.init$sigma2[1], data$var$Ng)
#' @description gen_rho.max
#' rho.max estimate for the generalized setup: fit with rho = Inf so both
#' conditions share one network, then take the largest weighted absolute
#' gradient at that shared solution (mirrors get_rho.max).
#' @example
#' rhomax = gen_rho.max(params.init$B, params.init$F, Ys, Xs, params.init$sigma2[1], data$var$Ng)
gen_rho.max = function(Bs, fs, Ys, Xs, sigma2, Ng, weighted = TRUE) {
  if (weighted) {
    wBs = inverse(Bs)
    rB = flinv(Bs)
  } else {
    wBs = invone(Bs)
    rB = flone(Bs)
  }
  params.rho = genSML_iPALM(
    Bs,
    fs,
    Ys,
    Xs,
    sigma2,
    Ng,
    lambda = 0,
    rho = Inf,
    wBs = wBs,
    rB = rB,
    maxit = 2000,
    threshold = 1e-4,
    use.strict = F,
    sparse = T,
    verbose = 1
  )
  # FIX (consistency with get_rho.max): honor `weighted` here too; the
  # original unconditionally used flinv(Bs).
  weight.rho = if (weighted) {
    flinv(Bs)
  } else {
    flone(Bs)
  }
  Bs = params.rho$B[[1]]  # shared network from the rho = Inf fit
  fs = params.rho$f
  sigma2 = params.rho$sigma2
  std = centralize_gen(Xs, Ys)
  Xs = std$X ## Ng (n x sk)
  Ys = std$Y ## n x ng
  ## multiple
  K = length(Ys)
  Ns = sapply(Ys, nrow)
  Bc = vector("list", K)
  YY = vector("list", K)
  FX = vector("list", K)
  Dx = vector("list", K)
  for (k in 1:K) {
    Bc[[k]] = -Ns[k] * t(solve(diag(Ng) - Bs))
    YY[[k]] = crossprod(Ys[[k]]) ## Y %*% t(Y)
    FX[[k]] = matrix(0, nrow = Ng, ncol = Ns[k]) ## F %*% X (p x n)
    for (i in 1:Ng) {
      FX[[k]][i, ] = as.numeric(Xs[[k]][[i]] %*% fs[[k]][[i]])
    }
    # FIX (consistency with get_rho.max): divide the whole |gradient| by
    # weight.rho; the original divided only the data term inside abs(), so
    # the det term Bc escaped the weighting.
    Dx[[k]] = abs(Bc[[k]] + ((diag(Ng) - Bs) %*% YY[[k]] - FX[[k]] %*% Ys[[k]]) / sigma2) / weight.rho
    diag(Dx[[k]]) = -Inf  # self-loops are never penalized
  }
  max(c(max(Dx[[1]]), max(Dx[[2]])))
}
## Negative log-likelihood of the generalized SEM on (centered) data:
## -sum_k Ns[k]/2 log det(I - B_k)^2 + RSS / (2 sigma2)
##   + Ng * sum(Ns) / 2 * log(sigma2)
gen_logLik = function(Xs, Ys, Bs, fs, Ng, Ns, K, detIBs, sigma2) {
  std = centralize_gen(Xs, Ys)
  Xt = lapply(seq_len(K), function(k) lapply(std$X[[k]], t))
  Yt = lapply(std$Y, t)
  Ls = 0
  rss = 0
  for (k in seq_len(K)) {
    # determinant (Jacobian) term of the SEM likelihood
    Ls = Ls - Ns[k] / 2 * log(detIBs[k] ^ 2)
    for (i in seq_len(Ng)) {
      resid = Yt[[k]][i, , drop = FALSE] -
        Bs[[k]][i, -i, drop = FALSE] %*% Yt[[k]][-i, , drop = FALSE] -
        crossprod(fs[[k]][[i]], Xt[[k]][[i]])
      rss = rss + tcrossprod(resid)
    }
  }
  Ls + rss / (2 * sigma2) + Ng * sum(Ns) / 2 * log(sigma2)
}
## Residual sum of squares of the "gen" model on (held-out) data: for every
## condition k and gene g, the squared error of expression yg against its
## network parents (bg %*% Yg) plus its own eQTL effects (fg on xg).
gen_error = function(Xs, Ys, Bs, fs, Ng, Ns, K) {
  std = centralize_gen(Xs, Ys)
  ## transpose once so genes index rows below
  Xt = lapply(seq_len(K), function(k) lapply(std$X[[k]], t))
  Yt = lapply(std$Y, t)
  rss = 0
  for (k in seq_len(K)) {
    for (g in seq_len(Ng)) {
      xg = Xt[[k]][[g]]                  # sk x N
      bg = Bs[[k]][g, -g, drop = FALSE]  # 1 x (Ng - 1)
      yg = Yt[[k]][g, , drop = FALSE]    # 1 x N
      Yg = Yt[[k]][-g, , drop = FALSE]   # (Ng - 1) x N
      fg = fs[[k]][[g]]                  # sk x 1
      resid = yg - bg %*% Yg - crossprod(fg, xg)
      rss = rss + tcrossprod(resid)
    }
  }
  rss
}
## single SML for fused lasso method
#' @description SML method for single problem and fused lasso
## 5-fold cross-validation over a lasso path for per-condition sparse SML
## fits (sparse_maximum_likehood_iPALM on each of the two conditions).
## Returns the lambda path and an nlambda x ncv matrix of held-out errors,
## consumed by optLasso_cv().
cv_SMLasso = function(Bs,
                      fs,
                      Ys,
                      Xs,
                      sigma2,
                      Ng,
                      nlambda = 20,
                      threshold = 1e-4) {
  ## log-spaced path over 4 decades below lambda.max
  ## NOTE(review): the `threshold` argument is not used; the fitter calls
  ## below hard-code threshold = 1e-3 -- confirm which was intended
  lambda.max = get_lambda.max(Bs, Ys, Xs, Ng)
  lambda.factors = 10 ^ seq(0,-4, length.out = nlambda) * lambda.max
  wBs = inverse(Bs)   # NOTE(review): computed but never used below
  ncv = 5
  Ns = sapply(Ys, ncol)
  K = length(Ys)
  Ytrain = vector("list", ncv)
  Xtrain = vector("list", ncv)
  Ytest = vector("list", ncv)
  Xtest = vector("list", ncv)
  ## one fold assignment shared by both conditions (samples assumed paired,
  ## i.e. Ns[1] == Ns[2])
  cv.fold = sample(seq(1, ncv), size = Ns[1], replace = T)
  for (i in 1:ncv) {
    Ytrain[[i]] = lapply(Ys, function(y) {
      y[, cv.fold != i, drop = F]
    })
    Xtrain[[i]] = lapply(Xs, function(x) {
      x[, cv.fold != i, drop = F]
    })
    Ytest[[i]] = lapply(Ys, function(y) {
      y[, cv.fold == i, drop = F]
    })
    Xtest[[i]] = lapply(Xs, function(x) {
      x[, cv.fold == i, drop = F]
    })
  }
  cverrs = vector("list", nlambda)
  hyper.params = NULL
  for (cv in 1:ncv) {
    params.opt = list()
    Nt = sapply(Ytrain[[cv]], ncol)
    ix = 1
    for (lambda in lambda.factors) {
      cat(sprintf("lambda = %4f, foldid = %d\n", lambda, cv))
      params.opt[[ix]] = vector("list", 2)
      ## condition 1; the whole Xtrain[[cv]] list is passed as X --
      ## presumably the shared eQTL design; TODO confirm against the fitter
      params.opt[[ix]][[1]] = sparse_maximum_likehood_iPALM(
        B = Bs[[1]],
        f = fs[[1]],
        Y = Ytrain[[cv]][[1]],
        X = Xtrain[[cv]],
        sigma2 = sigma2[1],
        N = Nt[1],
        Ng = Ng,
        lambda = lambda,
        maxit = 50,
        verbose = 1,
        threshold = 1e-3
      )
      ## condition 2; NOTE(review): sigma2[1] is reused here as well --
      ## confirm a shared initial variance is intended
      params.opt[[ix]][[2]] = sparse_maximum_likehood_iPALM(
        B = Bs[[2]],
        f = fs[[2]],
        Y = Ytrain[[cv]][[2]],
        X = Xtrain[[cv]],
        sigma2 = sigma2[1],
        N = Nt[2],
        Ng = Ng,
        lambda = lambda,
        maxit = 50,
        verbose = 1,
        threshold = 1e-3
      )
      ## held-out prediction error on the test fold
      Nc = sapply(Ytest[[cv]], ncol)
      err = SML_error(
        Xtest[[cv]],
        Ytest[[cv]],
        list(params.opt[[ix]][[1]]$B, params.opt[[ix]][[2]]$B),
        list(params.opt[[ix]][[1]]$f, params.opt[[ix]][[2]]$f),
        Ng,
        Nc,
        K
      )[1]
      cverrs[[ix]] = c(cverrs[[ix]], err)
      ## record the lambda grid once (first fold only)
      if (cv == 1) {
        hyper.params = c(hyper.params, lambda)
      }
      ix = ix + 1
    }
  }
  list(opt.hyperparams = hyper.params,
       cverrs = do.call(rbind, cverrs))
}
## Pick the lasso penalty from cv_SMLasso output.
## se = TRUE applies the one-standard-error rule (largest lambda whose mean
## CV error is within one sd of the minimum); otherwise the minimizer.
optLasso_cv = function(cvparams, se = TRUE) {
  lambdas = cvparams$opt.hyperparams
  means = apply(cvparams$cverrs, 1, mean)
  sds = apply(cvparams$cverrs, 1, sd)
  best = which.min(means)
  if (!se) {
    return(lambdas[best])
  }
  ## one-standard-error rule
  upper = means[best] + sds[best]
  max(lambdas[means <= upper])
}
## stability selection
## Stability selection for the "gen" fused-lasso SEM: refit genSML_iPALM on
## Nbootstrap random column subsamples (fraction Nsample) and count, per
## edge, how often it is selected in each condition (Ns) and how often it is
## differential between the two conditions (Ds).
#' @param Bs,fs,sigma2 initial estimates passed through to genSML_iPALM
#' @param lambda,rho hyper-parameters used for every subsample fit
#' @return list(fit = per-subsample fits, Ns = selection counts per
#'   condition, Ds = differential-edge counts)
ssSML_iPALM = function(Bs,
                       fs,
                       Ys,
                       Xs,
                       sigma2,
                       Ng,
                       lambda,
                       rho,
                       wBs = inverse(Bs),
                       rB = flinv(Bs),
                       maxit = 100,
                       acc = TRUE,
                       inertial = inertial_pars("lin"),
                       threshold = 1e-3,
                       sparse = FALSE,
                       use.strict = TRUE,
                       Nbootstrap = 100,
                       Nsample = 0.75,
                       verbose = 2) {
  N = ncol(Ys[[1]])
  ## one random subsample of column indices per bootstrap replicate
  ss.fold = lapply(1:Nbootstrap,
                   function(n) {
                     sample(seq(1, N), ceiling(N * Nsample), replace = F)
                   })
  ss.fit = list()
  N.ss = vector("list", 2)
  D.ss = NULL
  for (i in 1:Nbootstrap) {
    Yss = lapply(Ys, function(Y) { Y[, ss.fold[[i]]] })
    Xss = list()
    for (k in 1:length(Xs)) {
      ## fix: subset by the bootstrap index i (was ss.fold[[k]], the
      ## condition index, which mismatched X and Y samples)
      Xss[[k]] = lapply(Xs[[k]], function(X) { X[, ss.fold[[i]], drop = F] })
    }
    ## fix: fit with the function's own arguments (the original hard-coded
    ## the globals params.init, cvlambda.opt and data, leaving the
    ## arguments dead)
    ss.fit[[i]] = genSML_iPALM(
      Bs = Bs,
      fs = fs,
      Ys = Yss,
      Xs = Xss,
      sigma2 = sigma2[1],
      Ng = Ng,
      lambda = lambda,
      rho = rho,
      wBs = wBs,
      rB = rB,
      maxit = maxit,
      threshold = threshold,
      use.strict = use.strict,
      acc = acc,
      inertial = inertial,
      sparse = sparse,
      verbose = verbose
    )
    ## an edge counts as selected when its magnitude exceeds the fit's
    ## convergence error
    err2abs = ss.fit[[i]]$err
    for (k in 1:2) {
      sel = ifelse(abs(ss.fit[[i]]$B[[k]]) > err2abs, 1, 0)
      N.ss[[k]] = if (is.null(N.ss[[k]])) sel else N.ss[[k]] + sel
    }
    ## differential edge: |B1 - B2| beats both the smaller magnitude and
    ## the 20th-percentile magnitude of all nonzero coefficients
    nonzero = c(as.numeric(ss.fit[[i]]$B[[1]]), as.numeric(ss.fit[[i]]$B[[2]]))
    nonzero = nonzero[nonzero != 0]
    thresh.2 = sort(abs(nonzero))[round(0.2 * length(nonzero)) + 1]
    dB = abs(ss.fit[[i]]$B[[1]] - ss.fit[[i]]$B[[2]])
    diffsel = ifelse(dB > pmin(abs(ss.fit[[i]]$B[[1]]), abs(ss.fit[[i]]$B[[2]])) &
                       dB > thresh.2, 1, 0)
    D.ss = if (is.null(D.ss)) diffsel else D.ss + diffsel
  }
  list(fit = ss.fit, Ns = N.ss, Ds = D.ss)
}
################################################################# adaptive hyper-parameter version ##########################
## cross-validation for adaptive hyper-parameter ###
## version 2, added Jun 25 ###
## ###
#############################################################################################################################
## Shrinkage fused lasso regularizer parameter
#--------------------------------------------------------------------
## cross-validation on adaptive rho(fused lasso params) of lambda
## same function names but different scheme
#--------------------------------------------------------------------
## Largest useful fused-lasso penalty (rho.max) for the multi-condition
## model at a GIVEN lambda: fit once with rho = Inf (forcing B1 == B2),
## then return the largest weighted off-diagonal entry of the penalized
## likelihood gradient, including the active lasso subgradient term.
get_rho.max = function(Bs, fs, Ys, Xs, lambda, sigma2, Ng, weighted = TRUE) {
  ## fused-only fit: rho = Inf ties the two networks together
  params.rho = multiSML_iPALM(
    Bs,
    fs,
    Ys,
    Xs,
    sigma2,
    Ng,
    lambda = lambda,
    rho = Inf,
    maxit = 2000,
    threshold = 1e-4,
    use.strict = F,
    sparse = T,
    verbose = 1
  )
  ## penalty weights come from the INITIAL estimates; the gradient is
  ## evaluated at the fused fit
  weight.rho = flinv(Bs)
  weight.lambda = inverse(Bs)[[1]]
  Bs = params.rho$B[[1]]   # B1 == B2 at the fused solution
  fs = params.rho$f
  sigma2 = params.rho$sigma2
  std = centralize_mult(Xs, Ys)
  Xs = std$X ## Ng (n x sk)
  Ys = std$Y ## n x ng
  ## multiple
  K = length(Ys)
  Ns = sapply(Ys, nrow)
  Bc = vector("list", K)
  YY = vector("list", K)
  FX = vector("list", K)
  Dx = vector("list", K)
  for (k in 1:K) {
    ## derivative of -N log det(I - B) with respect to B
    Bc[[k]] = -Ns[k] * t(solve(diag(Ng) - Bs))
    YY[[k]] = crossprod(Ys[[k]]) ## Y %*% t(Y)
    FX[[k]] = matrix(0, nrow = Ng, ncol = Ns[k]) ## F %*% X (p x k x k x n = p x n)
    for (i in 1:Ng) {
      ## shared eQTL design: Xs[[i]] is gene i's design for both conditions
      FX[[k]][i, ] = as.numeric(Xs[[i]] %*% fs[[k]][[i]])
    }
    ## weighted absolute gradient plus lasso subgradient; diagonal excluded
    Dx[[k]] = abs(Bc[[k]] + ((diag(Ng) - Bs) %*% YY[[k]] - FX[[k]] %*% Ys[[k]]) / sigma2 + lambda * weight.lambda * sign(Bs)) / weight.rho
    diag(Dx[[k]]) = -Inf
  }
  max(c(max(Dx[[1]]), max(Dx[[2]])))
}
### debug switch
## 5-fold cross-validation over a (lambda, rho) grid for the
## multi-condition fused-lasso SEM (multiSML_iPALM, shared eQTL design).
## For each lambda an adaptive rho path is built from get_rho.max, and the
## fits are warm-started along each rho path. Returns the hyper-parameter
## grid with per-point CV errors and held-out log-likelihoods, consumed by
## optimLambda_cv().
## NOTE(review): the `logLik` argument is never used -- confirm intent.
cv_multiSML = function(Bs,
                       fs,
                       Ys,
                       Xs,
                       sigma2,
                       Ng,
                       nlambda = 20,
                       nrho = 20,
                       threshold = 1e-4,
                       logLik = TRUE,
                       verbose = 1) {
  ## log-spaced lasso path over 4 decades below lambda.max
  lambda.max = get_lambda.max(Bs, Ys, Xs, Ng)
  lambda.factors = 10 ^ seq(0,-4, length.out = nlambda) * lambda.max
  ## adaptive penalty weights from the initial estimates
  wBs = inverse(Bs)
  rB = flinv(Bs)
  ncv = 5
  Ns = sapply(Ys, ncol)
  K = length(Ys)
  Ytrain = vector("list", ncv)
  Xtrain = vector("list", ncv)
  Ytest = vector("list", ncv)
  Xtest = vector("list", ncv)
  ## one fold assignment shared by both conditions (samples assumed paired)
  cv.fold = sample(seq(1, ncv), size = Ns[1], replace = T)
  for (i in 1:ncv) {
    Ytrain[[i]] = lapply(Ys, function(y) {
      y[, cv.fold != i, drop = F]
    })
    Xtrain[[i]] = lapply(Xs, function(x) {
      x[, cv.fold != i, drop = F]
    })
    Ytest[[i]] = lapply(Ys, function(y) {
      y[, cv.fold == i, drop = F]
    })
    Xtest[[i]] = lapply(Xs, function(x) {
      x[, cv.fold == i, drop = F]
    })
  }
  cverrs = vector("list", nrho * nlambda)
  cvlls = vector("list", nrho * nlambda)
  params = NULL
  rho.factors = list()
  il = 1
  ## build the full (lambda, rho) grid; rho.max adapts to each lambda
  for (lambda in lambda.factors) {
    rho.max = get_rho.max(Bs, fs, Ys, Xs, lambda, sigma2, Ng)
    rho.factors[[il]] = 10 ^ seq(0,-4, length.out = nrho) * rho.max
    params = c(params, lapply(rho.factors[[il]], function(rho) {
      c(lambda, rho)
    }))
    il = il + 1
  }
  for (cv in 1:ncv) {
    params.opt = list()
    Nc = sapply(Ytest[[cv]], ncol)
    ix = 1
    il = 1
    for (lambda in lambda.factors) {
      for (rho in rho.factors[[il]]) {
        cat(sprintf("lambda = %4f, rho = %4f, foldid = %d\n", lambda, rho, cv))
        ## cold start at the head of each rho path, warm start along it.
        ## NOTE(review): `ix %% nrho == 1` never holds when nrho == 1, so
        ## params.opt[[ix - 1]] would fail -- nrho > 1 appears assumed
        if (ix %% nrho == 1) {
          params.opt[[ix]] = multiSML_iPALM(
            Bs,
            fs,
            Ytrain[[cv]],
            Xtrain[[cv]],
            sigma2,
            Ng,
            lambda,
            rho,
            wBs,
            rB,
            maxit = 1000,
            threshold = threshold,
            acc = TRUE,
            sparse = FALSE,
            use.strict = FALSE,
            verbose = verbose
          )
        } else {
          ## warm start from the previous grid point on this rho path
          params.opt[[ix]] = multiSML_iPALM(
            params.opt[[ix - 1]]$B,
            params.opt[[ix - 1]]$f,
            Ytrain[[cv]],
            Xtrain[[cv]],
            params.opt[[ix - 1]]$sigma2,
            Ng,
            lambda,
            rho,
            wBs,
            rB,
            maxit = 1000,
            threshold = threshold,
            acc = TRUE,
            sparse = FALSE,
            use.strict = FALSE,
            verbose = verbose
          )
        }
        ## held-out log-likelihood and prediction error for this grid point
        loglik = SML_logLik(
          Xtest[[cv]],
          Ytest[[cv]],
          params.opt[[ix]]$B,
          params.opt[[ix]]$f,
          Ng,
          Nc,
          K,
          params.opt[[ix]]$detIB,
          params.opt[[ix]]$sigma2
        )[1]
        err = SML_error(Xtest[[cv]],
                        Ytest[[cv]],
                        params.opt[[ix]]$B,
                        params.opt[[ix]]$f,
                        Ng,
                        Nc,
                        K)[1]
        cverrs[[ix]] = c(cverrs[[ix]], err)
        cvlls[[ix]] = c(cvlls[[ix]], loglik)
        ix = ix + 1
      } ## rho
      il = il + 1
    } ## lambda
  } ## ncv
  list(opt.hyperparams = do.call(rbind, params),
       cverrs = do.call(rbind, cverrs),
       loglik = do.call(rbind, cvlls))
}
## ultility functions
## pick lambda
## Select (lambda, rho) from cv_multiSML output: lambda by the
## one-standard-error rule over per-lambda averaged CV curves (min_Lambda),
## then rho within the chosen lambda's profile.
## NOTE(review): shadowed by the later optimLambda_cv definition in this
## file; only the last definition is active after sourcing.
optimLambda_cv = function(cvparams, type = c("err", "loglik"), se = TRUE, fused.sparse = TRUE) {
  ## fix: collapse the default length-2 vector; the original evaluated
  ## `type == "err"` on the whole default vector inside if()
  type = match.arg(type)
  cvm = if (type == "err") {
    cvparams$cverrs
  } else {
    cvparams$loglik
  }
  cvfuns = data.frame(
    lambda = cvparams$opt.hyperparams[, 1],
    rho = cvparams$opt.hyperparams[, 2],
    cvmean = apply(cvm, 1, mean),
    cvsd = apply(cvm, 1, sd)
  )
  cv.min = which.min(cvfuns$cvmean)
  ## average the CV curve within each lambda across its rho path
  cvlambda = split(cvfuns, cvfuns$lambda)
  cvms = data.frame(
    lambda = as.numeric(names(cvlambda)),
    cvmean = sapply(cvlambda, function(x) { mean(x$cvmean) }),
    ## NOTE(review): sqrt(length(x)) is the number of data.frame COLUMNS
    ## (always 4), not the number of rho values; nrow(x) looks intended --
    ## kept as-is pending confirmation
    cvsd = sapply(cvlambda, function(x) { sum(x$cvsd) / sqrt(length(x)) })
  )
  lambda.1se = min_Lambda(cvms$lambda, cvms$cvmean, cvms$cvsd)
  ## then pick rho within the chosen lambda's profile
  rhos = cvlambda[[as.character(lambda.1se)]]
  rho.1se = min_Lambda(rhos$rho, rhos$cvmean, rhos$cvsd)
  if (se) {
    list(lambda = lambda.1se, rho = rho.1se)
  } else {
    list(lambda = cvfuns[cv.min, 1], rho = cvfuns[cv.min, 2])
  }
}
## One-standard-error rule on a CV curve: locate the (largest) penalty that
## attains the minimum mean CV error, then return the largest penalty whose
## mean error stays within one sd of that reference point.
min_Lambda = function(lambda, cvmean, cvsd) {
  best = min(cvmean, na.rm = TRUE)
  ## among all minimizers, prefer the largest (sparsest) penalty
  lam.best = max(lambda[cvmean <= best], na.rm = TRUE)
  ref = match(lam.best, lambda)
  within.1se = cvmean <= cvmean[ref] + cvsd[ref]
  max(lambda[within.1se], na.rm = TRUE)
}
################## adaptive version of genSML
######################
# comparable method
######################
## cross validation for hyper-parameter tuning
#' @description 5-fold cross-validation
#' @param dyn dynamic updated rho.max by given lambda
#' @example
#' cv.params = cv_multiSML(Bs = params.init$B, fs = params.init$F, Ys = Ys, Xs = Xs, sigma2 = params.init$sigma2[1], Ng = data$var$Ng, nlambda = 20, nrho = 20)
## 5-fold cross-validation over a (lambda, rho) grid for the "gen" model
## (condition-specific eQTL designs, genSML_iPALM). Folds are drawn
## independently per condition; for each lambda an adaptive rho path is
## built from gen_rho.max and fits are warm-started along it.
cv_genSML = function(Bs,
                     fs,
                     Ys,
                     Xs,
                     sigma2,
                     Ng,
                     nlambda = 20,
                     nrho = 20,
                     weighted = TRUE,
                     threshold = 1e-4,
                     use.strict = FALSE,
                     verbose = 1) {
  ## log-spaced lasso path over 4 decades below lambda.max
  lambda.max = gen_lambda.max(Bs, Ys, Xs, Ng, weighted)
  lambda.factors = 10 ^ seq(0,-4, length.out = nlambda) * lambda.max
  ## adaptive (inverse/flinv) vs. unit (invone/flone) penalty weights
  if (weighted) {
    wBs = inverse(Bs)
    rB = flinv(Bs)
  } else {
    wBs = invone(Bs)
    rB = flone(Bs)
  }
  ncv = 5
  Ns = sapply(Ys, ncol)
  K = length(Ys)
  Ytrain = vector("list", ncv)
  Xtrain = vector("list", ncv)
  Ytest = vector("list", ncv)
  Xtest = vector("list", ncv)
  ## independent fold assignments per condition (sample sizes may differ)
  cv.fold = list()
  cv.fold[[1]] = sample(seq(1, ncv), size = Ns[1], replace = T)
  cv.fold[[2]] = sample(seq(1, ncv), size = Ns[2], replace = T)
  for (i in 1:ncv) {
    Ytrain[[i]] = lapply(1:2, function(ix) {
      Ys[[ix]][, cv.fold[[ix]] != i, drop = F]
    })
    Xtrain[[i]] = lapply(1:2, function(ix) {
      lapply(Xs[[ix]], function(x) {
        x[, cv.fold[[ix]] != i, drop = F]
      })
    })
    Ytest[[i]] = lapply(1:2, function(ix) {
      Ys[[ix]][, cv.fold[[ix]] == i, drop = F]
    })
    Xtest[[i]] = lapply(1:2, function(ix) {
      lapply(Xs[[ix]], function(x) {
        x[, cv.fold[[ix]] == i, drop = F]
      })
    })
  }
  cverrs = vector("list", nrho * nlambda)
  cvlls = vector("list", nrho * nlambda)
  hyper.params = list()
  rho.factors = list()
  il = 1
  ## adaptive rho path per lambda.
  ## fix: the original also accumulated `params = c(params, ...)` here with
  ## no prior binding for `params` -- an undefined-variable error (or a
  ## silent capture of a global) -- and never used the result; that dead
  ## accumulation is removed (the grid is returned via hyper.params below)
  for (lambda in lambda.factors) {
    rho.max = gen_rho.max(Bs, fs, Ys, Xs, lambda, sigma2, Ng)
    rho.factors[[il]] = 10 ^ seq(0,-4, length.out = nrho) * rho.max
    il = il + 1
  }
  for (cv in 1:ncv) {
    params.opt = list()
    Nc = sapply(Ytest[[cv]], ncol)
    ix = 1
    il = 1
    for (lambda in lambda.factors) {
      for (rho in rho.factors[[il]]) {
        cat(sprintf("lambda = %4f, rho = %4f, foldid = %d\n", lambda, rho, cv))
        ## cold start at the head of each rho path, warm start along it
        if (ix %% nrho == 1) {
          params.opt[[ix]] = genSML_iPALM(
            Bs,
            fs,
            Ytrain[[cv]],
            Xtrain[[cv]],
            sigma2,
            Ng,
            lambda,
            rho,
            wBs,
            rB,
            maxit = 1000,
            threshold = threshold,
            acc = TRUE,
            sparse = FALSE,
            use.strict = use.strict,
            verbose = verbose
          )
        } else {
          params.opt[[ix]] = genSML_iPALM(
            params.opt[[ix - 1]]$B,
            params.opt[[ix - 1]]$f,
            Ytrain[[cv]],
            Xtrain[[cv]],
            params.opt[[ix - 1]]$sigma2,
            Ng,
            lambda,
            rho,
            wBs,
            rB,
            maxit = 1000,
            threshold = threshold,
            acc = TRUE,
            sparse = FALSE,
            use.strict = use.strict,
            verbose = verbose
          )
        }
        ## held-out log-likelihood and prediction error
        loglik = gen_logLik(
          Xtest[[cv]],
          Ytest[[cv]],
          params.opt[[ix]]$B,
          params.opt[[ix]]$f,
          Ng,
          Nc,
          K,
          params.opt[[ix]]$detIB,
          params.opt[[ix]]$sigma2
        )[1]
        err = gen_error(Xtest[[cv]],
                        Ytest[[cv]],
                        params.opt[[ix]]$B,
                        params.opt[[ix]]$f,
                        Ng,
                        Nc,
                        K)[1]
        cverrs[[ix]] = c(cverrs[[ix]], err)
        cvlls[[ix]] = c(cvlls[[ix]], loglik)
        ## record the (lambda, rho) grid once (first fold only)
        if (cv == 1) {
          hyper.params[[ix]] = c(lambda, rho)
        }
        ix = ix + 1
      } ## rho
      il = il + 1
    }
  }
  list(opt.hyperparams = hyper.params,
       cverrs = cverrs,
       loglik = cvlls)
}
## cross-validation and EBIC for hyper-parameter tuning
## rho max can be estimated, rho is the fused lasso
## regularized hyper parameter
#' @description get_rho.max
#' @example
#' rhomax = get_rho.max(params.init$B, params.init$F, Ys, Xs, params.init$sigma2[1], data$var$Ng)
## Largest useful fused-lasso penalty (rho.max) for the "gen" model at a
## GIVEN lambda: fit once with rho = Inf (forcing B1 == B2), then return
## the largest weighted off-diagonal entry of the penalized likelihood
## gradient, including the active lasso subgradient term. This definition
## supersedes the earlier 7-argument gen_rho.max above.
gen_rho.max = function(Bs, fs, Ys, Xs, lambda, sigma2, Ng, weighted = TRUE) {
  ## adaptive (inverse/flinv) vs. unit (invone/flone) penalty weights
  if(weighted) {
    wBs = inverse(Bs)
    rB = flinv(Bs)
  } else {
    wBs = invone(Bs)
    rB = flone(Bs)
  }
  ## fused-only fit: rho = Inf ties the two networks together
  params.rho = genSML_iPALM(
    Bs,
    fs,
    Ys,
    Xs,
    sigma2,
    Ng,
    lambda = lambda,
    rho = Inf,
    wBs = wBs,
    rB = rB,
    maxit = 2000,
    threshold = 1e-4,
    use.strict = F,
    sparse = T,
    verbose = 1
  )
  ## penalty weights from the INITIAL estimates; gradient at the fused fit
  weight.rho = flinv(Bs)
  weight.lambda = inverse(Bs)[[1]]
  Bs = params.rho$B[[1]]   # B1 == B2 at the fused solution
  fs = params.rho$f
  sigma2 = params.rho$sigma2
  std = centralize_gen(Xs, Ys)
  Xs = std$X ## Ng (n x sk)
  Ys = std$Y ## n x ng
  ## multiple
  K = length(Ys)
  Ns = sapply(Ys, nrow)
  Bc = vector("list", K)
  YY = vector("list", K)
  FX = vector("list", K)
  Dx = vector("list", K)
  for (k in 1:K) {
    ## derivative of -N log det(I - B) with respect to B
    Bc[[k]] = -Ns[k] * t(solve(diag(Ng) - Bs))
    YY[[k]] = crossprod(Ys[[k]]) ## Y %*% t(Y)
    FX[[k]] = matrix(0, nrow = Ng, ncol = Ns[k]) ## F %*% X (p x k x k x n = p x n)
    for (i in 1:Ng) {
      ## condition-specific eQTL design Xs[[k]][[i]] for gene i
      FX[[k]][i, ] = as.numeric(Xs[[k]][[i]] %*% fs[[k]][[i]])
    }
    ## weighted absolute gradient plus lasso subgradient; diagonal excluded
    Dx[[k]] = abs(Bc[[k]] + ((diag(Ng) - Bs) %*% YY[[k]] - FX[[k]] %*% Ys[[k]]) / sigma2 + lambda * weight.lambda * sign(Bs)) / weight.rho
    diag(Dx[[k]]) = -Inf
  }
  ## Dxy = abs((diag(Ng) - Bs) %*% (YY[[2]] - YY[[1]]) - (FX[[2]] %*% Ys[[2]] - FX[[1]] %*% Ys[[1]])) / sigma2 / 2 / weight.rho
  ## diag(Dxy) = -Inf
  max(c(max(Dx[[1]]), max(Dx[[2]])))
  ## max(Dxy)
}
############# new version
## ultility functions
## pick lambda
## Select (lambda, rho) from cv_multiSML output by the one-standard-error
## rule over the joint grid. fused.sparse = TRUE favors the largest rho in
## the 1se set (sparser differential network), otherwise the largest lambda.
optimLambda_cv = function(cvparams, type = c("err", "loglik"), se = TRUE, fused.sparse = TRUE) {
  ## fix: collapse the default length-2 vector; the original evaluated
  ## `type == "err"` on the whole default vector inside if()
  type = match.arg(type)
  cvm = if (type == "err") {
    cvparams$cverrs
  } else {
    cvparams$loglik
  }
  cvfuns = data.frame(
    lambda = cvparams$opt.hyperparams[, 1],
    rho = cvparams$opt.hyperparams[, 2],
    cvmean = apply(cvm, 1, mean),
    cvsd = apply(cvm, 1, sd)
  )
  cv.min = which.min(cvfuns$cvmean)
  ## 1se set: grid points within one sd of the minimum mean CV value
  cv.1se = cvfuns[cv.min, 3] + cvfuns[cv.min, 4]
  cvfun.1se = cvfuns[cvfuns$cvmean <= cv.1se, , drop = FALSE]
  if (fused.sparse) {
    rho.1se = max(cvfun.1se$rho)
    ## fix: index the lambda within the matching subset; the original
    ## applied the subset-relative which.min() to the full 1se lambda
    ## vector, picking an unrelated lambda
    sub = cvfun.1se[cvfun.1se$rho == rho.1se, , drop = FALSE]
    lambda.1se = sub$lambda[which.min(sub$cvmean)]
  } else {
    lambda.1se = max(cvfun.1se$lambda)
    sub = cvfun.1se[cvfun.1se$lambda == lambda.1se, , drop = FALSE]
    rho.1se = sub$rho[which.min(sub$cvmean)]
  }
  if (se) {
    list(lambda = lambda.1se, rho = rho.1se)
  } else {
    list(lambda = cvfuns[cv.min, 1], rho = cvfuns[cv.min, 2])
  }
}
#############################################
## new version of genSML
#############################################
## solve SML problem by block coordinate descent by backtracking inert-PALM on
## different gene expression and different eQTLs
#' @param lambda lambda hyper-parameter for lasso term
#' @param rho fused lasso hyper-parameter for fused lasso term
#' @param gamma invertible matrix stablize parameter gamma
#' params.init = constrained_L2reg_gen(Xs, Ys, sigma2$rho.opt, M, N)
#' params.opt = genSML_iPALM(Bs = params.init$B, fs = params.init$F, Ys = Ys, Xs = Xs,
#' sigma2 = params.init$sigma2[1], Ng = data$var$Ng,
#' lambda = 0.1, rho = 0.1, maxit = 500)
## iPALM solver for the two-condition sparse SEM with condition-specific
## eQTL designs: row-wise inertial proximal updates of B under a lasso
## (lambda, weights wBs) + fused-lasso (rho, weights rB) penalty, with
## closed-form eQTL-coefficient (f) updates per row.
genSML_iPALM = function(Bs,
                        fs,
                        Ys,
                        Xs,
                        sigma2,
                        Ng,
                        lambda,
                        rho,
                        wBs = inverse(Bs),
                        rB = flinv(Bs),
                        maxit = 100,
                        acc = TRUE,
                        inertial = inertial_pars("lin"),
                        threshold = 1e-3,
                        sparse = FALSE,
                        use.strict = TRUE,
                        verbose = 2) {
  std = centralize_gen(Xs, Ys)
  Xs = std$X
  Ys = std$Y
  meanXs = std$muX
  meanYs = std$muY
  K = length(Ys) # number of conditions = 2
  if (verbose == 2) {
    cat(sprintf("conditions must be restricted to 2.. K = %d\n", K))
  }
  ## per-gene precomputation: projections for the closed-form f-update and
  ## quadratic pieces for the row-wise B-update
  f0 = vector("list", K)
  f1 = vector("list", K)
  Yp = vector("list", K)
  Hy = vector("list", K)
  Yp.maxEigen = vector("list", K)
  Ns = sapply(Ys, nrow)
  for (i in 1:Ng) {
    for (k in 1:K) {
      Xi = Xs[[k]][[i]]
      Pi = solve(crossprod(Xi)) %*% t(Xi)
      # specific condition
      yi_k = Ys[[k]][, i, drop = F] # n[k] x 1 for gene i
      Yi_k = Ys[[k]][,-i] # n[k] x (ng-1) (for specific gene i)
      f0[[k]][[i]] = Pi %*% yi_k
      f1[[k]][[i]] = Pi %*% Yi_k # f[[k]][[i]] = f0[[k]][[i]] - f1[[k]][[i]] %*% bi^k | bi^k = B[[k]][i,-i]
      Hi_k = diag(Ns[k]) - Xi %*% Pi # n[k] x n[k] projection matrix
      Yp[[k]][[i]] = t(Yi_k) %*% Hi_k %*% Yi_k # (ng - 1) x (ng - 1)
      Hy[[k]][[i]] = t(yi_k) %*% Hi_k %*% Yi_k # 1 x (ng - 1)
      ## largest eigenvalue of Yp, used for the Lipschitz modulus
      Yp.maxEigen[[k]][[i]] = eigen(Yp[[k]][[i]])$values[1]
    }
  }
  ## update for gnet row-wise
  niter = 1
  ImBs = lapply(Bs, function(B) {
    diag(Ng) - B
  })
  detIBs = sapply(ImBs, det)
  IBsinv = lapply(ImBs, solve)
  ## fix: the original used the undefined name `N` here; Ng matches the
  ## objective recomputed inside the main loop below
  Ls = logLik(detIBs, Bs, wBs, rB, lambda, rho, Ns, K) + Ng * sum(Ns) / 2 * log(sigma2)
  ## two-step history of B for the inertial extrapolation
  Bs.prevs = list(Bs, Bs)
  inert = acc
  while (niter <= maxit) {
    inert.pars = inertial(niter)
    Bs.inert = if (inert) {
      lapply(1:K, function(k) {
        Bs.prevs[[2]][[k]] + inert.pars * (Bs.prevs[[2]][[k]] - Bs.prevs[[1]][[k]])
      })
    } else {
      Bs
    }
    fs.prev = fs
    Ls.prev = Ls
    for (i in 1:Ng) {
      ## sum(-Ns[k] * sigma2 * log(det(I-B[[k]])^2)) + \sum_{i=1}^{Ng} bi^T %*% Yp %*% bi - Hy %*% bi)
      ci = lapply(IBsinv, function(IBi) {
        # ci / det(I - B); from (I - B)^{-1}
        IBi[-i, i, drop = F]
      })
      bi = lapply(Bs.inert, function(B.inert) {
        t(B.inert[i,-i, drop = F])
      })
      gi = lapply(1:K, function(k) {
        grad = grad_rwise_SML(Ns[k], ci[[k]], Yp[[k]][[i]], Hy[[k]][[i]], sigma2[1])
        grad(bi[[k]])
      })
      ## Lipschitz moduli for row-i
      Lis = sapply(1:K, function(k) {
        gtg = tcrossprod(ImBs[[k]][-i, ])
        oi = chol2inv(gtg)
        deti = det(gtg)
        gii = ImBs[[k]][-i,-i]
        si = ImBs[[k]][-i, i, drop = F]
        c2i = sum((ci[[k]] * detIBs[k]) ^ 2)
        lips_rwise_SML(Ns[k],
                       oi,
                       gii,
                       si,
                       c2i,
                       deti,
                       Yp.maxEigen[[k]][[i]],
                       sigma2,
                       Ng)[1]
      })
      Li = max(Lis)
      Li = (1 + 2 * inert.pars) * Li / (2 * (1 - inert.pars))
      ## proximal step; retry with a larger modulus whenever the update
      ## would make I - B singular
      detZero = TRUE
      while (detZero) {
        ui = lapply(1:K, function(k) {
          bi[[k]] - gi[[k]] / Li
        })
        wBi = lapply(wBs, function(wB) {
          wB[i, -i]
        })
        rBi = rB[i, -i]
        xi = prox_flsa(lambda, rho, Li, ui, wBi, rBi)
        dIBu = sapply(1:K, function(k) {
          IBsinv[[k]][i, i] - (t(xi[[k]]) %*% IBsinv[[k]][-i, i, drop = F])[1]
        })
        detZero = any(dIBu == 0)
        ## fix: shrink the step by doubling Li on a singular proposal; the
        ## original doubled an unused counter (cl), so a singular proposal
        ## looped forever with identical iterates
        if (detZero) {
          Li = Li * 2
        }
      }
      for (k in 1:K) {
        Bs[[k]][i,-i] = xi[[k]]
        ImBs[[k]] = diag(Ng) - Bs[[k]]
        ## rank-one updates of det(I - B) and (I - B)^{-1} after the row change
        detIBs[k] = (ImBs[[k]][i, ] %*% IBsinv[[k]][, i, drop = F])[1] * detIBs[k]
        dbi = Bs.prevs[[2]][[k]][i, ] - Bs[[k]][i, ]
        IBsinv[[k]] = IBsinv[[k]] - IBsinv[[k]][, i, drop = F] %*% dbi %*% IBsinv[[k]] / (1 + dbi %*% IBsinv[[k]][, i, drop = F])[1]
        ## closed-form f-update given the new row of B
        fs[[k]][[i]] = f0[[k]][[i]] - f1[[k]][[i]] %*% Bs[[k]][i,-i]
      }
    } # row-wise update
    ## relative-change criteria for B and f
    Berr = sum(sapply(1:K, function(k) {
      norm(Bs[[k]] - Bs.prevs[[2]][[k]], type = "f") / (1 + norm(Bs.prevs[[2]][[k]], type = "f"))
    }))
    Ferr = sum(sapply(1:K, function(k) {
      sum(sapply(1:Ng, function(i) {
        norm(fs[[k]][[i]] - fs.prev[[k]][[i]], type = "f")
      })) / (1 + sum(sapply(1:Ng, function(i) {
        norm(fs.prev[[k]][[i]], type = "f")
      })))
    }))
    err = Berr + Ferr
    sigma2 = sigma2_gen(Xs, Ys, Bs, fs, Ng, Ns, K)
    Ls = logLik(detIBs, Bs, wBs, rB, lambda, rho, Ns, K) + Ng * sum(Ns) / 2 * log(sigma2)
    Lerr = abs(Ls.prev - Ls) / (1 + abs(Ls.prev))
    # inert = ifelse(Lerr < 1e-8, FALSE, acc)
    if (verbose >= 2) {
      cat(
        sprintf(
          "SML: iteration = %d, error = %f, logLik = %f, sigma2 = %f, inert = %s\n",
          niter,
          err,
          Ls,
          sigma2,
          inert
        )
      )
    }
    niter = niter + 1
    Bs.prevs = list(Bs.prevs[[2]], Bs)
    ## strict: both parameter and objective change below threshold
    opt.cond = if (use.strict) {
      (err < threshold && Lerr < threshold)
    } else {
      (err < threshold || Lerr < threshold)
    }
    if (opt.cond || niter > maxit || is.nan(err)) {
      ## recover intercepts from the removed means
      mu = lapply(1:K, function(k) {
        (diag(Ng) - Bs[[k]]) %*% meanYs[[k]] - sapply(1:Ng, function(i) {
          meanXs[[k]][[i]] %*% fs[[k]][[i]]
        })
      })
      sigma2 = sigma2_gen(Xs, Ys, Bs, fs, Ng, Ns, K)
      if (sparse) {
        Bs = lapply(Bs, Matrix, sparse = T)
      }
      break
    }
  } # while(niter <= maxit)
  list(
    B = Bs,
    f = fs,
    mu = mu,
    sigma2 = sigma2,
    niter = niter,
    err = err,
    detIB = detIBs
  )
}
############# new version
## ultility functions
## pick lambda
## Select (lambda, rho) from cv_genSML output (list-shaped grid/errors) by
## the one-standard-error rule; fused.sparse = TRUE favors the largest rho
## in the 1se set, otherwise the largest lambda.
optimLambda_cv1 = function(cvparams,
                           type = c("err", "loglik"),
                           se = TRUE,
                           fused.sparse = TRUE) {
  ## fix: collapse the default length-2 vector; the original evaluated
  ## `type == "err"` on the whole default vector inside if()
  type = match.arg(type)
  cvm = if (type == "err") {
    cvparams$cverrs
  } else {
    cvparams$loglik
  }
  cvfuns = data.frame(
    lambda = sapply(cvparams$opt.hyperparams, `[`, 1),
    rho = sapply(cvparams$opt.hyperparams, `[`, 2),
    cvmean = sapply(cvm, mean),
    cvsd = sapply(cvm, sd)
  )
  cv.min = which.min(cvfuns$cvmean)
  ## 1se set: grid points within one sd of the minimum mean CV value
  cv.1se = cvfuns[cv.min, 3] + cvfuns[cv.min, 4]
  cvfun.1se = cvfuns[cvfuns$cvmean <= cv.1se, , drop = FALSE]
  if (fused.sparse) {
    rho.1se = max(cvfun.1se$rho)
    ## fix: index lambda within the matching subset; the original applied
    ## the subset-relative which.min() to the full 1se lambda vector
    sub = cvfun.1se[cvfun.1se$rho == rho.1se, , drop = FALSE]
    lambda.1se = sub$lambda[which.min(sub$cvmean)]
  } else {
    lambda.1se = max(cvfun.1se$lambda)
    sub = cvfun.1se[cvfun.1se$lambda == lambda.1se, , drop = FALSE]
    rho.1se = sub$rho[which.min(sub$cvmean)]
  }
  if (se) {
    list(lambda = lambda.1se, rho = rho.1se)
  } else {
    list(lambda = cvfuns[cv.min, 1], rho = cvfuns[cv.min, 2])
  }
}
#### select subset of hyper-parameter with in 1 sd
############# new version
## ultility functions
## pick lambda
## Return the (lambda, rho) pairs of the best `ntop` grid points whose mean
## CV value lies strictly within one sd of the minimum, ordered by
## increasing mean CV value -- the candidate set for stability selection.
subLambda_ss = function(cvparams,
                        type = c("err", "loglik"),
                        ntop = 10) {
  ## fix: collapse the default length-2 vector (vector comparison in if())
  type = match.arg(type)
  cvm = if (type == "err") {
    cvparams$cverrs
  } else {
    cvparams$loglik
  }
  cvfuns = data.frame(
    lambda = sapply(cvparams$opt.hyperparams, `[`, 1),
    rho = sapply(cvparams$opt.hyperparams, `[`, 2),
    cvmean = sapply(cvm, mean),
    cvsd = sapply(cvm, sd)
  )
  cv.min = which.min(cvfuns$cvmean)
  cv.1se = cvfuns[cv.min, 3] + cvfuns[cv.min, 4]
  cvfun.1se = cvfuns[cvfuns$cvmean < cv.1se, , drop = FALSE]
  ## fix: honor the ntop argument (was hard-coded 1:10) and never index
  ## past the available rows (which produced NA rows)
  keep = order(cvfun.1se$cvmean, decreasing = F)[seq_len(min(ntop, nrow(cvfun.1se)))]
  cvfun.1se[keep, c(1, 2)]
}
####################################################
## stability selection on a class of parameters
##################
##' @title ss_fssem
## stability selection
##' @title ss_fssem
## Stability selection over a set of hyper-parameter pairs: for each row of
## `params` (lambda, rho), refit genSML_iPALM on Nbootstrap complementary
## subsample pairs and accumulate per-edge selection counts (N, one matrix
## per condition) and differential-edge counts (D) across all fits.
ss_fssem = function(Bs,
                    fs,
                    Ys,
                    Xs,
                    sigma2,
                    Ng,
                    params = NULL,
                    wBs = inverse(Bs),
                    rB = flinv(Bs),
                    maxit = 100,
                    acc = TRUE,
                    inertial = inertial_pars("lin"),
                    threshold = 1e-3,
                    sparse = FALSE,
                    use.strict = TRUE,
                    Nbootstrap = 100,
                    Nsample = 0.75,
                    verbose = 2) {
  N = ncol(Ys[[1]])
  ## paired subsamples: fold i is a random ceiling(N * Nsample) subset,
  ## fold i + 1 its complement
  ss.fold = vector("list", Nbootstrap)
  i = 1
  while (i <= Nbootstrap) {
    subs = sample(seq(1, N), ceiling(N * Nsample), replace = F)
    ss.fold[[i]] = sort(subs)
    ss.fold[[i + 1]] = setdiff(seq(1, N), subs)
    i = i + 2
  }
  ss.fit = list()
  N.ss = vector("list", 2)
  D.ss = NULL
  for (j in 1:nrow(params)) {
    lambda = params[j, 1]
    rho = params[j, 2]
    for (i in 1:Nbootstrap) {
      Yss = lapply(Ys, function(Y) {
        Y[, ss.fold[[i]]]
      })
      Xss = list()
      for (k in 1:length(Xs)) {
        ## fix: subset by the bootstrap index i (was ss.fold[[k]], the
        ## condition index, which mismatched X and Y samples)
        Xss[[k]] = lapply(Xs[[k]], function(X) {
          X[, ss.fold[[i]], drop = F]
        })
      }
      ## fix: fit with the function's own arguments (the original
      ## hard-coded the globals params.init and data, and ignored the
      ## supplied initial estimates and weights)
      ss.fit[[i]] = genSML_iPALM(
        Bs = Bs,
        fs = fs,
        Ys = Yss,
        Xs = Xss,
        sigma2 = sigma2[1],
        Ng = Ng,
        lambda = lambda,
        rho = rho,
        wBs = wBs,
        rB = rB,
        maxit = maxit,
        threshold = threshold,
        use.strict = use.strict,
        acc = acc,
        inertial = inertial,
        sparse = sparse,
        verbose = verbose
      )
      ## an edge counts as selected when its magnitude exceeds the fit's
      ## convergence error
      err2abs = ss.fit[[i]]$err
      for (k in 1:2) {
        sel = ifelse(abs(ss.fit[[i]]$B[[k]]) > err2abs, 1, 0)
        N.ss[[k]] = if (is.null(N.ss[[k]])) sel else N.ss[[k]] + sel
      }
      ## differential edge: |B1 - B2| beats both the smaller magnitude and
      ## the 20th-percentile magnitude of all nonzero coefficients
      nonzero = c(as.numeric(ss.fit[[i]]$B[[1]]), as.numeric(ss.fit[[i]]$B[[2]]))
      nonzero = nonzero[nonzero != 0]
      thresh.2 = sort(abs(nonzero))[round(0.2 * length(nonzero)) + 1]
      dB = abs(ss.fit[[i]]$B[[1]] - ss.fit[[i]]$B[[2]])
      diffsel = ifelse(dB > pmin(abs(ss.fit[[i]]$B[[1]]), abs(ss.fit[[i]]$B[[2]])) &
                         dB > thresh.2, 1, 0)
      D.ss = if (is.null(D.ss)) diffsel else D.ss + diffsel
    }
  }
  list(N = N.ss, D = D.ss)
}
## BIC-based selection of the lasso penalty for per-condition SML fits:
## fit both conditions along a log-spaced lambda path and return the lambda
## (and the paired fits) minimizing SML_BIC.
## (Unused locals of the original -- wBs, ncv, df, K -- were removed, and
## the `threshold` argument is now actually forwarded to the fitter.)
bic_SMLasso = function(Bs,
                       fs,
                       Ys,
                       Xs,
                       sigma2,
                       Ng,
                       nlambda = 20,
                       threshold = 1e-3) {
  ## log-spaced path over 3 decades below lambda.max
  lambda.max = get_lambda.max(Bs, Ys, Xs, Ng)
  lambda.factors = 10 ^ seq(0,-3, length.out = nlambda) * lambda.max
  Ns = sapply(Ys, ncol)
  allfit = list()
  berr = vector("list", nlambda)
  ix = 1
  for (lambda in lambda.factors) {
    cat(sprintf("lambda = %4f\n", lambda))
    ## fit each condition separately with the same lambda
    fit = vector("list", 2)
    for (i in 1:2) {
      fit[[i]] = sparse_maximum_likehood_iPALM(
        B = Bs[[i]],
        f = fs[[i]],
        Y = Ys[[i]],
        X = Xs,
        sigma2 = sigma2,
        N = Ns[i],
        Ng = Ng,
        lambda = lambda,
        maxit = 30,
        verbose = 1,
        threshold = threshold
      )
    }
    BIC = SML_BIC(Xs, Ys, fit, Ng, Ns, 2)
    berr[[ix]] = c(lambda, BIC)
    allfit[[ix]] = fit
    ix = ix + 1
  }
  berr = do.call(rbind, berr)
  BICmin = which.min(berr[, 2])
  list(lambda = berr[BICmin, 1], fit = allfit[[BICmin]])
}
## BIC of a pair of per-condition SML fits on shared eQTL data:
## 2 * (negative log-likelihood) + (#nonzero network edges) * log(N) summed
## over conditions.
SML_BIC = function(Xs, Ys, fit, Ng, Ns, K) {
  std = centralize_mult(Xs, Ys)
  X = lapply(std$X, t)   # per-gene shared eQTL designs, sk x N
  Y = lapply(std$Y, t)   # per-condition expression, Ng x N
  BIC = 0
  Bs = lapply(fit, function(x) { x$B })
  fs = lapply(fit, function(x) { x$f })
  for (k in 1:K) {
    ## fix: reset the residual accumulator per condition; the original
    ## carried condition 1's residuals into condition 2's likelihood
    err = 0
    nll = -Ns[k] / 2 * log(fit[[k]]$detIB ^ 2)
    for (i in 1:Ng) {
      Xi = X[[i]] # sk x N
      bi = Bs[[k]][i, -i, drop = F] # 1 x (ng-1)
      yi = Y[[k]][i, , drop = F] # 1 x N
      Yi = Y[[k]][-i, , drop = F] # (ng-1) x N
      fi = fs[[k]][[i]] # sk x 1
      err = err + tcrossprod(yi - bi %*% Yi - crossprod(fi, Xi))
    }
    nll = nll + err / (2 * fit[[k]]$sigma2) + Ng * Ns[k] / 2 * log(2 * pi * fit[[k]]$sigma2)
    BIC = BIC + (2 * nll + sum(Bs[[k]] != 0) * log(Ns[k]))
  }
  as.numeric(BIC[1, 1])
}
## ML residual variance of a single-condition SEM fit: pooled squared error
## of every gene against its network parents (B) and its own eQTL effects
## (f), divided by Ng * N.
sigma2_sml = function(X, Y, B, f, Ng, N) {
  Xt = lapply(X, t)   # per-gene designs, sk x N
  Yt = t(Y)           # genes x samples
  rss = 0
  for (g in 1:Ng) {
    xg = Xt[[g]]                  # sk x N
    bg = B[g, -g, drop = F]       # 1 x (Ng-1)
    yg = Yt[g, , drop = F]        # 1 x N
    Yg = Yt[-g, , drop = F]       # (Ng-1) x N
    fg = f[[g]]                   # sk x 1
    rss = rss + tcrossprod(yg - bg %*% Yg - crossprod(fg, xg))
  }
  (rss / (Ng * N))[1]
}
## ---- inst/00_SparsemaximumLiklihood.R (repo: Ivis4ml/fssemR) ----
## generate random graph (DAG) of G genes and their corresponding differential network
## B_1 & B_2
## @param Ng gene number nodes
## @param e Expected number of edges per node
## @param d Expected ratio of differential edges per node (0.1)
## @param dag DAG or not
require(igraph)
require(Matrix)
require(glmnet)
## Generate a random weighted network pair (B1, B2) differing in a small
## set of edges; optionally constrain both to DAGs via igraph.
#' @param Ng number of gene nodes
#' @param e expected number of edges per node
#' @param dag constrain both networks to be acyclic (requires igraph)
#' @param d expected ratio of differential edges per node
#' @param Bmin,Bmax magnitude range of nonzero edge weights
#' @param maxit cap on edge-placement attempts per network
#' @return list(B1, B2) of Ng x Ng weight matrices, or NULL on failure
#'   (attempt cap hit, no differential edge, or singular I - B when !dag)
getrandDAG = function(Ng,
                      e,
                      dag = TRUE,
                      d = 0.1,
                      Bmin = 0.5,
                      Bmax = 1,
                      maxit = Ng * Ng) {
  B1 = matrix(0,
              nrow = Ng,
              ncol = Ng)
  Nc = Ng * Ng
  Ne = rbinom(1, Nc, e / (Ng - 1))
  ## place Ne edges in B1, rejecting any that break acyclicity when dag
  iter1 = 0
  while (sum(B1) < Ne & iter1 < maxit) {
    ## fix: draw a valid integer cell index; the original used
    ## runif(1, 1, Nc), a fractional index that truncates on use and can
    ## never select cell Nc
    edge = sample.int(Nc, 1)
    B1[edge] = TRUE
    if (dag) {
      g = graph_from_adjacency_matrix(B1)
      B1[edge] = is.dag(g)
    }
    iter1 = iter1 + 1
  }
  B2 = B1
  nn = which(B1 != 0)
  nz = which(B1 == 0)
  Nd = ceiling(Ne * d)
  Ndf = rbinom(1, Nd, 0.5)
  ## drop edges from B2 until Ndf edges differ.
  ## fix: sample(v, 1) samples from 1:v when v has length one; index the
  ## candidate vectors explicitly instead
  while (sum(abs(B1 - B2)) < Ndf) {
    edge = nn[sample.int(length(nn), 1)]
    B2[edge] = FALSE
  }
  ## re-add edges to B2 elsewhere until it has Ne edges again
  iter2 = 0
  while (sum(B2) < Ne & iter2 < maxit) {
    edge = nz[sample.int(length(nz), 1)]
    B2[edge] = TRUE
    if (dag) {
      g = graph_from_adjacency_matrix(B2)
      B2[edge] = is.dag(g)
    }
    iter2 = iter2 + 1
  }
  ## assign weights: shared edges get identical weights across conditions,
  ## condition-specific edges get independent ones
  ne = which(B1 & B2)
  n1 = which(B1 & !(B2))
  n2 = which(!(B1) & B2)
  B1[ne] = B2[ne] = runif(length(ne), min = Bmin, max = Bmax) * sample(c(-1, 1), length(ne), replace = T)
  B1[n1] = runif(length(n1), min = Bmin, max = Bmax) * sample(c(-1, 1), length(n1), replace = T)
  B2[n2] = runif(length(n2), min = Bmin, max = Bmax) * sample(c(-1, 1), length(n2), replace = T)
  if (iter1 < maxit & iter2 < maxit & any(B1 != B2)) {
    if (!dag) {
      ## cyclic networks must still leave I - B invertible for the SEM
      detIB1 = det(diag(Ng) - B1)
      detIB2 = det(diag(Ng) - B2)
      if (abs(detIB1) > 1e-6 & abs(detIB2) > 1e-6) {
        list(B1 = B1, B2 = B2)
      } else {
        NULL
      }
    } else {
      list(B1 = B1, B2 = B2)
    }
  } else {
    NULL
  }
}
#' Assign Nk eQTLs evenly to Ng genes and simulate their genotypes.
#' @param N number of samples
#' @param Ng number of genes
#' @param Nk number of eQTLs (assumed to be a multiple of Ng)
#' @return list(G = Ng x Nk gene-to-eQTL indicator matrix,
#'   X = Nk x N genotype matrix coded 1/2/3,
#'   sk = per-gene eQTL index sets)
getrandeQTL = function(N, Ng, Nk) {
  step = Nk / Ng
  ## genotypes: round(2 * U(0, 1)) + 1 yields codes 1, 2, 3
  X = round(2 * matrix(runif(Nk * N), nrow = Nk)) + 1
  G = matrix(0,
             nrow = Ng,
             ncol = Nk)
  ix = vector("list", Ng)
  ## gene i controls eQTLs i, i + Ng, i + 2 * Ng, ...
  ## (plain loop replaces the original lapply with a `<<-` side effect)
  for (i in 1:Ng) {
    s = seq(0, step - 1) * Ng + i
    G[i, s] = 1
    ix[[i]] = s
  }
  list(G = G, X = X, sk = ix)
}
## randNetinit
## Randomly generate a regulatory-network pair, retrying until getrandDAG
## succeeds (it returns NULL on failure).
#' @param Ng number of genes
#' @param Nk number of eQTLs (unused here; kept for interface symmetry)
#' @param r expected edge ratio per node
#' @param d expected differential-edge ratio
#' @param dag constrain the networks to DAGs
#'   (fix: the original had no `dag` parameter and silently read `dag`
#'   from the calling environment)
#' @param ... forwarded to getrandDAG (Bmin, Bmax, maxit)
randNetinit = function(Ng = 10,
                       Nk = 10,
                       r = 0.3,
                       d = 0.1,
                       dag = TRUE,
                       ...) {
  B = NULL
  while (is.null(B)) {
    B = getrandDAG(Ng,
                   e = Ng * r,
                   dag = dag,
                   d = d,
                   ...)
  }
  B
}
require(mvtnorm)
## Simulate a two-condition SEM data set: draw (or accept) a network pair
## B, assign eQTLs and genotypes, add Gaussian noise per condition, and
## solve the structural equations Y = (I - B)^{-1} (F X + E).
getrandsem = function(N = 200,
                      Ng = 10,
                      Nk = 10,
                      r = 0.3,
                      d = 0.1,
                      dag = TRUE,
                      sigma = 0.1,
                      B = NULL,
                      ...) {
  ## retry until a valid network pair is produced (getrandDAG may fail)
  if (is.null(B)) {
    repeat {
      B = getrandDAG(Ng,
                     e = Ng * r,
                     dag = dag,
                     d = d,
                     ...)
      if (!is.null(B)) break
    }
  }
  eqtl = getrandeQTL(N, Ng, Nk)
  F = eqtl$G
  X = eqtl$X
  sk = eqtl$sk
  ## independent noise per condition, scaled by sigma
  E1 = sigma * t(rmvnorm(N, mean = rep(0, Ng), sigma = diag(Ng)))
  E2 = sigma * t(rmvnorm(N, mean = rep(0, Ng), sigma = diag(Ng)))
  ## structural-equation solve per condition
  Y1 = solve(diag(Ng) - B[[1]]) %*% (F %*% X + E1)
  Y2 = solve(diag(Ng) - B[[2]]) %*% (F %*% X + E2)
  list(
    obs = list(
      Y1 = Y1,
      Y2 = Y2,
      X = X,
      sk = sk
    ),
    var = list(
      B1 = Matrix(B[[1]], sparse = T),
      B2 = Matrix(B[[2]], sparse = T),
      F = Matrix(F, sparse = T),
      N = N,
      Ng = Ng,
      Nk = Nk
    )
  )
}
## data = getrandsem(N = 200, Ng = 30, Nk = 90, r = 0.1, d = 0.1, sigma = 1, dag = TRUE)
## datn = getrandsem(N = 200, Ng = 30, Nk = 90, r = 0.1, d = 0.1, sigma = 1, dag = FALSE)
## utility functions
## Center every row of X at zero mean. NOTE: apply() stacks the per-row
## results as columns, so the output is the TRANSPOSE of the input
## (ncol(X) x nrow(X)); downstream code relies on this orientation.
center = function(X) {
  apply(X, 1, function(row) {
    row - mean(row)
  })
}
## For each gene, extract the genotype rows of its own eQTLs.
## `data$obs$sk` holds the per-gene row indices into `data$obs$X`.
submatX = function(data) {
  genotypes = data$obs$X
  lapply(data$obs$sk, function(rows) {
    genotypes[rows, , drop = F]
  })
}
## X(X^TX)^{-1}X^T
## Hat (projection) matrix X (X^T X)^{-1} X^T; assumes full column rank.
projection = function(X) {
  X %*% solve(crossprod(X), t(X))
}
## QR-based variant of projection() (slower)
## Hat matrix via pivoted (LAPACK) QR: P = Q Q^T, with Q the first
## `rank` orthonormal columns of the decomposition.
projection.QR = function(X) {
  dec = qr.default(X, LAPACK = TRUE)
  thin = diag(1, nrow = nrow(dec$qr), ncol = dec$rank)
  Q = qr.qy(dec, thin)
  tcrossprod(Q)
}
## centeralized Y (gene expression) and X (eQTL quantitive)
## Center Y and every genotype block of X at zero row-means, returning the
## removed means so intercepts can be recovered later. NOTE: center()
## transposes, so the returned Y is samples x genes and each X[[i]] is
## samples x sk.
centralize = function(X, Y) {
  muX = lapply(X, rowMeans)
  muY = rowMeans(Y)
  list(X = lapply(X, center),
       Y = center(Y),
       muX = muX,
       muY = muY)
}
## ridge regression for estimate sigma2 in gene expression
#' @example
#' X = submatX(data)
#' Y = data$obs$Y1
#' B = constrained_L2reg(X, Y, rho = 0.1)
## Per-gene ridge regression used to initialize B / F and to estimate the
## residual variance sigma2. For each gene i, the expression of all other
## genes (projected off gene i's own eQTL space) predicts y_i.
constrained_L2reg = function(X, Y, rho) {
  # gene number(M) & sample number(N)
  ## centralize() transposes: below, Y is N x M (samples x genes) and each
  ## X[[i]] is N x sk.
  std = centralize(X, Y)
  X = std$X
  Y = std$Y
  meanX = std$muX
  meanY = std$muY
  M = ncol(Y)
  N = nrow(Y)
  B = Matrix(0,
             nrow = M,
             ncol = M,
             sparse = T)
  f = list()
  err = 0
  for (i in 1:M) {
    Xi = X[[i]] ## n x sk
    ## residual-maker: projects off the span of gene i's eQTLs
    Pi = diag(N) - projection(Xi) ## n x n
    yi = Y[, i, drop = F] ## n x 1
    Yi = Y[,-i, drop = F] ## n x (p-1)
    ## ridge estimate of row i of B:
    ## (Y^TPY + rho)^{-1}Y^TPy
    bi = solve(crossprod(Yi, Pi %*% Yi) + rho * diag(M - 1)) %*% t(Yi) %*% Pi %*% yi
    ## bi = glmnet(Pi %*% Yi, Pi %*% yi, alpha = 0, lambda = rho)[["beta"]][, 1]
    B[i, -i] = bi
    ## OLS of the eQTL coefficients on the network-adjusted residual
    f[[i]] = solve(crossprod(Xi)) %*% t(Xi) %*% (yi - Yi %*% bi)
    err = err + crossprod(yi - Yi %*% bi - Xi %*% f[[i]])
  }
  ## pooled residual variance across all genes/samples (1 x 1 matrix)
  sigma2 = err / (M * N - 1)
  ## intercepts on the original (uncentered) scale
  mu = (diag(M) - B) %*% meanY - sapply(1:M, function(i) {
    meanX[[i]] %*% f[[i]]
  })
  list(
    B = as.matrix(B),
    F = f,
    sigma2 = sigma2,
    mu = mu
  )
}
## cross-validation on ridge regression to estimate sigma2
#' @param nrho number of L2 penalty's coefficient
#' @param ncv number of cross-validation
#' @example
#' sigma2 = getsigma2_L2reg(X, Y, nrho = 15, ncv = 5)
## Cross-validated ridge: choose the L2 penalty minimizing held-out
## prediction error of the structural equations, then refit on all data
## to obtain sigma2.
getsigma2_L2reg = function(X, Y, nrho = 10, ncv = 5) {
  ## log-spaced penalty grid, 1e-6 .. 1e2
  rho_factors = 10 ** (seq(-6, 2, length.out = nrho))
  ## here Y is still genes x samples (centralization happens inside
  ## constrained_L2reg)
  N = ncol(Y)
  M = nrow(Y)
  cv.err = matrix(0, nrow = nrho, ncol = ncv)
  ## random fold label per sample
  cv.fold = sample(seq(1, ncv), size = N, replace = T)
  irho = 1
  for (rho in rho_factors) {
    for (cv in 1:ncv) {
      ytrain = Y[, cv.fold != cv]
      xtrain = lapply(X, function(x) {
        x[, cv.fold != cv, drop = F]
      })
      ytest = Y[, cv.fold == cv]
      xtest = lapply(X, function(x) {
        x[, cv.fold == cv, drop = F]
      })
      fit = constrained_L2reg(xtrain, ytrain, rho)
      ## per-gene eQTL contribution predicted on the held-out samples
      ftest = lapply(1:M, function(i) {
        crossprod(fit$F[[i]], xtest[[i]])
      })
      ftest = do.call(rbind, ftest)
      ## squared Frobenius residual of (I - B) y - F x - mu on the test fold
      cv.err[irho, cv] = norm((diag(M) - fit$B) %*% ytest - ftest - fit$mu, type = "f") ^
        2
    }
    irho = irho + 1
  }
  cv.mean = rowMeans(cv.err)
  rho.min = rho_factors[which.min(cv.mean)]
  ## final fit at the selected penalty on the full data
  fit = constrained_L2reg(X, Y, rho.min)
  list(rho.opt = rho.min, sigma2.opt = fit$sigma2)
}
##---------------------------------
# utility functions for SML-lasso #
##---------------------------------
## Factory for the 1-D restriction of the penalized SML objective in the
## scalar unknown x = B[i, j]; a0 is the determinant pivot, a1/a2 the
## linear/quadratic data-fit coefficients.
obj_cwiseSML = function(N, a0, a1, a2, lambda, w, sigma2) {
  function(x) {
    loglik = -N / 2 * sigma2 * log((a0 - x) ^ 2)
    fit = a2 * x ^ 2 / 2 - a1 * x
    loglik + fit + lambda * w * abs(x)
  }
}
## a0 = det(I - B) / cij + Bij => c = 1
## Factory for the stationarity condition of the component-wise SML
## objective: returns coefficients of a x^2 + b x + c = 0 under a given
## sign assumption t for B[i, j]:
##   t =  1 when Bij > 0, t = -1 when Bij < 0, t = 0 when Bij = 0.
grad_cwiseSML = function(N, a0, a1, a2, lambda, w, sigma2) {
  function(t) {
    shrink = lambda * w * t
    list(
      a = -a2,
      b = a1 + a2 * a0 - shrink,
      c = N * sigma2 + (shrink - a1) * a0
    )
  }
}
# ax^2 + bx + c = 0
## Real roots of a x^2 + b x + c = 0, returned as list(n = count, x = roots).
poly2_solver = function(a, b, c) {
  disc = b ^ 2 - 4 * a * c
  if (disc < 0) {
    return(list(n = 0, x = NULL))
  }
  if (disc == 0) {
    return(list(n = 1, x = c(-b / (2 * a))))
  }
  sq = sqrt(disc)
  list(n = 2, x = c((-b - sq) / (2 * a), (-b + sq) / (2 * a)))
}
## solve SML problem by component-wise update
## component-wise --> row-wise update Bij
#' @param sigma2 estimate from constrained_L2reg
#' @param B B0 initialization
#' @param f F initialization
#' @example
#' X = submatX(data)
#' Y = data$obs$Y1
#' sigma2 = getsigma2_L2reg(X, Y, nrho = 20, ncv = 5)
#' param.init = constrained_L2reg(X, Y, rho = sigma2$rho.opt)
#' param.opt = sparse_maximum_likehood_cwise(B = param.init$B, f = param.init$F, Y = Y, X = X, sigma2 = param.init$sigma2[1], N = data$var$N, Ng = data$var$Ng, lambda = 15, maxit = 100)
#' B = param.init$B; f=param.init$F; Y = Y; X = X; sigma2 = param.init$sigma2; N = data$var$N; Ng = data$var$Ng; Nk = data$var$Nk; lambda = 0.1
## Single-condition sparse maximum likelihood (adaptive-lasso SEM),
## solved by component-wise coordinate descent: each entry B[i, j] is
## updated by minimizing the 1-D restriction of the penalized negative
## log-likelihood (via obj_cwiseSML / grad_cwiseSML), with the inverse and
## cofactors of (I - B) maintained incrementally.
sparse_maximum_likehood_cwise = function(B,
                                         f,
                                         Y,
                                         X,
                                         sigma2,
                                         N,
                                         Ng,
                                         lambda,
                                         weighted = TRUE,
                                         maxit = 100,
                                         verbose = 2) {
  ## data centralization
  ## (center() transposes: Y becomes N x Ng, each X[[i]] becomes N x sk)
  std = centralize(X, Y)
  X = std$X
  Y = std$Y
  meanX = std$muX
  meanY = std$muY
  ## update for eQTL coeffs
  ## f[[i]] is affine in B[i,]: f[[i]] = f0[[i]] - f1[[i]] %*% B[i,]
  f0 = list()
  f1 = list()
  for (i in 1:Ng) {
    Xi = X[[i]] # n x sk
    yi = Y[, i, drop = F] # n x 1 (for specific gene i)
    Pi = solve(crossprod(Xi)) %*% t(Xi)
    f0[[i]] = Pi %*% yi
    f1[[i]] = Pi %*% Y # f[[i]] = f0[[i]] - f1[[i]] %*% B[i,]
  }
  ## update for gnet coeffs
  niter = 1
  ImB = diag(Ng) - B
  IBinv = solve(ImB)
  ## adaptive-lasso weights from the initial estimate
  ## (Inf for zero entries keeps them pinned at zero)
  wB = 1 / abs(B)
  while (niter <= maxit) {
    B.prev = B
    f.prev = f
    for (i in 1:Ng) {
      ## IBinv i column and j row -> c_{ij}
      ## c_{ij} / det(I - B) = (I - B)^{-1}_{j, i}
      ci = IBinv[, i]
      dbi = vector("numeric", Ng)
      for (j in 1:Ng) {
        ## update B[i, j] for i != j
        if (i != j) {
          bij.prev = bij = B[i, j]
          wij = if (weighted) {
            wB[i, j]
          } else {
            1
          }
          mij = ci[j]
          ## i-th row of B
          bi = ImB[i, ]
          bi[j] = 0
          ## Yej is the j-th column of Y
          Yej = Y[, j, drop = F]
          a1 = crossprod(Y %*% bi - X[[i]] %*% f[[i]], Yej)
          a2 = crossprod(Yej)
          ## if mij == 0, cij == 0
          if (mij == 0) {
            ## determinant term vanishes: plain soft-threshold update
            if (abs(a1) > lambda * wij) {
              bij = sign(a1) * (abs(a1) - lambda * wij) / a2
            } else {
              bij = 0
            }
          } else {
            a0 = 1 / mij + bij.prev
            cond = list(1,-1, 0) # bij condition
            obj = obj_cwiseSML(N, a0, a1, a2, lambda, wij, sigma2)
            grad = grad_cwiseSML(N, a0, a1, a2, lambda, wij, sigma2)
            ## candidate stationary points under each sign assumption,
            ## keeping only roots consistent with the assumed sign
            params = lapply(cond, function(t) {
              x = if (t != 0) {
                tmp = do.call(poly2_solver, grad(t))
                tmp$x[tmp$x * t > 0]
              } else {
                0
              }
              list(x = x)
            })
            params = unlist(params)
            ## pick the candidate with the smallest objective value
            objval = sapply(params, obj)
            mix = which.min(objval)
            bij = params[mix]
          }
          dbij = bij.prev - bij
          dbi[j] = dbij
          B[i, j] = bij
          ## rescale the cofactor column after the scalar change in B[i, j]
          ci = ci / (1 + dbij * mij)
          ##IBinv = IBinv - IBinv[,i,drop = F] %*% IBinv[j,,drop = F] / (1/dbij + mij)
          ImB = diag(Ng) - B
        }
      } ## for(j in 1:Ng)
      ## (ImB + ei^T %*% 1 %*% dbi)^{-1}
      ## rank-one (Sherman-Morrison style) refresh of (I - B)^{-1}
      ## after the whole row-i update
      IBinv = IBinv - IBinv[, i, drop = F] %*% dbi %*% IBinv / (1 + dbi %*% ImB[, i, drop =
                                                                  F])[1]
      f[[i]] = f0[[i]] - f1[[i]] %*% B[i, ]
    } ## for(i in 1:Ng)
    ## relative change of B and F drives convergence
    Berr = norm(B - B.prev, type = "f") / (1 + norm(B.prev, type = "f"))
    Ferr = sum(sapply(1:Ng, function(i) {
      norm(f[[i]] - f.prev[[i]], type = "f")
    })) / (1 + sum(sapply(1:Ng, function(i) {
      norm(f.prev[[i]], type = "f")
    })))
    err = Berr + Ferr
    if (verbose >= 2) {
      cat(sprintf("SML: iteration = %d, error = %f\n", niter, err))
    }
    niter = niter + 1
    if (err < 1e-4 || niter > maxit) {
      ## intercepts on the original (uncentered) scale
      mu = (diag(Ng) - B) %*% meanY - sapply(1:Ng, function(i) {
        meanX[[i]] %*% f[[i]]
      })
      B = Matrix(B, sparse = T)
      break
    }
  } ## while(niter <= maxit)
  list(
    B = B,
    f = f,
    mu = mu,
    niter = niter,
    err = err
  )
}
##---------------------------------
# utility functions for SML-CD #
##---------------------------------
## objective function for one condition
#' @param B gene network coefficients
#' @param f eQTL coefficients
#' @param Y gene expression (centralized)
#' @param X eQTL quantitive (centralized)
#' @param sigma2 estimated sigma2 in logliklihood
#' @param N number of sample
#' @param lambda lambda coefficient in weighted lasso
#' @param weighted weighted lasso
#' @description argmin -N / 2 * log(det(I - B))^2 + 1 / (2 * sigma2) * ||(I - B) %*% Y - F %*% X - mu||_F^2 + lambda * abs(weight * B)
## Penalized negative log-likelihood of a single-condition SEM.
## type selects what is returned:
##   "lang" -- full penalized objective (likelihood + lasso penalty)
##   "prim" -- likelihood part only
##   "err"  -- raw residual sum of squares
## Fixes: `type` is resolved by match.arg() once instead of per branch,
## and the gene loop uses seq_along() instead of 1:length(f).
sparse_likelihood = function(B,
                             f,
                             Y,
                             X,
                             mu,
                             sigma2,
                             N,
                             lambda,
                             weight,
                             detIB,
                             type = c("lang", "prim", "err")) {
  type = match.arg(type)
  ## Jacobian term -N/2 * log(det(I - B)^2)
  logdet = -N / 2 * log(detIB ^ 2)
  ## squared residual of the structural equations, accumulated per gene:
  ## y_i - B[i,-i] Y[-i,] - f_i^T X_i - mu_i
  IBerr2 = 0
  for (i in seq_along(f)) {
    err = Y[i, ] - B[i,-i] %*% Y[-i, ] - crossprod(f[[i]], X[[i]]) - mu[i]
    IBerr2 = IBerr2 + sum(err ^ 2)
  }
  if (type == "lang") {
    logdet + IBerr2 / (2 * sigma2) + lambda * sum(abs(weight * B))
  } else if (type == "prim") {
    logdet + IBerr2 / (2 * sigma2)
  } else {
    IBerr2
  }
}
#' @description row-wise gradient of the SML objective in bi = B[i, -i]:
#'   determinant part N * c plus the quadratic data-fit part.
#' @param c vector ci / det(I - B); (ng-1) x 1
grad_rwise_SML = function(N, c, Yp, Hy, sigma2) {
  function(x) {
    determinant_part = N * c
    data_part = (Yp %*% x - t(Hy)) / sigma2
    determinant_part + data_part
  }
}
#' @description lipshitz moduli row-wise
#' @param o solve((I-B)[-i,] %*% t((I-B)[-i,]))
#' @param gs (I-B)[-i,-i]
#' @param si gi[,i]
#' @param Yp
## Estimate a Lipschitz modulus L for the row-wise gradient: a curvature
## bound for the log-determinant term plus the largest eigenvalue of the
## quadratic data-fit term. The ridge jitter `lambda` is inflated 10x
## until L comes out positive.
## NOTE(review): chol2inv() expects a Cholesky factor, but here it is
## applied to the plain matrix (ImO + diag * lambda) -- confirm whether
## chol2inv(chol(...)) or solve(...) was intended.
lips_rwise_SML = function(N,
                          o,
                          gs,
                          si,
                          c2i,
                          detIBi,
                          maxEigenYp,
                          sigma2,
                          Ng) {
  ogs = o %*% gs
  ImO = diag(Ng - 1) - crossprod(gs, ogs)
  sOg = crossprod(si, ogs)
  c = 1 - crossprod(si, o %*% si)
  lambda = 1e-6  # initial jitter
  x = -1 * tcrossprod(chol2inv(ImO + diag(Ng - 1) * lambda), sOg)
  ## 1e-6 guards against division by a vanishing determinant
  L = N * c2i / (crossprod(x, ImO %*% x) + 2 * sOg %*% x + c) / (detIBi + 1e-6) + maxEigenYp / sigma2
  while(L < 0) {
    lambda = lambda * 10
    x = -1 * tcrossprod(chol2inv(ImO + diag(Ng - 1) * lambda), sOg)
    L = N * c2i / (crossprod(x, ImO %*% x) + 2 * sOg %*% x + c) / (detIBi + 1e-6) + maxEigenYp / sigma2
  }
  L
}
#' @description soft-thresholding proximal operator of the weighted lasso:
#'   argmin_x lambda * |w * x| + c / 2 * |x - u|_2^2
prox_lasso = function(lambda, c, u, w) {
  shift = lambda * w / c
  pmax(u - shift, 0) + pmin(u + shift, 0)
}
## solve SML problem by coordinate descent
## row-wise --> row-wise update B[i,]
#' @param sigma2 estimate from constrained_L2reg
#' @param B B0 initialization (Derived from ridge regression)
#' @param f F initialization
#' @example
#' X = submatX(data)
#' Y = data$obs$Y1
#' sigma2 = getsigma2_L2reg(X, Y, nrho = 20, ncv = 5)
#' param.init = constrained_L2reg(X, Y, rho = sigma2$rho.opt)
#' param.opt1 = sparse_maximum_likehood_bcd(B = param.init$B, f = param.init$F, Y = Y, X = X, sigma2 = param.init$sigma2[1], N = data$var$N, Ng = data$var$Ng, lambda = 15, maxit = 1000, rho = sigma2$rho.opt)
## Single-condition sparse maximum likelihood by proximal block-coordinate
## descent: each row B[i, -i] takes a gradient step (step size 1 / Li from
## lips_rwise_SML) followed by weighted-lasso shrinkage via prox_lasso.
sparse_maximum_likehood_bcd = function(B,
                                       f,
                                       Y,
                                       X,
                                       sigma2,
                                       N,
                                       Ng,
                                       lambda,
                                       weighted = TRUE,
                                       maxit = 100,
                                       verbose = 2) {
  ## data centralization
  ## (center() transposes: Y becomes N x Ng, each X[[i]] becomes N x sk)
  std = centralize(X, Y)
  X = std$X
  Y = std$Y
  meanX = std$muX
  meanY = std$muY
  ## update for eQTL row-wise (specific for genes)
  ## precompute, per gene: affine map for f, projection residuals, and the
  ## quadratic forms Yp/Hy that define the data-fit part of the gradient
  f0 = list()
  f1 = list()
  Yp = list()
  Hy = list()
  Yp.maxEigen = list()
  for (i in 1:Ng) {
    Xi = X[[i]] # n x sk
    yi = Y[, i, drop = F] # n x 1 (for specific gene i)
    Yi = Y[, -i] # n x (ng-1) (for specific gene i)
    Pi = solve(crossprod(Xi)) %*% t(Xi)
    f0[[i]] = Pi %*% yi # f0 \in sk x 1; f1 \in sk x (ng - 1)
    f1[[i]] = Pi %*% Yi # f[[i]] = f0[[i]] - f1[[i]] %*% bi | bi = B[i,-i]
    Hi = diag(N) - Xi %*% Pi # n x n projection matrix
    Yp[[i]] = t(Yi) %*% Hi %*% Yi # (ng-1) x (ng-1)
    Hy[[i]] = t(yi) %*% Hi %*% Yi # 1 x (ng-1)
    ## maximized eigen-value for Yp
    Yp.maxEigen[[i]] = eigen(Yp[[i]])$values[1]
  }
  ## update for gnet row-wise
  niter = 1
  ImB = diag(Ng) - B
  IBinv = solve(ImB)
  detIB = det(ImB)
  ## adaptive-lasso weights from the initial estimate
  wB = 1 / abs(B)
  while (niter <= maxit) {
    B.prev = B
    f.prev = f
    for (i in 1:Ng) {
      ## -N*sigma2*log(det(I-B)^2) + \sum_{i=1}^{Ng} bi^T %*% Yp %*% bi - Hy %*% bi
      ## ci / det(I-B); (ng-1) x 1
      ci = IBinv[-i, i, drop = F]
      bi = t(B[i,-i, drop = F])
      grad = grad_rwise_SML(N, ci, Yp[[i]], Hy[[i]], sigma2[1])
      ## Lipschitz for row-i
      oi = solve(tcrossprod(ImB[-i,]))
      deti = det(tcrossprod(ImB[-i,]))
      gii = ImB[-i, -i]
      si = ImB[-i, i, drop = F]
      c2i = sum((ci * detIB) ^ 2)
      gi = grad(bi)
      Li = lips_rwise_SML(N, oi, gii, si, c2i, deti, Yp.maxEigen[[i]], sigma2, Ng)
      ## proximal operator for lasso
      ## argmin(lambda * w * |x| + Li/2||x - ui||_2^2)
      ui = bi - gi / Li[1]
      wBi = wB[i,-i]
      B[i, -i] = prox_lasso(lambda, Li[1], ui[, 1], wBi)
      dbi = B.prev[i,] - B[i,]
      ImB = diag(Ng) - B
      ## update det(I-B) & (I-B)^{-1}
      ## determinant updated via the matrix-determinant lemma; the inverse
      ## is recomputed exactly (the rank-one update below is disabled)
      detIB = (ImB[i,] %*% IBinv[, i, drop = F])[1] * detIB
      # IBinv = IBinv - IBinv[, i, drop = F] %*% dbi %*% IBinv / (1 + dbi %*% ImB[, i, drop = F])[1]
      IBinv = solve(ImB)
      f[[i]] = f0[[i]] - f1[[i]] %*% B[i, -i]
    }
    ## relative change of B and F drives convergence
    Berr = norm(B - B.prev, type = "f") / norm(B.prev, type = "f")
    Ferr = sum(sapply(1:Ng, function(i) {
      norm(f[[i]] - f.prev[[i]], type = "f")
    })) / sum(sapply(1:Ng, function(i) {
      norm(f.prev[[i]], type = "f")
    }))
    err = Berr + Ferr
    if (verbose >= 2) {
      cat(sprintf("SML: iteration = %d, error = %f\n", niter, err))
    }
    niter = niter + 1
    if (err < 1e-4 || niter > maxit) {
      ## intercepts on the original (uncentered) scale
      mu = (diag(Ng) - B) %*% meanY - sapply(1:Ng, function(i) {
        meanX[[i]] %*% f[[i]]
      })
      B = Matrix(B, sparse = T)
      break
    }
  } ## while (niter <= maxit)
  list(
    B = B,
    f = f,
    mu = mu,
    niter = niter,
    err = err,
    detIB = detIB
  )
}
## inertial PALM
# utility functions
## Inertial-PALM extrapolation schedules. Returns a function of the
## iteration counter k:
##   "cont" -> constant weight `init`
##   "lin"  -> (k - 1) / (k + 2), a Nesterov-style ramp
## Fix: switch() requires a length-one EXPR, so the previous code errored
## when called with the default `opts = c("cont", "lin")`; match.arg()
## now resolves the default to "cont".
inertial_pars = function(opts = c("cont", "lin"),
                         init = 0) {
  opts = match.arg(opts)
  switch(
    opts,
    "cont" = function(k) {
      init
    },
    "lin" = function(k) {
      (k - 1) / (k + 2)
    }
  )
}
## solve SML problem by coordinate descent
## row-wise --> row-wise update B[i,]
#' @param sigma2 estimate from constrained_L2reg
#' @param B B0 initialization (Derived from ridge regression)
#' @param f F initialization
#' @param rho stable for lipschitz calculation(deprecated)
#' @example
#' X = submatX(data)
#' Y = data$obs$Y1
#' sigma2 = getsigma2_L2reg(X, Y, nrho = 20, ncv = 5)
#' param.init = constrained_L2reg(X, Y, rho = sigma2$rho.opt)
#' param.opt2 = sparse_maximum_likehood_iPALM(B = param.init$B, f = param.init$F, Y = Y, X = X, sigma2 = param.init$sigma2[1], N = data$var$N, Ng = data$var$Ng, lambda = 15, maxit = 200)
## Single-condition sparse maximum likelihood by inertial PALM: like the
## bcd variant, but each row step starts from the extrapolated point
## B.inert = B_k + w * (B_k - B_{k-1}), with w supplied by `inertial`.
## sigma2 is re-estimated each sweep via sigma2_sml (defined elsewhere in
## this file -- not visible here).
sparse_maximum_likehood_iPALM = function(B,
                                         f,
                                         Y,
                                         X,
                                         sigma2,
                                         N,
                                         Ng,
                                         lambda,
                                         weighted = TRUE,
                                         inertial = inertial_pars("lin"),
                                         maxit = 100,
                                         verbose = 2,
                                         threshold = 1e-4) {
  ## data centralization
  ## (center() transposes: Y becomes N x Ng, each X[[i]] becomes N x sk)
  std = centralize(X, Y)
  X = std$X
  Y = std$Y
  meanX = std$muX
  meanY = std$muY
  ## update for eQTL row-wise (specific for genes)
  ## per-gene precomputations shared with the bcd solver
  f0 = list()
  f1 = list()
  Yp = list()
  Hy = list()
  Yp.maxEigen = list()
  for (i in 1:Ng) {
    Xi = X[[i]] # n x sk
    yi = Y[, i, drop = F] # n x 1 (for specific gene i)
    Yi = Y[, -i] # n x (ng-1) (for specific gene i)
    Pi = solve(crossprod(Xi)) %*% t(Xi)
    f0[[i]] = Pi %*% yi # f0 \in sk x 1; f1 \in sk x (ng - 1)
    f1[[i]] = Pi %*% Yi # f[[i]] = f0[[i]] - f1[[i]] %*% bi | bi = B[i,-i]
    Hi = diag(N) - Xi %*% Pi # n x n projection matrix
    Yp[[i]] = t(Yi) %*% Hi %*% Yi # (ng-1) x (ng-1)
    Hy[[i]] = t(yi) %*% Hi %*% Yi # 1 x (ng-1)
    ## maximized eigen-value for Yp
    Yp.maxEigen[[i]] = eigen(Yp[[i]])$values[1]
  }
  ## update for gnet row-wise
  niter = 1
  ImB = diag(Ng) - B
  IBinv = solve(ImB)
  detIB = det(ImB)
  wB = 1 / abs(B)
  ## two previous iterates for the inertial extrapolation
  B.prevs = list(B, B)
  while (niter <= maxit) {
    inert.pars = inertial(niter)
    B.inert = B.prevs[[2]] + inert.pars * (B.prevs[[2]] - B.prevs[[1]])
    f.prev = f
    for (i in 1:Ng) {
      ## -N*sigma2*log(det(I-B)^2) + \sum_{i=1}^{Ng} bi^T %*% Yp %*% bi - Hy %*% bi
      ## ci / det(I-B); (ng-1) x 1
      ci = IBinv[-i, i, drop = F]
      ## gradient is taken at the extrapolated row, not the current one
      bi = t(B.inert[i,-i, drop = F])
      grad = grad_rwise_SML(N, ci, Yp[[i]], Hy[[i]], sigma2[1])
      ## Lipschitz for row-i
      oi = solve(tcrossprod(ImB[-i,]))
      deti = det(tcrossprod(ImB[-i,]))
      gii = ImB[-i, -i]
      si = ImB[-i, i, drop = F]
      c2i = sum((ci * detIB) ^ 2)
      gi = grad(bi)
      Li = lips_rwise_SML(N, oi, gii, si, c2i, deti, Yp.maxEigen[[i]], sigma2, Ng)
      ## proximal operator for lasso
      ## argmin(lambda * w * |x| + Li/2||x - ui||_2^2)
      ui = bi - gi / Li[1]
      wBi = wB[i,-i]
      B[i, -i] = prox_lasso(lambda, Li[1], ui[, 1], wBi)
      dbi = B.prevs[[2]][i,] - B[i,]
      ImB = diag(Ng) - B
      ## update det(I-B) & (I-B)^{-1}
      ## NOTE(review): the rank-one denominator uses IBinv[, i] here, while
      ## the cwise solver uses ImB[, i] -- confirm which Sherman-Morrison
      ## form is intended.
      detIB = (ImB[i,] %*% IBinv[, i, drop = F])[1] * detIB
      IBinv = IBinv - IBinv[, i, drop = F] %*% dbi %*% IBinv / (1 + dbi %*% IBinv[, i, drop = F])[1]
      f[[i]] = f0[[i]] - f1[[i]] %*% B[i, -i]
    }
    ## refresh the noise-variance estimate after each full sweep
    sigma2 = sigma2_sml(X, Y, B, f, Ng, N)
    Berr = norm(B - B.prevs[[2]], type = "f") / (1 + norm(B.prevs[[2]], type = "f"))
    Ferr = sum(sapply(1:Ng, function(i) {
      norm(f[[i]] - f.prev[[i]], type = "f")
    })) / (1 + sum(sapply(1:Ng, function(i) {
      norm(f.prev[[i]], type = "f")
    })))
    err = Berr + Ferr
    if (verbose >= 2) {
      cat(sprintf("SML: iteration = %d, error = %f\n", niter, err))
    }
    niter = niter + 1
    ## shift the iterate window for the next extrapolation
    B.prevs = list(B.prevs[[2]], B)
    if (err < threshold || niter > maxit) {
      ## intercepts on the original (uncentered) scale
      mu = (diag(Ng) - B) %*% meanY - sapply(1:Ng, function(i) {
        meanX[[i]] %*% f[[i]]
      })
      break
    }
  } ## while (niter <= maxit)
  list(
    B = B,
    f = f,
    mu = mu,
    sigma2 = sigma2,
    niter = niter,
    err = err,
    detIB = detIB
  )
}
###### fused lasso ########
## centeralized Ys (gene expression) and Xs (eQTL quantitive)
#' @example
#' Ys = data$obs[c("Y1", "Y2")]
#' Xs = data$obs$X
## Multi-condition analogue of centralize(): center each condition's
## expression matrix and each genotype block, keeping per-condition means.
## (center() transposes each matrix it returns.)
centralize_mult = function(Xs, Ys) {
  muX = lapply(Xs, rowMeans)
  muY = lapply(Ys, rowMeans)
  list(X = lapply(Xs, center),
       Y = lapply(Ys, center),
       muX = muX,
       muY = muY)
}
## ridge regression for estimate sigma2 initialization in gene expression
#' @param M number of gene
#' @param N number of sample
#' @example
#' M = data$var$Ng
#' N = data$var$N
#' B = constrained_L2reg_multi(Xs, Ys, sigma2$rho.opt, M, N)
## Ridge initialization across K conditions: fit each condition with
## constrained_L2reg() and pool the residual variance over all fits.
constrained_L2reg_multi = function(X, Ys, rho, M, N) {
  K = length(Ys)
  B = vector("list", K)
  F = vector("list", K)
  mu = vector("list", K)
  err = 0
  df = 0
  for (k in seq_len(K)) {
    fit = constrained_L2reg(X, Ys[[k]], rho)
    B[[k]] = as.matrix(fit$B)
    F[[k]] = fit$F
    mu[[k]] = fit$mu
    ## un-normalize each fit's sigma2 so the pool is a weighted average
    err = err + fit$sigma2 * (N * M - 1)
    df = df + (N * M - 1)
  }
  list(
    B = B,
    F = F,
    sigma2 = err / df,
    mu = mu
  )
}
## cross-validation on ridge regression to estimate sigma2
#' @param nrho number of L2 penalty's coefficient
#' @param ncv number of cross-validation
#' @example
#' Ys = data$obs[c("Y1", "Y2")]
#' Xs = submatX(data)
#' M = data$var$Ng
#' N = data$var$N
#' sigma2 = getsigma2_L2reg_multi(Xs, Ys, nrho = 20, M = M, N = N)
## Cross-validated ridge for the multi-condition model: pick the penalty
## minimizing the summed held-out error over conditions, then refit.
## NOTE(review): unlike getsigma2_L2reg, the per-fold error here is NOT
## squared (plain Frobenius norm) -- confirm whether that is intended.
getsigma2_L2reg_multi = function(X,
                                 Ys,
                                 nrho = 10,
                                 ncv = 5,
                                 M,
                                 N) {
  ## log-spaced penalty grid, 1e-6 .. 1e2
  rho_factors = 10 ** (seq(-6, 2, length.out = nrho))
  cv.err = matrix(0, nrow = nrho, ncol = ncv)
  ## random fold label per sample (shared across conditions)
  cv.fold = sample(seq(1, ncv), size = N, replace = T)
  irho = 1
  for (rho in rho_factors) {
    for (cv in 1:ncv) {
      ytrain = lapply(Ys, function(y) {
        y[, cv.fold != cv, drop = F]
      })
      xtrain = lapply(X, function(x) {
        x[, cv.fold != cv, drop = F]
      })
      ytest = lapply(Ys, function(y) {
        y[, cv.fold == cv, drop = F]
      })
      xtest = lapply(X, function(x) {
        x[, cv.fold == cv, drop = F]
      })
      Ntrain = sum(cv.fold != cv)
      fit = constrained_L2reg_multi(xtrain, ytrain, rho, M, Ntrain)
      ## accumulate held-out error over conditions
      for (k in 1:length(Ys)) {
        ftest = lapply(1:M, function(i) {
          crossprod(fit$F[[k]][[i]], xtest[[i]])
        })
        ftest = do.call(rbind, ftest)
        cv.err[irho, cv] = cv.err[irho, cv] + norm((diag(M) - fit$B[[k]]) %*% ytest[[k]] - ftest - fit$mu[[k]], type = "f")
      }
    }
    irho = irho + 1
  }
  cv.mean = rowMeans(cv.err)
  rho.min = rho_factors[which.min(cv.mean)]
  ## final fit at the selected penalty on the full data
  fit = constrained_L2reg_multi(X, Ys, rho.min, M, N)
  list(
    rho.opt = rho.min,
    sigma2.opt = fit$sigma2[1],
    cv.ram = list(rho = rho_factors, cvm = cv.mean)
  )
}
##---------------------------------------
# utility functions for SML-fused_lasso #
##---------------------------------------
#' @param lambda lasso penalty
#' @param rho fused lasso penalty
#' @param w weight for lasso term
#' @param r weight for fused lasso term
#' @param c ci / det(B)
#' @param b bij[1,...,K]
#' Factory for the two-condition component-wise objective in
#' (x, y) = (B1[i,j], B2[i,j]): per-condition log-determinant terms
#' (present only when the corresponding cofactor c[k] is nonzero),
#' quadratic data-fit terms, a weighted lasso penalty, and a fused
#' penalty on |x - y|. The four former branch-specific closures are
#' unified into one closure with conditional log terms.
#' @param lambda lasso penalty
#' @param rho fused lasso penalty
#' @param w weight for lasso term
#' @param r weight for fused lasso term
#' @param c ci / det(B)
#' @param b bij[1,...,K]
obj_multiSML = function(N, c, b, a1, a2, lambda, rho, w, r, sigma2) {
  a0 = 1 / c + b  # determinant pivots; Inf (unused) where c[k] == 0
  use1 = c[1] != 0
  use2 = c[2] != 0
  function(x, y) {
    logdet = 0
    if (use1) {
      logdet = logdet - N[1] / 2 * log((a0[1] - x) ^ 2)
    }
    if (use2) {
      logdet = logdet - N[2] / 2 * log((a0[2] - y) ^ 2)
    }
    sigma2 * logdet -
      a1[1] * x - a1[2] * y +
      1 / 2 * (a2[1] * x ^ 2 + a2[2] * y ^ 2) +
      lambda * (w[1] * abs(x) + w[2] * abs(y)) +
      rho * r * (abs(x - y))
  }
}
## Factory for the stationarity conditions of the two-condition fused-lasso
## SML objective in (x, y) = (B1[i,j], B2[i,j]). The returned function takes
## a sign pattern t = c(sign(x), sign(y), sign(x - y)) and yields, for each
## coordinate (or the fused scalar when x == y), either a closed-form
## candidate (class "value"), quadratic coefficients (class "grad2"), or
## cubic coefficients (class "grad3"), to be solved by grad_solver().
## Which form applies depends on whether the determinant cofactors c[1],
## c[2] vanish (no log term when c[k] == 0).
## Fix: in the c[1]==0 & c[2]!=0 fused branch the constant term used
## N[1]; the log term there involves condition 2's sample count, so it
## must be N[2] (cf. the symmetric c[1]!=0 & c[2]==0 branch).
grad_multiSML = function(N, c, b, a1, a2, lambda, rho, w, r, sigma2) {
  a0 = 1 / c + b
  if (c[1] == 0 & c[2] == 0) {
    ## no log terms: both coordinates have linear (closed-form) gradients
    function(t) {
      xt = t[1]
      yt = t[2]
      dxy = t[3]
      if (dxy != 0) {
        x = if (xt != 0) {
          structure((a1[1] - lambda * w[1] * xt - rho * r * dxy) / a2[1], class = "value")
        } else {
          structure(0, class = "value")
        }
        y = if (yt != 0) {
          structure((a1[2] - lambda * w[2] * yt + rho * r * dxy) / a2[2], class = "value")
        } else {
          structure(0, class = "value")
        }
        list(x = x, y = y)
      } else {
        ## fused case x == y: single shared scalar
        tau = xt
        wxy = w[1] + w[2]
        xy = if (tau != 0) {
          structure((a1[1] + a1[2] - lambda * wxy * tau) / (a2[1] + a2[2]), class = "value")
        } else {
          structure(0, class = "value")
        }
        list(xy = xy)
      }
    }
  } else if (c[1] != 0 & c[2] == 0) {
    ## log term on x only: quadratic in x, linear in y
    function(t) {
      xt = t[1]
      yt = t[2]
      dxy = t[3]
      if (dxy != 0) {
        x = if (xt != 0) {
          structure(
            list(
              a = -a2[1],
              b = a1[1] + a2[1] * a0[1] - lambda * w[1] * xt - rho * r * dxy,
              c = N[1] * sigma2 + (lambda * w[1] * xt + rho * r * dxy - a1[1]) * a0[1]
            ),
            class = "grad2"
          )
        } else {
          structure(0, class = "value")
        }
        y = if (yt != 0) {
          structure((a1[2] - lambda * w[2] * yt + rho * r * dxy) / a2[2], class = "value")
        } else {
          structure(0, class = "value")
        }
        list(x = x, y = y)
      } else {
        tau = xt
        wxy = w[1] + w[2]
        xy = if (tau != 0) {
          structure(list(
            a = -(a2[1] + a2[2]),
            b = (a1[1] + a1[2]) + (a2[1] + a2[2]) * a0[1] - lambda * wxy * tau,
            c = N[1] * sigma2 + (lambda * wxy * tau - a1[1] - a1[2]) * a0[1]
          ),
          class = "grad2")
        } else {
          structure(0, class = "value")
        }
        list(xy = xy)
      }
    }
  } else if (c[1] == 0 & c[2] != 0) {
    ## log term on y only: linear in x, quadratic in y
    function(t) {
      xt = t[1]
      yt = t[2]
      dxy = t[3]
      if (dxy != 0) {
        x = if (xt != 0) {
          structure((a1[1] - lambda * w[1] * xt - rho * r * dxy) / a2[1], class = "value")
        } else {
          structure(0, class = "value")
        }
        y = if (yt != 0) {
          structure(
            list(
              a = -a2[2],
              b = a1[2] + a2[2] * a0[2] - lambda * w[2] * yt + rho * r * dxy,
              c = N[2] * sigma2 + (lambda * w[2] * yt - rho * r * dxy - a1[2]) * a0[2]
            ),
            class = "grad2"
          )
        } else {
          structure(0, class = "value")
        }
        list(x = x, y = y)
      } else {
        tau = xt
        wxy = w[1] + w[2]
        xy = if (tau != 0) {
          structure(list(
            a = -(a2[1] + a2[2]),
            b = (a1[1] + a1[2]) + (a2[1] + a2[2]) * a0[2] - lambda * wxy * tau,
            ## fixed: was N[1]; this branch's log term carries N[2]
            c = N[2] * sigma2 + (lambda * wxy * tau - a1[1] - a1[2]) * a0[2]
          ),
          class = "grad2")
        } else {
          structure(0, class = "value")
        }
        list(xy = xy)
      }
    }
  } else {
    ## log terms on both coordinates: quadratics separately, cubic fused
    function(t) {
      xt = t[1]
      yt = t[2]
      dxy = t[3]
      if (dxy != 0) {
        x = if (xt != 0) {
          structure(
            list(
              a = -a2[1],
              b = a1[1] + a2[1] * a0[1] - lambda * w[1] * xt - rho * r * dxy,
              c = N[1] * sigma2 + (lambda * w[1] * xt + rho * r * dxy - a1[1]) * a0[1]
            ),
            class = "grad2"
          )
        } else {
          structure(0, class = "value")
        }
        y = if (yt != 0) {
          structure(
            list(
              a = -a2[2],
              b = a1[2] + a2[2] * a0[2] - lambda * w[2] * yt + rho * r * dxy,
              c = N[2] * sigma2 + (lambda * w[2] * yt - rho * r * dxy - a1[2]) * a0[2]
            ),
            class = "grad2"
          )
        } else {
          structure(0, class = "value")
        }
        list(x = x, y = y)
      } else {
        tau = xt
        wxy = w[1] + w[2]
        xy = if (tau != 0) {
          structure(
            list(
              a = a2[1] + a2[2],
              b = lambda * wxy * tau - (a1[1] + a1[2]) - (a2[1] + a2[2]) * (a0[1] + a0[2]),
              c = (a1[1] + a1[2] - lambda * wxy * tau) * (a0[1] + a0[2]) + (a2[1] + a2[2]) * a0[1] * a0[2] - (N[1] + N[2]) * sigma2,
              d = (N[2] * a0[1] + N[1] * a0[2]) * sigma2 + (lambda * wxy * tau - (a1[1] + a1[2])) * a0[1] * a0[2]
            ),
            class = "grad3"
          )
        } else {
          structure(0, class = "value")
        }
        list(xy = xy)
      }
    }
  }
}
## Real roots of a x^3 + b x^2 + c x + d = 0 via the monic solver;
## roots with magnitude below `eps` are snapped to exactly zero.
poly3_solver = function(a, b, c, d, eps = 1e-6) {
  roots = solver3P_(b / a, c / a, d / a)
  roots$x = roots$x * (abs(roots$x) > eps)
  roots
}
# x^3 + ax^2 + bx + c = 0
## Real-root solver for the monic cubic x^3 + a x^2 + b x + c = 0
## (trigonometric form for three real roots, Cardano otherwise).
## Returns list(n = number of real roots, x = roots).
## Cleanup: removed a stray no-op literal `4` and the dead `Im`
## computations in the one/two-root branch; roots are unchanged.
solver3P_ = function(a, b, c) {
  a2 = a ^ 2
  q = (a2 - 3 * b) / 9
  r = (a * (2 * a2 - 9 * b) + 27 * c) / 54
  r2 = r ^ 2
  q3 = q ^ 3
  if (r2 <= q3) {
    ## three real roots: trigonometric (Viete) form
    t = r / sqrt(q3)
    ## clamp against rounding before acos()
    if (t < -1) {
      t = -1
    }
    if (t > 1) {
      t = 1
    }
    t = acos(t)
    a = a / 3
    q = -2 * sqrt(q)
    list(n = 3, x = c(q * cos(t / 3) - a, q * cos((t + 2 * pi) / 3) - a, q * cos((t - 2 * pi) / 3) - a))
  } else {
    ## one real root; the complex pair may degenerate onto the real axis
    A = -(abs(r) + sqrt(r2 - q3)) ^ (1 / 3)
    if (r < 0) {
      A = -A
    }
    B = if (A == 0) {
      0
    } else {
      q / A
    }
    a = a / 3
    Re = -(A + B) / 2 - a
    if (abs(Re) <= 1e-6) {
      ## degenerate pair: report the near-zero real part as a second root
      list(n = 2, x = c(A + B - a, Re))
    } else {
      list(n = 1, x = c(A + B - a))
    }
  }
}
### Solve the gradient descriptors produced by grad_multiSML and keep only
### candidate roots consistent with the assumed sign pattern t.
#' @param g list of descriptors: class "value" (closed-form candidate),
#'   "grad2" (quadratic coefficients), or "grad3" (cubic coefficients)
#' @param t sign condition c(sign(x), sign(y), sign(x - y))
#' @return list of candidate solutions list(x = ..., y = ...)
grad_solver = function(g, t) {
  ## Fixes: class(g) == "cls" replaced with inherits(); and a "value"
  ## descriptor now yields the closed-form root it carries -- previously
  ## it was hard-coded to 0, so nonzero-sign candidates from the linear
  ## (c == 0) branches were always filtered out below. TODO confirm
  ## against the intended derivation.
  gsolver_ = function(g) {
    if (inherits(g, "value")) {
      list(n = 1, x = as.numeric(g))
    } else if (inherits(g, "grad2")) {
      do.call(poly2_solver, g)
    } else {
      do.call(poly3_solver, g)
    }
  }
  xt = t[1]
  yt = t[2]
  dxy = t[3]
  res = list()
  if (dxy != 0) {
    ## x != y: solve coordinates separately, then pair combinations whose
    ## difference matches the assumed sign of x - y
    cand.x = gsolver_(g[["x"]])
    cand.x = cand.x[["x"]][sign(cand.x[["x"]]) == xt]
    cand.y = gsolver_(g[["y"]])
    cand.y = cand.y[["x"]][sign(cand.y[["x"]]) == yt]
    i = 1
    for (x in cand.x) {
      for (y in cand.y) {
        if (sign(x - y) == dxy) {
          res[[i]] = list(x = x, y = y)
          i = i + 1
        }
      }
    }
  } else {
    ## fused case x == y: one shared candidate list
    cand.xy = gsolver_(g[["xy"]])
    cand.xy = cand.xy[["x"]][sign(cand.xy[["x"]]) == xt]
    i = 1
    for (xy in cand.xy) {
      res[[i]] = list(x = xy, y = xy)
      i = i + 1
    }
  }
  res
}
## solve SML problem by component-wise update
#' @param lambda lasso penalty
#' @param rho fused lasso penalty
#' @example
#' M = data$var$Ng
#' N = data$var$N
#' Ys = data$obs[c("Y1", "Y2")]
#' Xs = submatX(data)
#' sigma2 = getsigma2_L2reg_multi(Xs, Ys, nrho = 20, M = M, N = N)
#' params.init = constrained_L2reg_multi(Xs, Ys, sigma2$rho.opt, M, N)
#' params.opt = multiSML_cwise(Bs = params.init$B, fs = params.init$F, Ys = Ys, Xs = Xs,
#' sigma2 = params.init$sigma2[1], Ng = data$var$Ng,
#' lambda = 0.1, rho = 0.1, maxit = 1000)
## Two-condition fused-lasso sparse maximum likelihood, solved by
## component-wise updates: each entry pair (B1[i,j], B2[i,j]) is updated
## jointly by enumerating sign patterns, solving the per-pattern
## stationarity conditions (grad_multiSML / grad_solver) and picking the
## candidate minimizing obj_multiSML.
## NOTE(review): the defaults `wBs = inverse(Bs)` and `rB = flinv(Bs)`
## call helpers not visible in this chunk -- presumably elementwise
## 1/|B| and 1/|B1 - B2| adaptive weights; confirm against their
## definitions elsewhere in the file.
multiSML_cwise = function(Bs,
                          fs,
                          Ys,
                          Xs,
                          sigma2,
                          Ng,
                          lambda,
                          rho,
                          weighted = TRUE,
                          wBs = inverse(Bs),
                          rB = flinv(Bs),
                          maxit = 100,
                          threshold = 1e-4,
                          verbose = 2) {
  ## centralize_mult() transposes: each Ys[[k]] becomes N_k x Ng and each
  ## Xs[[i]] becomes N x sk
  std = centralize_mult(Xs, Ys)
  Xs = std$X
  Ys = std$Y
  meanXs = std$muX
  meanYs = std$muY
  K = length(Ys) # number of conditions = 2
  if (verbose == 2) {
    cat(sprintf("conditions must be restricted to 2.. K = %d\n", K))
  }
  ## update for eQTL coeffs
  ## per condition and gene, f is affine in the B row:
  ## fs[[k]][[i]] = f0[[k]][[i]] - f1[[k]][[i]] %*% Bs[[k]][i,]
  f0 = vector("list", K)
  f1 = vector("list", K)
  for (i in 1:Ng) {
    Xi = Xs[[i]]
    Pi = solve(crossprod(Xi)) %*% t(Xi)
    for (k in 1:K) {
      yi_k = Ys[[k]][, i, drop = F] # n x 1 for gene i
      f0[[k]][[i]] = Pi %*% yi_k
      f1[[k]][[i]] = Pi %*% Ys[[k]] # f = f0 - f1 %*% B[i,]
    }
  }
  ## update for gnet coeffs
  niter = 1
  Ns = sapply(Ys, nrow)
  ImBs = lapply(Bs, function(B) {
    diag(Ng) - B
  })
  IBsinv = lapply(ImBs, solve)
  while (niter <= maxit) {
    Bs.prev = Bs
    fs.prev = fs
    for (i in 1:Ng) {
      ## per-condition cofactor columns ci / det(I - B)
      ci = lapply(IBsinv, function(IBi) {
        IBi[, i]
      })
      dbi = lapply(1:K, function(k) {
        vector("numeric", Ng)
      })
      for (j in 1:Ng) {
        ## update B[[k]][i,j] for i != j
        if (i != j) {
          bij.prev = bij = sapply(Bs, function(B)
            (B[i, j]))
          ## adaptive lasso / fused weights (or 1 when unweighted)
          wij = sapply(wBs, function(w) {
            if (weighted) {
              w[i, j]
            } else {
              1
            }
          })
          rij = if (weighted) {
            rB[i, j]
          } else {
            1
          }
          mij = sapply(ci, function(c) {
            c[j]
          })
          bi = lapply(ImBs, function(ImB) {
            bi_k = ImB[i, ]
            bi_k[j] = 0
            bi_k
          })
          ## j-th column of Ys
          Yej = lapply(Ys, function(Y) {
            Y[, j, drop = F]
          })
          a1 = sapply(1:K, function(k) {
            crossprod(Ys[[k]] %*% bi[[k]] - Xs[[i]] %*% fs[[k]][[i]], Yej[[k]])
          })
          a2 = sapply(1:K, function(k) {
            crossprod(Yej[[k]])
          })
          ## a0 = 1/mij + bij.prev
          ## enumerate sign patterns c(sign(x), sign(y), sign(x - y))
          cond = list(
            c(1, 1, 1),
            c(1,-1, 1),
            c(1, 0, 1),
            c(-1,-1, 1),
            c(0,-1, 1),
            c(1, 1,-1),
            c(0, 1,-1),
            c(-1,-1,-1),
            c(-1, 0,-1),
            c(-1, 1,-1),
            c(1, 1, 0),
            c(-1,-1, 0),
            c(0, 0, 0)
          )
          obj = obj_multiSML(Ns, mij, bij.prev, a1, a2, lambda, rho, wij, rij, sigma2)
          grad = grad_multiSML(Ns, mij, bij.prev, a1, a2, lambda, rho, wij, rij, sigma2)
          params = list()
          for (t in cond) {
            cand.grad = grad(t)
            params = c(params, grad_solver(cand.grad, t))
          }
          ## keep the candidate pair with the smallest objective value
          objval = sapply(params, function(args) {
            do.call(obj, args)
          })
          mix = which.min(objval)
          bij = unlist(params[[mix]])
          dbij = bij.prev - bij
          for (k in 1:K) {
            dbi[[k]][j] = dbij[k]
            Bs[[k]][i, j] = bij[k]
            ## rescale the cofactor column after the scalar change
            ci[[k]] = ci[[k]] / (1 + dbij[k] * mij[k])
            ImBs[[k]] = diag(Ng) - Bs[[k]]
          }
        }
      } ## for(j in 1:Ng)
      ## (ImB + ei^T %*% dbi)^{-1}
      ## exact inverse refresh per condition (rank-one update disabled)
      for (k in 1:K) {
        ## IBsinv[[k]] = IBsinv[[k]] - IBsinv[[k]][, i, drop = F] %*% dbi[[k]] %*% IBsinv[[k]] / (1 + dbi[[k]] %*% IBsinv[[k]][, i, drop = F])[1]
        IBsinv[[k]] = solve(ImBs[[k]])
        fs[[k]][[i]] = f0[[k]][[i]] - f1[[k]][[i]] %*% Bs[[k]][i,]
      }
    } ## for(i in 1:Ng)
    ## relative change of B and F over all conditions
    Berr = sum(sapply(1:K, function(k) {
      norm(Bs[[k]] - Bs.prev[[k]], type = "f") / norm(Bs.prev[[k]], type = "f")
    }))
    Ferr = sum(sapply(1:K, function(k) {
      sum(sapply(1:Ng, function(i) {
        norm(fs[[k]][[i]] - fs.prev[[k]][[i]], type = "f")
      })) / sum(sapply(1:Ng, function(i) {
        norm(fs.prev[[k]][[i]], type = "f")
      }))
    }))
    err = Berr + Ferr
    cat(sprintf("SML: iteration = %d, error = %f\n", niter, err))
    niter = niter + 1
    if (err < threshold || niter > maxit || is.nan(err)) {
      ## intercepts on the original (uncentered) scale, per condition
      mu = lapply(1:K, function(k) {
        (diag(Ng) - Bs[[k]]) %*% meanYs[[k]] - sapply(1:Ng, function(i) {
          meanXs[[i]] %*% fs[[k]][[i]]
        })
      })
      Bs = lapply(Bs, Matrix, sparse = T)
      break
    }
  } ## while(niter <= maxit)
  list(
    B = Bs,
    f = fs,
    mu = mu,
    niter = niter,
    err = err
  )
}
## utility functions for the multiple-condition case
#' @param Bs multiple gene regulatory networks (list)
#' @param fs multiple eQTL coefficient (list)
#' @param Ys multiple gene expression matrix (list)
#' @param Xs multiple eQTLs quantitive (list)
#' @param Ng number of genes
#' @param lambda lambda coefficient in lasso penalty term
#' @param rho rho coefficient in fused penalty term
#' @param type type of likelihood:
#' o objective function
#' c cross validation function
#' e independent error function
## Total squared residual of the structural equations across conditions,
## after centering (inputs are transposed to genes/eQTLs in rows).
SML_error = function(Xs, Ys, Bs, fs, Ng, Ns, K) {
  std = centralize_mult(Xs, Ys)
  X = lapply(std$X, t)
  Y = lapply(std$Y, t)
  err = 0
  for (k in seq_len(K)) {
    Yk = Y[[k]]
    for (i in seq_len(Ng)) {
      ## residual of gene i under condition k:
      ## y_i - B[i,-i] Y[-i,] - f_i^T X_i
      resid = Yk[i, , drop = F] -
        Bs[[k]][i, -i, drop = F] %*% Yk[-i, , drop = F] -
        crossprod(fs[[k]][[i]], X[[i]])
      err = err + tcrossprod(resid)
    }
  }
  err
}
## Negative log-likelihood (up to constants) of the multi-condition SEM:
## Jacobian terms plus the scaled residual sum of squares.
SML_logLik = function(Xs, Ys, Bs, fs, Ng, Ns, K, detIBs, sigma2) {
  std = centralize_mult(Xs, Ys)
  X = lapply(std$X, t)
  Y = lapply(std$Y, t)
  Ls = 0
  err = 0
  for (k in seq_len(K)) {
    ## Jacobian term -N_k / 2 * log(det(I - B_k)^2)
    Ls = Ls - Ns[k] / 2 * log(detIBs[k] ^ 2)
    Yk = Y[[k]]
    for (i in seq_len(Ng)) {
      resid = Yk[i, , drop = F] -
        Bs[[k]][i, -i, drop = F] %*% Yk[-i, , drop = F] -
        crossprod(fs[[k]][[i]], X[[i]])
      err = err + tcrossprod(resid)
    }
  }
  Ls + err / (2 * sigma2) + Ng * sum(Ns) / 2 * log(sigma2)
}
#' @description proximal operator for fused lasso
#' argmin lambda * |w1 * x| + lambda * |w2 * y| + rho * r * |y - x| + c/2 (|x - u1|_2^2 + |y - u_2|_2^2)
#' @param lambda lasso parameter
#' @param rho fused lasso parameter
#' @note FLSA algorithm is used in this step
prox_flsa = function(lambda, rho, c, us, ws, r) {
  ## Stage 1 (fusion): entries whose gap |u1 - u2| is within 2*rho*r/c are
  ## fused to the midpoint; the rest move toward each other by rho*r/c.
  ## `fused` / `apart` are complementary 0/1 masks.
  gap = us[[1]] - us[[2]]
  fused = (abs(gap) <= 2 * rho * r / c)
  apart = 1 - fused
  rho = min(rho, 1e16)  # cap so the shift below stays finite when rho = Inf
  mid = (us[[1]] + us[[2]]) / 2
  shift = sign(gap) * rho * r / c
  x = list(mid * fused + (us[[1]] - shift) * apart,
           mid * fused + (us[[2]] + shift) * apart)
  x = lapply(x, as.numeric)
  ## Stage 2 (lasso): fused entries share an averaged weight, separated
  ## entries use their own weight.
  lapply(1:2, function(i) {
    w.avg = (ws[[1]] + ws[[2]]) / 2
    soft.eq = pmax(x[[i]] - lambda * w.avg / c, 0) + pmin(x[[i]] + lambda * w.avg / c, 0)
    soft.df = pmax(x[[i]] - lambda * ws[[i]] / c, 0) + pmin(x[[i]] + lambda * ws[[i]] / c, 0)
    soft.eq * fused + soft.df * apart
  })
}
## sigma2 estimation from SEM logLikihood function
## Pooled residual-variance estimate from the SEM log-likelihood:
## sum of squared structural-equation residuals over conditions and genes,
## divided by Ng * sum(Ns). Inputs are transposed so genes/eQTLs index rows.
sigma2_sem = function(Xs, Ys, B, f, Ng, Ns, K) {
  X = lapply(Xs, t)
  Y = lapply(Ys, t)
  err = 0
  for (k in seq_len(K)) {
    Yk = Y[[k]]
    for (i in seq_len(Ng)) {
      resid = Yk[i, , drop = F] -
        B[[k]][i, -i, drop = F] %*% Yk[-i, , drop = F] -
        crossprod(f[[k]][[i]], X[[i]])
      err = err + tcrossprod(resid)
    }
  }
  ## [1] unwraps the 1 x 1 matrix produced by tcrossprod()
  (err / (Ng * sum(Ns)))[1]
}
## solve SML problem by block coordinate descent
#' @description Fit the two-condition sparse-maximum-likelihood SEM
#'   (lasso + fused lasso) by row-wise block coordinate descent: each sweep
#'   takes a proximal-gradient (FLSA) step on row i of both network matrices,
#'   then refreshes that gene's eQTL coefficients in closed form.
#' @param lambda lambda hyper-parameter for lasso term
#' @param rho fused lasso hyper-parameter for fused lasso term
#' @param weighted currently unused; kept for interface compatibility
#' @return list(B, f, mu, sigma2, niter, err, detIB)
#' M = data$var$Ng
#' N = data$var$N
#' Ys = data$obs[c("Y1", "Y2")]
#' Xs = submatX(data)
#' sigma2 = getsigma2_L2reg_multi(Xs, Ys, nrho = 20, M = M, N = N)
#' params.init = constrained_L2reg_multi(Xs, Ys, sigma2$rho.opt, M, N)
#' params.opt2 = multiSML_bcd(Bs = params.init$B, fs = params.init$F, Ys = Ys, Xs = Xs,
#'                            sigma2 = params.init$sigma2[1], Ng = data$var$Ng,
#'                            lambda = 0.1, rho = 0.1, maxit = 500, gamma = sigma2$rho.opt)
multiSML_bcd = function(Bs,
                        fs,
                        Ys,
                        Xs,
                        sigma2,
                        Ng,
                        lambda,
                        rho,
                        weighted = TRUE,
                        maxit = 100,
                        threshold = 1e-3,
                        verbose = 2) {
  ## center X and Y; means are kept to recover the intercepts mu at the end
  std = centralize_mult(Xs, Ys)
  Xs = std$X
  Ys = std$Y
  meanXs = std$muX
  meanYs = std$muY
  K = length(Ys) # number of conditions = 2
  if (verbose == 2) {
    cat(sprintf("conditions must be restricted to 2.. K = %d\n", K))
  }
  ## precompute per-gene, per-condition projection quantities so that
  ## f[[k]][[i]] = f0 - f1 %*% bi and the row objective uses Yp / Hy only
  f0 = vector("list", K)
  f1 = vector("list", K)
  Yp = vector("list", K)
  Hy = vector("list", K)
  Yp.maxEigen = vector("list", K)
  Ns = sapply(Ys, nrow)
  for (i in 1:Ng) {
    Xi = Xs[[i]]
    Pi = solve(crossprod(Xi)) %*% t(Xi)
    for (k in 1:K) {
      # specific condition
      yi_k = Ys[[k]][, i, drop = F] # n[k] x 1 for gene i
      Yi_k = Ys[[k]][, -i] # n[k] x (ng-1) (for specific gene i)
      f0[[k]][[i]] = Pi %*% yi_k
      f1[[k]][[i]] = Pi %*% Yi_k # f[[k]][[i]] = f0[[k]][[i]] - f1[[k]][[i]] %*% bi^k
      Hi_k = diag(Ns[k]) - Xi %*% Pi # n[k] x n[k] residual projection matrix
      Yp[[k]][[i]] = t(Yi_k) %*% Hi_k %*% Yi_k # (ng - 1) x (ng - 1)
      Hy[[k]][[i]] = t(yi_k) %*% Hi_k %*% Yi_k # 1 x (ng - 1)
      ## largest eigen-value of Yp (used in the Lipschitz bound)
      Yp.maxEigen[[k]][[i]] = eigen(Yp[[k]][[i]])$values[1]
    }
  }
  ## update for gnet row-wise
  niter = 1
  ImBs = lapply(Bs, function(B) {
    diag(Ng) - B
  })
  detIBs = sapply(ImBs, det)
  IBsinv = lapply(ImBs, solve)
  ## adaptive lasso / fused-lasso weights from the initial estimates
  wBs = lapply(Bs, function(B) {
    1 / abs(B)
  })
  rB = 1 / abs(Bs[[1]] - Bs[[2]])
  ## BUG FIX: logLik's signature is (detIBs, Bs, ws, r, lambda, rho, Ns, K);
  ## Ns was previously omitted here, misbinding K into Ns
  Ls = logLik(detIBs, Bs, wBs, rB, lambda, rho, Ns, K) + Ng * sum(Ns) / 2 * log(sigma2)
  while (niter <= maxit) {
    Bs.prev = Bs
    fs.prev = fs
    Ls.prev = Ls
    for (i in 1:Ng) {
      ## minimize sum_k(-Ns[k]*log det(I-B[[k]])^2) + bi^T Yp bi - Hy bi + penalties
      ci = lapply(IBsinv, function(IBi) {
        # i-th column (off-row entries) of (I - B)^{-1}
        IBi[-i, i, drop = F]
      })
      bi = lapply(Bs, function(B) {
        t(B[i, -i, drop = F])
      })
      gi = lapply(1:K, function(k) {
        grad = grad_rwise_SML(Ns[k], ci[[k]], Yp[[k]][[i]], Hy[[k]][[i]], sigma2[1])
        grad(bi[[k]])
      })
      ## Lipschitz moduli for row-i (one per condition, take the max)
      Lis = sapply(1:K, function(k) {
        gtg = tcrossprod(ImBs[[k]][-i,])
        oi = chol2inv(gtg)
        deti = det(gtg)
        gii = ImBs[[k]][-i, -i]
        si = ImBs[[k]][-i, i, drop = F]
        c2i = sum((ci[[k]] * detIBs[k]) ^ 2)
        lips_rwise_SML(Ns[k],
                       oi,
                       gii,
                       si,
                       c2i,
                       deti,
                       Yp.maxEigen[[k]][[i]],
                       sigma2,
                       Ng)[1]
      })
      Li = max(Lis)
      ## proximal-gradient step on row i of both conditions
      ui = lapply(1:K, function(k) {
        bi[[k]] - gi[[k]] / Li
      })
      wBi = lapply(wBs, function(wB) {
        wB[i, -i]
      })
      rBi = rB[i, -i]
      xi = prox_flsa(lambda, rho, Li, ui, wBi, rBi)
      for (k in 1:K) {
        Bs[[k]][i, -i] = xi[[k]]
        dbi = Bs.prev[[k]][i,] - Bs[[k]][i,]
        ImBs[[k]] = diag(Ng) - Bs[[k]]
        ## BUG FIX: the determinant lemma must use the OLD inverse
        ## (with the new inverse this factor is identically 1, so detIBs
        ## was never updated); same ordering as multiSML_iPALM
        detIBs[k] = (ImBs[[k]][i,] %*% IBsinv[[k]][, i, drop = F])[1] * detIBs[k]
        ## Sherman-Morrison rank-1 update of (I - B)^{-1}
        IBsinv[[k]] = IBsinv[[k]] - IBsinv[[k]][, i, drop = F] %*% dbi %*% IBsinv[[k]] / (1 + dbi %*% IBsinv[[k]][, i, drop = F])[1]
        ## IBsinv[[k]] = solve(ImBs[[k]])
        fs[[k]][[i]] = f0[[k]][[i]] - f1[[k]][[i]] %*% Bs[[k]][i, -i]
      }
    } # row-wise update
    ## relative change of B and f across the sweep
    Berr = sum(sapply(1:K, function(k) {
      norm(Bs[[k]] - Bs.prev[[k]], type = "f") / norm(Bs.prev[[k]], type = "f")
    }))
    Ferr = sum(sapply(1:K, function(k) {
      sum(sapply(1:Ng, function(i) {
        norm(fs[[k]][[i]] - fs.prev[[k]][[i]], type = "f")
      })) / sum(sapply(1:Ng, function(i) {
        norm(fs.prev[[k]][[i]], type = "f")
      }))
    }))
    err = Berr + Ferr
    sigma2 = sigma2_sem(Xs, Ys, Bs, fs, Ng, Ns, K)
    ## BUG FIX: pass Ns (see above)
    Ls = logLik(detIBs, Bs, wBs, rB, lambda, rho, Ns, K) + Ng * sum(Ns) / 2 * log(sigma2)
    Lerr = abs(Ls.prev - Ls) / (1 + abs(Ls.prev))
    if (verbose >= 2) {
      cat(
        sprintf(
          "SML: iteration = %d, error = %f, logLik = %f, sigma2 = %f\n",
          niter,
          err,
          Ls,
          sigma2
        )
      )
    }
    niter = niter + 1
    if ((err <= threshold && Lerr <= threshold) || niter > maxit) {
      ## recover intercepts on the original (uncentered) scale
      mu = lapply(1:K, function(k) {
        (diag(Ng) - Bs[[k]]) %*% meanYs[[k]] - sapply(1:Ng, function(i) {
          meanXs[[i]] %*% fs[[k]][[i]]
        })
      })
      sigma2 = sigma2_sem(Xs, Ys, Bs, fs, Ng, Ns, K)
      Bs = lapply(Bs, Matrix, sparse = T)
      break
    }
  } # while(niter <= maxit)
  list(
    B = Bs,
    f = fs,
    mu = mu,
    sigma2 = sigma2,
    niter = niter,
    err = err,
    detIB = detIBs
  )
}
## stepwise update sigma2
## Raw (unscaled) residual sum of squares of the SEM; same residuals as
## sigma2_sem but without the division by (Ng * sum(Ns)).
## BUG FIX: the inner loop iterated over undefined global `M`; it must cover
## the Ng genes exactly as in sigma2_sem.
err2_sem = function(Xs, Ys, B, f, Ng, Ns, K) {
  X = lapply(Xs, t)
  Y = lapply(Ys, t)
  err2 = 0
  for (k in 1:K) {
    for (i in 1:Ng) {
      Xi = X[[i]] # sk x N
      bi = B[[k]][i, -i, drop = F] # 1 x (Ng-1)
      yi = Y[[k]][i, , drop = F] # 1 x N
      Yi = Y[[k]][-i, , drop = F] # (Ng-1) x N
      fi = f[[k]][[i]] # sk x 1
      err2 = err2 + tcrossprod(yi - bi %*% Yi - crossprod(fi, Xi))
    }
  }
  err2
}
## Utility function for the objective: penalized negative log-likelihood of the
## network part, -sum_k Ns[k]/2 * log(det(I - B_k)^2) plus the weighted lasso
## and fused-lasso penalties (diagonal entries are excluded from both).
## NOTE: shadows the stats::logLik generic; name kept for compatibility.
logLik = function(detIBs, Bs, ws, r, lambda, rho, Ns, K) {
  rho = min(rho, 1e16)  # guard against rho = Inf
  fused.pen = rho * r * abs(Bs[[2]] - Bs[[1]])
  lasso.pen = 0
  nll = 0
  for (k in 1:K) {
    lasso.pen = lasso.pen + lambda * (ws[[k]] * abs(Bs[[k]]))
    nll = nll - Ns[k] / 2 * log(detIBs[k] ^ 2)
  }
  ## self-loops are never penalized
  diag(lasso.pen) = 0
  diag(fused.pen) = 0
  nll + sum(lasso.pen) + sum(fused.pen)
}
## Adaptive (weighted) lasso weights: element-wise reciprocal of |B|
## for every network matrix in the list.
inverse = function(Bs) {
  lapply(Bs, function(mat) 1 / abs(mat))
}
## Unweighted lasso weights: all ones, with an infinite penalty on the
## diagonal so self-loops are never selected.
invone = function(Bs) {
  lapply(Bs, function(mat) {
    unit = matrix(1, nrow = nrow(mat), ncol = ncol(mat))
    diag(unit) = Inf
    unit
  })
}
## Adaptive fused-lasso weights: reciprocal absolute difference between the
## two condition networks (Inf where the two estimates agree exactly).
flinv = function(Bs) {
  delta = Bs[[1]] - Bs[[2]]
  1 / abs(delta)
}
## Unweighted fused-lasso weights: all ones with infinite diagonal
## (dimensions taken from the first/second network as in the original).
flone = function(Bs) {
  unit = matrix(1, nrow = nrow(Bs[[1]]), ncol = ncol(Bs[[2]]))
  diag(unit) = Inf
  unit
}
## solve SML problem by block coordinate descent by backtracking inert-PALM
#' @description Inertial PALM solver for the two-condition sparse SEM with
#'   lasso + fused-lasso penalties. Row-wise proximal-gradient steps with an
#'   inertial (momentum) extrapolation and a backtracking loop that guards
#'   against a singular (I - B) update.
#' @param lambda lambda hyper-parameter for lasso term
#' @param rho fused lasso hyper-parameter for fused lasso term
#' @param wBs per-condition lasso weight matrices (default: adaptive 1/|B|)
#' @param rB fused-lasso weight matrix (default: adaptive 1/|B1 - B2|)
#' @param acc use inertial acceleration
#' @param inertial function of the iteration number giving the inertia weight
#' @param use.strict TRUE: require both parameter- and likelihood-convergence
#' @return list(B, f, mu, sigma2, niter, err, detIB)
#' params.opt4 = multiSML_iPALM(Bs = params.init$B, fs = params.init$F, Ys = Ys, Xs = Xs,
#'                              sigma2 = params.init$sigma2[1], Ng = data$var$Ng,
#'                              lambda = 0.1, rho = 0.1, maxit = 500)
multiSML_iPALM = function(Bs,
                          fs,
                          Ys,
                          Xs,
                          sigma2,
                          Ng,
                          lambda,
                          rho,
                          wBs = inverse(Bs),
                          rB = flinv(Bs),
                          maxit = 100,
                          acc = TRUE,
                          inertial = inertial_pars("lin"),
                          threshold = 1e-3,
                          sparse = FALSE,
                          use.strict = TRUE,
                          verbose = 2) {
  ## center X and Y; means recover the intercepts mu at the end
  std = centralize_mult(Xs, Ys)
  Xs = std$X
  Ys = std$Y
  meanXs = std$muX
  meanYs = std$muY
  K = length(Ys) # number of conditions = 2
  if (verbose == 2) {
    cat(sprintf("conditions must be restricted to 2.. K = %d\n", K))
  }
  ## precompute per-gene, per-condition projection quantities:
  ## f = f0 - f1 %*% bi, plus the quadratic terms Yp / Hy for the row objective
  f0 = vector("list", K)
  f1 = vector("list", K)
  Yp = vector("list", K)
  Hy = vector("list", K)
  Yp.maxEigen = vector("list", K)
  Ns = sapply(Ys, nrow)
  for (i in 1:Ng) {
    Xi = Xs[[i]]
    Pi = solve(crossprod(Xi)) %*% t(Xi)
    for (k in 1:K) {
      # specific condition
      yi_k = Ys[[k]][, i, drop = F] # n[k] x 1 for gene i
      Yi_k = Ys[[k]][, -i] # n[k] x (ng-1) (for specific gene i)
      f0[[k]][[i]] = Pi %*% yi_k
      f1[[k]][[i]] = Pi %*% Yi_k # f[[k]][[i]] = f0[[k]][[i]] - f1[[k]][[i]] %*% bi^k
      Hi_k = diag(Ns[k]) - Xi %*% Pi # n[k] x n[k] residual projection matrix
      Yp[[k]][[i]] = t(Yi_k) %*% Hi_k %*% Yi_k # (ng - 1) x (ng - 1)
      Hy[[k]][[i]] = t(yi_k) %*% Hi_k %*% Yi_k # 1 x (ng - 1)
      ## largest eigen-value of Yp (for the Lipschitz bound)
      Yp.maxEigen[[k]][[i]] = eigen(Yp[[k]][[i]])$values[1]
    }
  }
  ## update for gnet row-wise
  niter = 1
  ImBs = lapply(Bs, function(B) {
    diag(Ng) - B
  })
  detIBs = sapply(ImBs, det)
  IBsinv = lapply(ImBs, solve)
  ## BUG FIX: was `N * sum(Ns)` with `N` undefined in this scope; the
  ## recomputation at the bottom of the loop correctly uses Ng
  Ls = logLik(detIBs, Bs, wBs, rB, lambda, rho, Ns, K) + Ng * sum(Ns) / 2 * log(sigma2)
  ## two-step history for the inertial extrapolation
  Bs.prevs = list(Bs, Bs)
  inert = acc
  while (niter <= maxit) {
    inert.pars = inertial(niter)
    Bs.inert = if (inert) {
      lapply(1:K, function(k) {
        Bs.prevs[[2]][[k]] + inert.pars * (Bs.prevs[[2]][[k]] - Bs.prevs[[1]][[k]])
      })
    } else {
      Bs
    }
    fs.prev = fs
    Ls.prev = Ls
    for (i in 1:Ng) {
      ## minimize sum_k(-Ns[k]*log det(I-B[[k]])^2) + bi^T Yp bi - Hy bi + penalties
      ci = lapply(IBsinv, function(IBi) {
        # i-th column (off-row entries) of (I - B)^{-1}
        IBi[-i, i, drop = F]
      })
      bi = lapply(Bs.inert, function(B.inert) {
        t(B.inert[i, -i, drop = F])
      })
      gi = lapply(1:K, function(k) {
        grad = grad_rwise_SML(Ns[k], ci[[k]], Yp[[k]][[i]], Hy[[k]][[i]], sigma2[1])
        grad(bi[[k]])
      })
      ## Lipschitz moduli for row-i
      Lis = sapply(1:K, function(k) {
        gtg = tcrossprod(ImBs[[k]][-i,])
        oi = chol2inv(gtg)
        deti = crossprod(IBsinv[[k]][,i])[1] * (detIBs[k]^2)
        gii = ImBs[[k]][-i, -i]
        si = ImBs[[k]][-i, i, drop = F]
        c2i = sum((ci[[k]] * detIBs[k]) ^ 2)
        Li = lips_rwise_SML(Ns[k],
                            oi,
                            gii,
                            si,
                            c2i,
                            deti,
                            Yp.maxEigen[[k]][[i]],
                            sigma2,
                            Ng)[1]
        Li
      })
      Li = max(Lis)
      ## inertial step-size correction (Pock & Sabach iPALM rule)
      Li = (1 + 2 * inert.pars) * Li / (2 * (1 - inert.pars))
      ## loop-invariant penalty weights for row i (hoisted out of backtracking)
      wBi = lapply(wBs, function(wB) {
        wB[i, -i]
      })
      rBi = rB[i, -i]
      ## backtracking: shrink the gradient step until (I - B) stays invertible
      detZero = TRUE
      cl = 1
      while (detZero) {
        ui = lapply(1:K, function(k) {
          bi[[k]] - gi[[k]] / (cl * Li)
        })
        ## NOTE(review): the prox coefficient stays Li while the gradient step
        ## uses cl * Li — possibly intentional, but verify against the iPALM
        ## derivation before changing
        xi = prox_flsa(lambda, rho, Li, ui, wBi, rBi)
        ## determinant-lemma factor of the candidate update; zero means singular
        dIBu = sapply(1:K, function(k) {
          IBsinv[[k]][i, i] - (t(xi[[k]]) %*% IBsinv[[k]][-i, i, drop = F])[1]
        })
        cl = cl * 2
        detZero = any(dIBu == 0)
      }
      for (k in 1:K) {
        Bs[[k]][i, -i] = xi[[k]]
        ImBs[[k]] = diag(Ng) - Bs[[k]]
        ## matrix determinant lemma with the OLD inverse, then Sherman-Morrison
        detIBs[k] = (ImBs[[k]][i,] %*% IBsinv[[k]][, i, drop = F])[1] * detIBs[k]
        # detIBs[k] = det(ImBs[[k]])
        dbi = Bs.prevs[[2]][[k]][i,] - Bs[[k]][i,]
        # IBsinv[[k]] = solve(ImBs[[k]])
        IBsinv[[k]] = IBsinv[[k]] - IBsinv[[k]][, i, drop = F] %*% dbi %*% IBsinv[[k]] / (1 + dbi %*% IBsinv[[k]][, i, drop = F])[1]
        fs[[k]][[i]] = f0[[k]][[i]] - f1[[k]][[i]] %*% Bs[[k]][i, -i]
      }
      # sigma2 = sigma2_sem(Xs, Ys, Bs, fs, Ng, Ns, K)
    } # row-wise update
    ## relative change of B and f across the sweep
    Berr = sum(sapply(1:K, function(k) {
      norm(Bs[[k]] - Bs.prevs[[2]][[k]], type = "f") / (1 + norm(Bs.prevs[[2]][[k]], type = "f"))
    }))
    Ferr = sum(sapply(1:K, function(k) {
      sum(sapply(1:Ng, function(i) {
        norm(fs[[k]][[i]] - fs.prev[[k]][[i]], type = "f")
      })) / (1 + sum(sapply(1:Ng, function(i) {
        norm(fs.prev[[k]][[i]], type = "f")
      })))
    }))
    err = Berr + Ferr
    sigma2 = sigma2_sem(Xs, Ys, Bs, fs, Ng, Ns, K)
    Ls = logLik(detIBs, Bs, wBs, rB, lambda, rho, Ns, K) + Ng * sum(Ns) / 2 * log(sigma2)
    Lerr = abs(Ls.prev - Ls) / (1 + abs(Ls.prev))
    # inert = ifelse(Lerr < 1e-6, FALSE, acc)
    if (verbose >= 2) {
      cat(
        sprintf(
          "SML: iteration = %d, error = %f, logLik = %f, sigma2 = %f, inert=%s\n",
          niter,
          err,
          Ls,
          sigma2,
          inert
        )
      )
    }
    niter = niter + 1
    Bs.prevs = list(Bs.prevs[[2]], Bs)
    opt.cond = if (use.strict) {
      (err < threshold && Lerr < threshold)
    } else {
      (err < threshold || Lerr < threshold)
    }
    if (opt.cond || niter > maxit || is.nan(err)) {
      ## recover intercepts on the original (uncentered) scale
      mu = lapply(1:K, function(k) {
        (diag(Ng) - Bs[[k]]) %*% meanYs[[k]] - sapply(1:Ng, function(i) {
          meanXs[[i]] %*% fs[[k]][[i]]
        })
      })
      sigma2 = sigma2_sem(Xs, Ys, Bs, fs, Ng, Ns, K)
      if (sparse) {
        Bs = lapply(Bs, Matrix, sparse = T)
      }
      break
    }
  } # while(niter <= maxit)
  list(
    B = Bs,
    f = fs,
    mu = mu,
    sigma2 = sigma2,
    niter = niter,
    err = err,
    detIB = detIBs
  )
}
## cross-validation and EBIC for hyper-parameter tuning
## lambda max can be estimated
#' @description Estimate the smallest lambda that drives every network
#'   coefficient to zero at the network-free least-squares solution:
#'   the max over conditions/entries of |R %*% Y| / sigma2, scaled by the
#'   (adaptive) lasso weights.
#' @param weighted use adaptive weights 1/|B| (TRUE) or unit weights (FALSE)
#' @example
#' lamax = get_lambda.max(params.init$B, Ys, Xs, Ng)
get_lambda.max = function(Bs, Ys, Xs, Ng, weighted = TRUE) {
  std = centralize_mult(Xs, Ys)
  Xs = std$X ## N x sk
  Ys = std$Y ## N x p
  K = length(Ys)
  Ns = sapply(Ys, nrow)
  R = vector("list", K) ## per-condition residual matrices, Ng x N
  w = if (weighted) {
    inverse(Bs)
  } else {
    invone(Bs)
  }
  for (k in 1:K) {
    R[[k]] = matrix(0, nrow = Ng, ncol = Ns[k]) ## Ng x N
  }
  for (i in 1:Ng) {
    Xi = Xs[[i]]
    Pi = solve(crossprod(Xi)) %*% t(Xi)
    for (k in 1:K) {
      yi = Ys[[k]][, i, drop = F] # n x 1
      ## FIX: reuse the precomputed projection Pi instead of re-solving
      ## crossprod(Xi) once per condition (identical value, less work)
      fi = Pi %*% yi
      Xf = Xi %*% fi # n x 1
      R[[k]][i,] = yi - Xf
    }
  }
  ## pooled residual variance of the network-free fit
  err = 0
  for (k in 1:K) {
    err = err + norm(R[[k]], type = "f") ^ 2
  }
  sigma2 = err / (Ng * sum(Ns))
  Ry = vector("list", K)
  for (k in 1:K) {
    Ry[[k]] = R[[k]] %*% Ys[[k]]
    Ry[[k]] = abs(Ry[[k]] / sigma2) / w[[k]]
  }
  max(sapply(Ry, max))
}
## cross-validation and EBIC for hyper-parameter tuning
## rho max can be estimated, rho is the fused lasso
## regularized hyper parameter
#' @description Estimate the smallest fused-lasso rho that fully fuses the two
#'   condition networks: first solve with rho = Inf (fully fused solution),
#'   then take the largest fused-lasso KKT gradient magnitude at that point.
#' @param weighted use adaptive weights (TRUE) or unit weights (FALSE)
#' @example
#' rhomax = get_rho.max(params.init$B, params.init$F, Ys, Xs, params.init$sigma2[1], data$var$Ng)
get_rho.max = function(Bs, fs, Ys, Xs, sigma2, Ng, weighted = TRUE) {
  if(weighted) {
    wBs = inverse(Bs)
    rB = flinv(Bs)
  } else {
    wBs = invone(Bs)
    rB = flone(Bs)
  }
  ## fully fused reference solution: lambda = 0, rho = Inf
  params.rho = multiSML_iPALM(
    Bs,
    fs,
    Ys,
    Xs,
    sigma2,
    Ng,
    lambda = 0,
    rho = Inf,
    wBs = wBs,
    rB = rB,
    maxit = 2000,
    threshold = 1e-4,
    use.strict = F,
    sparse = T,
    verbose = 1
  )
  weight.rho = if(weighted) {
    flinv(Bs)
  } else {
    flone(Bs)
  }
  ## only condition 1's network is kept — at rho = Inf both networks are
  ## presumably identical (fully fused), so one suffices; TODO confirm
  Bs = params.rho$B[[1]]
  fs = params.rho$f
  sigma2 = params.rho$sigma2
  std = centralize_mult(Xs, Ys)
  Xs = std$X ## Ng (n x sk)
  Ys = std$Y ## n x ng
  ## multiple
  K = length(Ys)
  Ns = sapply(Ys, nrow)
  Bc = vector("list", K)
  YY = vector("list", K)
  FX = vector("list", K)
  Dx = vector("list", K)
  for (k in 1:K) {
    ## gradient of -Ns[k] * log det(I - B) w.r.t. B
    Bc[[k]] = -Ns[k] * t(solve(diag(Ng) - Bs))
    YY[[k]] = crossprod(Ys[[k]]) ## Y %*% t(Y)
    FX[[k]] = matrix(0, nrow = Ng, ncol = Ns[k]) ## F %*% X (p x k x k x n = p x n)
    for (i in 1:Ng) {
      FX[[k]][i, ] = as.numeric(Xs[[i]] %*% fs[[k]][[i]])
    }
    ## weighted gradient magnitude of the data term at the fused solution
    Dx[[k]] = abs(Bc[[k]] + ((diag(Ng) - Bs) %*% YY[[k]] - FX[[k]] %*% Ys[[k]]) / sigma2) / weight.rho
    ## diagonal is never penalized — exclude it from the max
    diag(Dx[[k]]) = -Inf
  }
  ## Dxy = abs((diag(Ng) - Bs) %*% (YY[[2]] - YY[[1]]) - (FX[[2]] %*% Ys[[2]] - FX[[1]] %*% Ys[[1]])) / sigma2 / 2 / weight.rho
  ## diag(Dxy) = -Inf
  max(c(max(Dx[[1]]), max(Dx[[2]])))
  ## max(Dxy)
}
## cross validation for hyper-parameter tuning
#' @description 5-fold cross-validation over a (lambda, rho) grid. For each
#'   fold, fits are warm-started along the lambda path (except at the start of
#'   each rho row, where the initial estimates are reused); per-grid-point test
#'   error and test log-likelihood are accumulated across folds.
#' @param dyn dynamic updated rho.max by given lambda
#' @example
#' cv.params = cv_multiSML(Bs = params.init$B, fs = params.init$F, Ys = Ys, Xs = Xs, sigma2 = params.init$sigma2[1], Ng = data$var$Ng, nlambda = 20, nrho = 20)
cv_multiSML = function(Bs,
                       fs,
                       Ys,
                       Xs,
                       sigma2,
                       Ng,
                       nlambda = 20,
                       nrho = 20,
                       threshold = 1e-4,
                       verbose = 1) {
  ## grid: 4 decades below the estimated lambda.max / rho.max
  lambda.max = get_lambda.max(Bs, Ys, Xs, Ng)
  lambda.factors = 10 ^ seq(0, -4, length.out = nlambda) * lambda.max
  wBs = inverse(Bs)
  rB = flinv(Bs)
  rho.max = get_rho.max(Bs, fs, Ys, Xs, sigma2, Ng)
  rho.factors = 10 ^ seq(0, -4, length.out = nrho) * rho.max
  ncv = 5
  ## NOTE(review): samples are taken along columns here (ncol / column
  ## subsetting), while multiSML_iPALM indexes samples along rows (nrow);
  ## verify the expected orientation of Ys before relying on this
  Ns = sapply(Ys, ncol)
  K = length(Ys)
  Ytrain = vector("list", ncv)
  Xtrain = vector("list", ncv)
  Ytest = vector("list", ncv)
  Xtest = vector("list", ncv)
  ## one fold assignment shared by both conditions (sized by condition 1)
  cv.fold = sample(seq(1, ncv), size = Ns[1], replace = T)
  for (i in 1:ncv) {
    Ytrain[[i]] = lapply(Ys, function(y) {
      y[, cv.fold != i, drop = F]
    })
    Xtrain[[i]] = lapply(Xs, function(x) {
      x[, cv.fold != i, drop = F]
    })
    Ytest[[i]] = lapply(Ys, function(y) {
      y[, cv.fold == i, drop = F]
    })
    Xtest[[i]] = lapply(Xs, function(x) {
      x[, cv.fold == i, drop = F]
    })
  }
  cverrs = vector("list", nrho * nlambda)
  cvlls = vector("list", nrho * nlambda)
  hyper.params = list()
  for (cv in 1:ncv) {
    params.opt = list()
    Nc = sapply(Ytest[[cv]], ncol)
    ix = 1
    for (rho in rho.factors) {
      for (lambda in lambda.factors) {
        cat(sprintf("lambda = %4f, rho = %4f, foldid = %d\n", lambda, rho, cv))
        if (ix %% nlambda == 1) {
          ## first lambda of a rho row: cold start from the initial estimates
          params.opt[[ix]] = multiSML_iPALM(
            Bs,
            fs,
            Ytrain[[cv]],
            Xtrain[[cv]],
            sigma2,
            Ng,
            lambda,
            rho,
            wBs,
            rB,
            maxit = 1000,
            threshold = threshold,
            acc = TRUE,
            sparse = FALSE,
            use.strict = FALSE,
            verbose = verbose
          )
        } else {
          ## warm start from the previous grid point's solution
          params.opt[[ix]] = multiSML_iPALM(
            params.opt[[ix - 1]]$B,
            params.opt[[ix - 1]]$f,
            Ytrain[[cv]],
            Xtrain[[cv]],
            params.opt[[ix - 1]]$sigma2,
            Ng,
            lambda,
            rho,
            wBs,
            rB,
            maxit = 1000,
            threshold = threshold,
            acc = TRUE,
            sparse = FALSE,
            use.strict = FALSE,
            verbose = verbose
          )
        }
        ## held-out log-likelihood and prediction error for this grid point
        loglik = SML_logLik(
          Xtest[[cv]],
          Ytest[[cv]],
          params.opt[[ix]]$B,
          params.opt[[ix]]$f,
          Ng,
          Nc,
          K,
          params.opt[[ix]]$detIB,
          params.opt[[ix]]$sigma2
        )[1]
        err = SML_error(Xtest[[cv]],
                        Ytest[[cv]],
                        params.opt[[ix]]$B,
                        params.opt[[ix]]$f,
                        Ng,
                        Nc,
                        K)[1]
        cverrs[[ix]] = c(cverrs[[ix]], err)
        cvlls[[ix]] = c(cvlls[[ix]], loglik)
        if (cv == 1) {
          hyper.params[[ix]] = c(lambda, rho)
        }
        ix = ix + 1
      }
    }
  }
  list(opt.hyperparams = hyper.params,
       cverrs = cverrs,
       loglik = cvlls)
}
## utility function
#' @description Arrange the cross-validation results on the (lambda, rho) grid
#'   and build a plotly surface (log10 of the CV error, or the raw mean
#'   log-likelihood).
#' @param cvparams output of cv_multiSML
#' @param type which CV statistic to plot: "err" or "loglik"
#' @return list(lambda, rho, cvm = grid of fold means, surf = plotly surface)
cvsurface = function(cvparams, type = c("err", "loglik")) {
  ## BUG FIX: with the default, `type` was a length-2 vector and
  ## `if (type == "err")` errors on R >= 4.2; match.arg resolves it
  type = match.arg(type)
  cvm = if (type == "err") {
    cvparams$cverrs
  } else {
    cvparams$loglik
  }
  cvfuns = data.frame(
    lambda = sapply(cvparams$opt.hyperparams, `[`, 1),
    rho = sapply(cvparams$opt.hyperparams, `[`, 2),
    cvmean = sapply(cvm, mean),
    cvsd = sapply(cvm, sd)
  )
  lambda = sort(unique(cvfuns$lambda))
  rho = sort(unique(cvfuns$rho))
  cvmean = matrix(nrow = length(lambda), ncol = length(rho))
  ## map each grid point to its (row, col) index in the surface matrix
  cvfuns$lambda = sapply(cvfuns$lambda, function(l) {
    which(lambda == l)
  })
  cvfuns$rho = sapply(cvfuns$rho, function(r) {
    which(rho == r)
  })
  for (row in seq_len(nrow(cvfuns))) {
    cvmean[cvfuns$lambda[row], cvfuns$rho[row]] = cvfuns$cvmean[row]
  }
  require(plotly)
  if (type == "err") {
    surface = plot_ly(x = log10(lambda),
                      y = log10(rho),
                      z = log10(cvmean)) %>% add_surface()
  } else {
    surface = plot_ly(x = log10(lambda),
                      y = log10(rho),
                      z = cvmean) %>% add_surface()
  }
  list(
    lambda = lambda,
    rho = rho,
    cvm = cvmean,
    surf = surface
  )
}
## utility functions
## pick lambda
#' @description Pick (lambda, rho) from cross-validation results: either the
#'   grid minimum (se = FALSE) or the one-standard-error rule (se = TRUE),
#'   preferring a sparser fused solution (large rho) when fused.sparse = TRUE
#'   or a sparser lasso solution (large lambda) otherwise.
optimLambda_cv = function(cvparams, type = c("err", "loglik"), se = TRUE, fused.sparse = TRUE) {
  ## BUG FIX: with the default, `type` was a length-2 vector and
  ## `if (type == "err")` errors on R >= 4.2; match.arg resolves it
  type = match.arg(type)
  cvm = if (type == "err") {
    cvparams$cverrs
  } else {
    cvparams$loglik
  }
  cvfuns = data.frame(
    lambda = sapply(cvparams$opt.hyperparams, `[`, 1),
    rho = sapply(cvparams$opt.hyperparams, `[`, 2),
    cvmean = sapply(cvm, mean),
    cvsd = sapply(cvm, sd)
  )
  cv.min = which.min(cvfuns$cvmean)
  ## one-standard-error threshold around the minimum
  cv.1se = cvfuns[cv.min, 3] + cvfuns[cv.min, 4]
  cvfun.1se = cvfuns[cvfuns$cvmean <= cv.1se, c(1, 2, 3)]
  lambda = cvfun.1se[, 1]
  rho = cvfun.1se[, 2]
  if (fused.sparse) {
    rho.1se = max(rho)
    lambda.1se = lambda[which.min(cvfun.1se[cvfun.1se$rho == rho.1se, 3])]
  } else {
    lambda.1se = max(lambda)
    # rho.1se = min(rho[lambda == lambda.1se])
    rho.1se = rho[which.min(cvfun.1se[cvfun.1se$lambda == lambda.1se, 3])]
  }
  if (se) {
    list(lambda = lambda.1se, rho = rho.1se)
  } else {
    list(lambda = cvfuns[cv.min, 1], rho = cvfuns[cv.min, 2])
  }
}
## optimLambda_eBIC
## Pick the (lambda, rho) pair that minimizes the extended BIC.
optimLambda_eBIC = function(eBICparams) {
  grid = data.frame(
    lambda = sapply(eBICparams$opt.hyperparams, `[`, 1),
    rho = sapply(eBICparams$opt.hyperparams, `[`, 2),
    eBIC = eBICparams$eBIC
  )
  best = which.min(grid$eBIC)
  list(lambda = grid[best, 1], rho = grid[best, 2])
}
## optimGamma_eBIC
## Scan gamma over 0, 0.1, ..., 1; for each value form the extended BIC
## (BIC + gamma * extension term), record the optimal (lambda, rho) and the
## minimum eBIC reached.
optimGamma_eBIC = function(BICparams) {
  gammas = seq(0, 1, by = 0.1)
  optim = vector("list", length(gammas))
  eBIC = vector("numeric", length(gammas))
  for (g in seq_along(gammas)) {
    eBICs = BICparams$BIC + gammas[g] * BICparams$extend
    optim[[g]] = optimLambda_eBIC(list(opt.hyperparams = BICparams$opt.hyperparams,
                                       eBIC = eBICs))
    eBIC[g] = min(eBICs)
  }
  list(
    gamma = gammas,
    optimLambda = optim,
    eBIC = eBIC
  )
}
#' extended BIC
#' @description Fit the sparse SEM over a (lambda, rho) grid (warm-started
#'   along the lambda path) and compute the BIC plus the extended-BIC
#'   extension term at every grid point.
#' @param gamma [0,1] (applied later, see optimGamma_eBIC)
#' @param Nk number of eQTLs (enters the degrees of freedom)
#' @return list(opt.hyperparams, BIC, extend)
eBIC_multSML = function(Bs,
                        fs,
                        Ys,
                        Xs,
                        sigma2,
                        Ng,
                        Nk,
                        nlambda = 20,
                        nrho = 20,
                        verbose = 1) {
  ## grid: 4 decades below the estimated lambda.max / rho.max
  lambda.max = get_lambda.max(Bs, Ys, Xs, Ng)
  lambda.factors = 10 ^ seq(0, -4, length.out = nlambda) * lambda.max
  wBs = inverse(Bs)
  rB = flinv(Bs)
  rho.max = get_rho.max(Bs, fs, Ys, Xs, sigma2, Ng)
  rho.factors = 10 ^ seq(0, -4, length.out = nrho) * rho.max
  ## (removed unused local `ncv = 5` — no cross-validation happens here)
  Ns = sapply(Ys, ncol)
  K = length(Ys)
  hyper.params = vector("list", nrho * nlambda)
  params.prev = NULL
  BIC = vector("numeric", nrho * nlambda)
  extend = vector("numeric", nrho * nlambda)
  ix = 1
  for (rho in rho.factors) {
    for (lambda in lambda.factors) {
      cat(sprintf("lambda = %f, rho = %f\n", lambda, rho))
      if (ix %% nlambda == 1) {
        ## first lambda of a rho row: cold start from the initial estimates
        params.opt = multiSML_iPALM(
          Bs,
          fs,
          Ys,
          Xs,
          sigma2,
          Ng,
          lambda,
          rho,
          wBs,
          rB,
          maxit = 1000,
          threshold = 1e-4,
          acc = TRUE,
          sparse = FALSE,
          verbose = verbose
        )
      } else {
        ## warm start from the previous grid point
        params.opt = multiSML_iPALM(
          params.prev$B,
          params.prev$f,
          Ys,
          Xs,
          params.prev$sigma2,
          Ng,
          lambda,
          rho,
          wBs,
          rB,
          maxit = 1000,
          threshold = 1e-4,
          acc = TRUE,
          sparse = FALSE,
          verbose = verbose
        )
      }
      logLik = SML_logLik(
        Xs,
        Ys,
        params.opt$B,
        params.opt$f,
        Ng,
        Ns,
        K,
        params.opt$detIB,
        params.opt$sigma2
      )[1]
      ## degrees of freedom: non-zeros in both networks, not double-counting
      ## entries shared (equal and non-zero) between conditions, + eQTL terms
      df = sum(params.opt$B[[1]] != 0) +
        sum(params.opt$B[[2]] != 0) -
        sum(params.opt$B[[2]] == params.opt$B[[1]] &
              params.opt$B[[1]] != 0) + 2 * Nk
      ## NOTE(review): BIC = 2 * logLik + df * log(n) assumes SML_logLik
      ## returns the NEGATIVE log-likelihood — verify against its definition
      BIC[ix] = 2 * logLik + df * log(sum(Ns))
      extend[ix] = 2 * log(choose(2 * (Ng ^ 2 - Ng + Nk), df))
      hyper.params[[ix]] = c(lambda, rho)
      params.prev = params.opt
      ix = ix + 1
    }
  }
  list(opt.hyperparams = hyper.params,
       BIC = BIC,
       extend = extend)
}
## utility
## utility function
## Arrange the eBIC values (BIC + gamma * extension) on the (lambda, rho) grid
## and build a plotly surface of log(eBIC) over log(lambda) x log(rho).
eBICsurface = function(eBICparams, gamma = 0) {
  grid = data.frame(
    lambda = sapply(eBICparams$opt.hyperparams, `[`, 1),
    rho = sapply(eBICparams$opt.hyperparams, `[`, 2),
    eBIC = eBICparams$BIC + gamma * eBICparams$extend
  )
  lambda = sort(unique(grid$lambda))
  rho = sort(unique(grid$rho))
  eBIC = matrix(nrow = length(lambda), ncol = length(rho))
  ## map each grid point to its (row, col) index, then fill the surface matrix
  grid$lambda = sapply(grid$lambda, function(l) which(lambda == l))
  grid$rho = sapply(grid$rho, function(r) which(rho == r))
  for (row in seq_len(nrow(grid))) {
    eBIC[grid$lambda[row], grid$rho[row]] = grid$eBIC[row]
  }
  require(plotly)
  surface = plot_ly(x = log(lambda),
                    y = log(rho),
                    z = log(eBIC)) %>% add_surface()
  list(
    lambda = lambda,
    rho = rho,
    ebics = eBIC,
    surf = surface
  )
}
## eQTL and gene regulatory network are all different with each other under
## different conditions
#' @description Simulate eQTL genotype matrices for two conditions plus the
#'   gene-to-eQTL assignment matrix G (Ng x Nk) and per-gene eQTL index sets.
#' @param N number of samples
#' @param Ng number of genes
#' @param Nk number of eQTLs (the code assumes Nk is a multiple of Ng)
#' @param d differential ratio: fraction of eQTLs re-drawn in condition 2
getdiffeQTL = function(N, Ng, Nk, d = 0.1) {
  step = Nk / Ng
  X = vector("list", 2)
  ## genotypes coded 1/2/3
  X[[1]] = round(2 * matrix(runif(Nk * N), nrow = Nk)) + 1
  ## condition 2: in every sample, re-draw a random d-fraction of the eQTLs
  X[[2]] = apply(X[[1]], 2, function(geno) {
    Nd = Nk * d
    redraw = sample(1:Nk, Nd, replace = F)
    geno[redraw] = round(2 * runif(Nd)) + 1
    geno
  })
  ## gene g owns eQTL columns g, Ng + g, 2*Ng + g, ...
  G = matrix(0,
             nrow = Ng,
             ncol = Nk)
  ix = vector("list", Ng)
  for (g in 1:Ng) {
    cols = seq(0, step - 1) * Ng + g
    G[g, cols] = 1
    ix[[g]] = cols
  }
  list(G = G, X = X, sk = ix)
}
#' @description Simulate a two-condition SEM dataset: random networks B1/B2
#'   (via getrandDAG), differential eQTL genotypes (via getdiffeQTL), and
#'   expression Y = (I - B)^{-1} (F X + E) with Gaussian noise.
#' @details eQTL measurement for different condition are generated with proportional difference.
#' @param N number of samples
#' @param Ng number of genes
#' @param Nk number of eQTLs
#' @param r network edge ratio (e = Ng * r edges, passed to getrandDAG)
#' @param d differential ratio of network edges (passed to getrandDAG)
#' @param f difference proportion of each gene's eQTL measurement, such as SNP
#' @param dag require the simulated networks to be DAGs
#' @param sigma residual noise scale
getdiffsem = function(N = 200,
                      Ng = 10,
                      Nk = 10,
                      r = 0.3,
                      d = 0.1,
                      f = 0.1,
                      dag = TRUE,
                      sigma = 0.1) {
  B = getrandDAG(Ng, e = Ng * r, dag = dag, d = d)
  Q = getdiffeQTL(N, Ng, Nk, f)
  ## FIX: local renamed from `F` — `F` aliases FALSE in R and shadowing it is
  ## a known footgun; the returned list key `F` is unchanged for callers
  Fq = Q[[1]]
  X = Q[[2]]
  sk = Q[[3]]
  E1 = sigma * t(rmvnorm(N, mean = rep(0, Ng), sigma = diag(Ng)))
  E2 = sigma * t(rmvnorm(N, mean = rep(0, Ng), sigma = diag(Ng)))
  ## structural equations solved for Y
  Y1 = solve(diag(Ng) - B[[1]]) %*% (Fq %*% X[[1]] + E1)
  Y2 = solve(diag(Ng) - B[[2]]) %*% (Fq %*% X[[2]] + E2)
  list(
    obs = list(
      Y1 = Y1,
      Y2 = Y2,
      X1 = X[[1]],
      X2 = X[[2]],
      sk = sk
    ),
    var = list(
      B1 = Matrix(B[[1]], sparse = T),
      B2 = Matrix(B[[2]], sparse = T),
      F = Matrix(Fq, sparse = T),
      N = N,
      Ng = Ng,
      Nk = Nk
    )
  )
}
## data = getdiffsem(N = 200, Ng = 30, Nk = 90, r = 0.1, d = 0.1, f = 0.1, sigma = 1, dag = TRUE)
#' @description Build, per condition, the per-gene submatrices of the eQTL
#'   observations: for each gene, the rows of X listed in data$obs$sk.
submatXs = function(data) {
  gene.rows = data$obs$sk
  lapply(list(X1 = data$obs$X1, X2 = data$obs$X2), function(X) {
    lapply(gene.rows, function(rows) X[rows, , drop = F])
  })
}
## centralized for multiple Ys and Xs
#' @description Generalized centralization of Ys and Xs (each condition has
#'   its own per-gene X matrices); returns the centered data together with the
#'   row means needed to recover intercepts later.
#' @example
#' Ys = data$obs[c("Y1", "Y2")]
#' Xs = submatXs(data)
centralize_gen = function(Xs, Ys) {
  ## means first (computed on the raw data), then center
  muX = lapply(Xs, function(cond.X) lapply(cond.X, rowMeans))
  muY = lapply(Ys, rowMeans)
  list(X = lapply(Xs, function(cond.X) lapply(cond.X, center)),
       Y = lapply(Ys, center),
       muX = muX,
       muY = muY)
}
## ridge regression for estimate sigma2 initialization
## on different gene expression and different eQTLs
#' @description Fit a constrained ridge regression per condition and pool the
#'   residual variance across conditions (weighted by each condition's
#'   residual degrees of freedom N[i] * M - 1).
#' @param M number of gene
#' @param N vector of per-condition sample sizes
#' @example
#' M = data$var$Ng
#' N = data$var$N
#' params.init = constrained_L2reg_gen(Xs, Ys, sigma2$rho.opt, M, N)
constrained_L2reg_gen = function(Xs, Ys, rho, M, N) {
  K = length(Ys)
  B = list()
  ## FIX: local renamed from `F` — `F` aliases FALSE in R; the returned list
  ## key `F` is unchanged for callers
  Fhat = list()
  mu = list()
  err = 0
  df = 0
  for (i in 1:K) {
    fit = constrained_L2reg(Xs[[i]], Ys[[i]], rho)
    B[[i]] = as.matrix(fit$B)
    Fhat[[i]] = fit$F
    mu[[i]] = fit$mu
    ## pool residual sums across conditions, df-weighted
    err = err + fit$sigma2 * (N[i] * M - 1)
    df = df + (N[i] * M - 1)
  }
  sigma2 = err / df
  list(
    B = B,
    F = Fhat,
    sigma2 = sigma2,
    mu = mu
  )
}
## generalized cross-validation on ridge regression to estimate sigma2
## on different gene expressionand different eQTLs
#' @description Cross-validate the ridge penalty rho for the generalized
#'   (per-condition eQTL) model, then refit on the full data at the optimum.
#' @param nrho number of L2 penalty's coefficient
#' @param ncv number of cross-validation folds
#' @param M number of genes; N vector of per-condition sample sizes
#' @example
#' Ys = data$obs[c("Y1", "Y2")]
#' Xs = submatXs(data)
#' M = data$var$Ng
#' N = data$var$N
#' sigma2 = getsigma2_L2reg_gen(Xs, Ys, nrho = 20, M = M, N = N)
getsigma2_L2reg_gen = function(Xs,
                               Ys,
                               nrho = 10,
                               ncv = 5,
                               M,
                               N) {
  rho_factors = 10 ^ (seq(-6, 2, length.out = nrho))
  cv.err = matrix(0, nrow = nrho, ncol = ncv)
  ## GENERALIZED: condition indices were hard-coded to 1:2; now any K works
  conds = seq_along(Ys)
  cv.fold = lapply(conds, function(ix) {
    sample(seq(1, ncv), size = N[ix], replace = T)
  })
  irho = 1
  for (rho in rho_factors) {
    for (cv in 1:ncv) {
      ytrain = lapply(conds, function(ix) {
        Ys[[ix]][, cv.fold[[ix]] != cv, drop = F]
      })
      xtrain = lapply(conds, function(ix) {
        lapply(Xs[[ix]], function(x) {
          x[, cv.fold[[ix]] != cv, drop = F]
        })
      })
      ytest = lapply(conds, function(ix) {
        Ys[[ix]][, cv.fold[[ix]] == cv, drop = F]
      })
      xtest = lapply(conds, function(ix) {
        lapply(Xs[[ix]], function(x) {
          x[, cv.fold[[ix]] == cv, drop = F]
        })
      })
      Ntrain = sapply(cv.fold, function(fold) { sum(fold != cv) })
      fit = constrained_L2reg_gen(xtrain, ytrain, rho, M, Ntrain)
      for (k in conds) {
        ## predicted eQTL contribution per gene, stacked into an M x n matrix
        ftest = lapply(1:M, function(i) {
          crossprod(fit$F[[k]][[i]], xtest[[k]][[i]])
        })
        ftest = do.call(rbind, ftest)
        ## held-out SEM residual norm
        cv.err[irho, cv] = cv.err[irho, cv] + norm((diag(M) - fit$B[[k]]) %*% ytest[[k]] - ftest - fit$mu[[k]], type = "f")
      }
    }
    irho = irho + 1
  }
  cv.mean = rowMeans(cv.err)
  rho.min = rho_factors[which.min(cv.mean)]
  ## refit on the full data at the CV-optimal penalty
  fit = constrained_L2reg_gen(Xs, Ys, rho.min, M, N)
  list(
    rho.opt = rho.min,
    sigma2.opt = fit$sigma2[1],
    cv.ram = list(rho = rho_factors, cvm = cv.mean)
  )
}
## solve SML problem by component-wise update with generalized configuration
## different gene expression and different eQTLs
#' @description Coordinate-wise (single-entry) solver for the generalized
#'   two-condition sparse SEM: each B[[k]][i, j] pair is updated by
#'   enumerating the 13 sign patterns of the piecewise-smooth objective,
#'   solving the stationarity condition for each, and keeping the candidate
#'   with the smallest objective value.
#' @param lambda lasso penalty
#' @param rho fused lasso penalty
#' @param wBs lasso weight matrices; rB fused-lasso weight matrix
#' @example
#' M = data$var$Ng
#' N = data$var$N
#' Ys = data$obs[c("Y1", "Y2")]
#' Xs = submatX(data)
#' sigma2 = getsigma2_L2reg_multi(Xs, Ys, nrho = 20, M = M, N = N)
#' params.init = constrained_L2reg_multi(Xs, Ys, sigma2$rho.opt, M, N)
#' params.opt = genSML_cwise(Bs = params.opt4$B, fs = params.opt4$f, Ys = Ys, Xs = Xs,
#'                           sigma2 = params.init$sigma2[1], Ng = data$var$Ng,
#'                           wBs = inverse(params.init$B), rB = flinv(params.init$B),
#'                           lambda = 10, rho = 40, maxit = 1000)
genSML_cwise = function(Bs,
                        fs,
                        Ys,
                        Xs,
                        sigma2,
                        Ng,
                        lambda,
                        rho,
                        weighted = TRUE,
                        wBs = inverse(Bs),
                        rB = flinv(Bs),
                        maxit = 100,
                        threshold = 1e-4,
                        verbose = 2) {
  ## center X and Y; means recover the intercepts mu at the end
  std = centralize_gen(Xs, Ys)
  Xs = std$X
  Ys = std$Y
  meanXs = std$muX
  meanYs = std$muY
  K = length(Ys) # number of conditions = 2
  if (verbose == 2) {
    cat(sprintf("conditions must be restricted to 2.. K = %d\n", K))
  }
  ## update for eQTL coeffs: f = f0 - f1 %*% B[i,] (per gene, per condition;
  ## here each condition has its own design Xs[[k]][[i]])
  f0 = vector("list", K)
  f1 = vector("list", K)
  for (i in 1:Ng) {
    for (k in 1:K) {
      Xi = Xs[[k]][[i]]
      Pi = solve(crossprod(Xi)) %*% t(Xi)
      yi_k = Ys[[k]][, i, drop = F] # n x 1 for gene i
      f0[[k]][[i]] = Pi %*% yi_k
      f1[[k]][[i]] = Pi %*% Ys[[k]] # f = f0 - f1 %*% B[i,]
    }
  }
  ## update for gnet coeffs
  niter = 1
  Ns = sapply(Ys, nrow)
  ImBs = lapply(Bs, function(B) {
    diag(Ng) - B
  })
  IBsinv = lapply(ImBs, solve)
  while (niter <= maxit) {
    Bs.prev = Bs
    fs.prev = fs
    for (i in 1:Ng) {
      ## i-th column of each (I - B)^{-1}; entry j is the rank-1 sensitivity
      ## of the determinant to B[i, j]
      ci = lapply(IBsinv, function(IBi) {
        IBi[, i]
      })
      ## NOTE(review): dbi is filled below but only consumed by the
      ## commented-out Sherman-Morrison update; currently unused
      dbi = lapply(1:K, function(k) {
        vector("numeric", Ng)
      })
      for (j in 1:Ng) {
        ## update B[[k]][i,j] for i != j (diagonal stays zero)
        if (i != j) {
          bij.prev = bij = sapply(Bs, function(B)
            (B[i, j]))
          wij = sapply(wBs, function(w) {
            if (weighted) {
              w[i, j]
            } else {
              1
            }
          })
          rij = if (weighted) {
            rB[i, j]
          } else {
            1
          }
          ## determinant sensitivities for this entry
          mij = sapply(ci, function(c) {
            c[j]
          })
          ## row i of (I - B) with the j-th entry zeroed out
          bi = lapply(ImBs, function(ImB) {
            bi_k = ImB[i,]
            bi_k[j] = 0
            bi_k
          })
          ## j-th column of Ys
          Yej = lapply(Ys, function(Y) {
            Y[, j, drop = F]
          })
          ## linear and quadratic coefficients of the smooth part in bij
          a1 = sapply(1:K, function(k) {
            crossprod(Ys[[k]] %*% bi[[k]] - Xs[[k]][[i]] %*% fs[[k]][[i]], Yej[[k]])
          })
          a2 = sapply(1:K, function(k) {
            crossprod(Yej[[k]])
          })
          ## a0 = 1/mij + bij.prev
          ## sign patterns (sign(b1), sign(b2), sign(b1 - b2)) of the
          ## piecewise objective; one stationarity solve per pattern
          cond = list(
            c(1, 1, 1),
            c(1, -1, 1),
            c(1, 0, 1),
            c(-1, -1, 1),
            c(0, -1, 1),
            c(1, 1, -1),
            c(0, 1, -1),
            c(-1, -1, -1),
            c(-1, 0, -1),
            c(-1, 1, -1),
            c(1, 1, 0),
            c(-1, -1, 0),
            c(0, 0, 0)
          )
          obj = obj_multiSML(Ns, mij, bij.prev, a1, a2, lambda, rho, wij, rij, sigma2)
          grad = grad_multiSML(Ns, mij, bij.prev, a1, a2, lambda, rho, wij, rij, sigma2)
          params = list()
          for (t in cond) {
            cand.grad = grad(t)
            params = c(params, grad_solver(cand.grad, t))
          }
          ## keep the candidate pair with the smallest objective value
          objval = sapply(params, function(args) {
            do.call(obj, args)
          })
          mix = which.min(objval)
          bij = unlist(params[[mix]])
          dbij = bij.prev - bij
          for (k in 1:K) {
            dbi[[k]][j] = dbij[k]
            Bs[[k]][i, j] = bij[k]
            ## rank-1 correction of the determinant sensitivity column
            ci[[k]] = ci[[k]] / (1 + dbij[k] * mij[k])
            ImBs[[k]] = diag(Ng) - Bs[[k]]
          }
        }
      } ## for(j in 1:Ng)
      ## (ImB + ei^T %*% dbi)^{-1}
      for (k in 1:K) {
        ## IBsinv[[k]] = IBsinv[[k]] - IBsinv[[k]][, i, drop = F] %*% dbi[[k]] %*% IBsinv[[k]] / (1 + dbi[[k]] %*% IBsinv[[k]][, i, drop = F])[1]
        ## full re-inversion after each row (the rank-1 update above is
        ## disabled); O(Ng^3) per row but numerically safe
        IBsinv[[k]] = solve(ImBs[[k]])
        fs[[k]][[i]] = f0[[k]][[i]] - f1[[k]][[i]] %*% Bs[[k]][i, ]
      }
    } ## for(i in 1:Ng)
    ## relative change of B and f across the sweep
    Berr = sum(sapply(1:K, function(k) {
      norm(Bs[[k]] - Bs.prev[[k]], type = "f") / norm(Bs.prev[[k]], type = "f")
    }))
    Ferr = sum(sapply(1:K, function(k) {
      sum(sapply(1:Ng, function(i) {
        norm(fs[[k]][[i]] - fs.prev[[k]][[i]], type = "f")
      })) / sum(sapply(1:Ng, function(i) {
        norm(fs.prev[[k]][[i]], type = "f")
      }))
    }))
    err = Berr + Ferr
    cat(sprintf("SML: iteration = %d, error = %f\n", niter, err))
    niter = niter + 1
    if (err < threshold || niter > maxit || is.nan(err)) {
      ## recover intercepts on the original (uncentered) scale
      mu = lapply(1:K, function(k) {
        (diag(Ng) - Bs[[k]]) %*% meanYs[[k]] - sapply(1:Ng, function(i) {
          meanXs[[k]][[i]] %*% fs[[k]][[i]]
        })
      })
      Bs = lapply(Bs, Matrix, sparse = T)
      break
    }
  } ## while(niter <= maxit)
  list(
    B = Bs,
    f = fs,
    mu = mu,
    niter = niter,
    err = err
  )
}
## Pooled residual variance for the generalized model in which each condition
## has its own per-gene eQTL designs (Xs[[k]][[i]] is N[k] x sk).
sigma2_gen = function(Xs, Ys, B, f, Ng, Ns, K) {
  Xt = lapply(Xs, function(cond.X) lapply(cond.X, t))
  Yt = lapply(Ys, t)
  rss = 0
  for (k in 1:K) {
    for (g in 1:Ng) {
      ## residual of gene g under condition k: y - B-row contribution - f X
      resid = Yt[[k]][g, , drop = F] -
        B[[k]][g, -g, drop = F] %*% Yt[[k]][-g, , drop = F] -
        crossprod(f[[k]][[g]], Xt[[k]][[g]])
      rss = rss + tcrossprod(resid)
    }
  }
  (rss / (Ng * sum(Ns)))[1]
}
## solve SML problem by block coordinate descent by backtracking inert-PALM on
## different gene expression and different eQTLs
#' @param lambda lambda hyper-parameter for lasso term
#' @param rho fused lasso hyper-parameter for fused lasso term
#' @param gamma invertible matrix stablize parameter gamma
#' params.init = constrained_L2reg_gen(Xs, Ys, sigma2$rho.opt, M, N)
#' params.opt = genSML_iPALM(Bs = params.init$B, fs = params.init$F, Ys = Ys, Xs = Xs,
#' sigma2 = params.init$sigma2[1], Ng = data$var$Ng,
#' lambda = 0.1, rho = 0.1, maxit = 500)
genSML_iPALM = function(Bs,
fs,
Ys,
Xs,
sigma2,
Ng,
lambda,
rho,
wBs = inverse(Bs),
rB = flinv(Bs),
maxit = 100,
acc = TRUE,
inertial = inertial_pars("lin"),
threshold = 1e-3,
sparse = FALSE,
use.strict = TRUE,
verbose = 2) {
std = centralize_gen(Xs, Ys)
Xs = std$X
Ys = std$Y
meanXs = std$muX
meanYs = std$muY
K = length(Ys) # number of conditions = 2
if (verbose == 2) {
cat(sprintf("conditions must be restricted to 2.. K = %d\n", K))
}
## update for eQTL row-wise (specific for genes)
f0 = vector("list", K)
f1 = vector("list", K)
Yp = vector("list", K)
Hy = vector("list", K)
Yp.maxEigen = vector("list", K)
Ns = sapply(Ys, nrow)
for (i in 1:Ng) {
for (k in 1:K) {
Xi = Xs[[k]][[i]]
Pi = solve(crossprod(Xi)) %*% t(Xi)
# specific condition
yi_k = Ys[[k]][, i, drop = F] # n[k] x 1 for gene i
Yi_k = Ys[[k]][,-i] # n[k] x (ng-1) (for specific gene i)
f0[[k]][[i]] = Pi %*% yi_k
f1[[k]][[i]] = Pi %*% Yi_k # f[[k]][[i]] = f0[[k]][[i]] - f1[[k]][[i]] %*% bi^k | bi^k = B[[k]][i,-i]
Hi_k = diag(Ns[k]) - Xi %*% Pi # n[k] x n[k] projection matrix
Yp[[k]][[i]] = t(Yi_k) %*% Hi_k %*% Yi_k # (ng - 1) x (ng - 1)
Hy[[k]][[i]] = t(yi_k) %*% Hi_k %*% Yi_k # 1 x (ng - 1)
## maximized eigen-value for Yp
Yp.maxEigen[[k]][[i]] = eigen(Yp[[k]][[i]])$values[1]
}
}
## update for gnet row-wise
niter = 1
ImBs = lapply(Bs, function(B) {
diag(Ng) - B
})
detIBs = sapply(ImBs, det)
IBsinv = lapply(ImBs, solve)
Ls = logLik(detIBs, Bs, wBs, rB, lambda, rho, Ns, K) + N * sum(Ns) / 2 * log(sigma2)
## history
Bs.prevs = list(Bs, Bs)
inert = acc
while (niter <= maxit) {
inert.pars = inertial(niter)
Bs.inert = if (inert) {
lapply(1:K, function(k) {
Bs.prevs[[2]][[k]] + inert.pars * (Bs.prevs[[2]][[k]] - Bs.prevs[[1]][[k]])
})
} else {
Bs
}
fs.prev = fs
Ls.prev = Ls
for (i in 1:Ng) {
## sum(-Ns[k] * sigma2 * log(det(I-B[[k]])^2)) + \sum_{i=1}^{Ng} bi^T %*% Yp %*% bi - Hy %*% bi)
ci = lapply(IBsinv, function(IBi) {
# ci / det(I - B); from (I - B)^{-1}
IBi[-i, i, drop = F]
})
bi = lapply(Bs.inert, function(B.inert) {
t(B.inert[i,-i, drop = F])
})
gi = lapply(1:K, function(k) {
grad = grad_rwise_SML(Ns[k], ci[[k]], Yp[[k]][[i]], Hy[[k]][[i]], sigma2[1])
grad(bi[[k]])
})
## Lipschitz moduli for row-i
Lis = sapply(1:K, function(k) {
gtg = tcrossprod(ImBs[[k]][-i, ])
oi = chol2inv(gtg)
deti = det(gtg)
gii = ImBs[[k]][-i,-i]
si = ImBs[[k]][-i, i, drop = F]
c2i = sum((ci[[k]] * detIBs[k]) ^ 2)
lips_rwise_SML(Ns[k],
oi,
gii,
si,
c2i,
deti,
Yp.maxEigen[[k]][[i]],
sigma2,
Ng)[1]
})
Li = max(Lis)
Li = (1 + 2 * inert.pars) * Li / (2 * (1 - inert.pars))
ui = lapply(1:K, function(k) {
bi[[k]] - gi[[k]] / Li
})
wBi = lapply(wBs, function(wB) {
wB[i,-i]
})
rBi = rB[i,-i]
xi = prox_flsa(lambda, rho, Li, ui, wBi, rBi)
for (k in 1:K) {
Bs[[k]][i,-i] = xi[[k]]
ImBs[[k]] = diag(Ng) - Bs[[k]]
detIBs[k] = (ImBs[[k]][i, ] %*% IBsinv[[k]][, i, drop = F])[1] * detIBs[k]
dbi = Bs.prevs[[2]][[k]][i, ] - Bs[[k]][i, ]
IBsinv[[k]] = IBsinv[[k]] - IBsinv[[k]][, i, drop = F] %*% dbi %*% IBsinv[[k]] / (1 + dbi %*% IBsinv[[k]][, i, drop = F])[1]
fs[[k]][[i]] = f0[[k]][[i]] - f1[[k]][[i]] %*% Bs[[k]][i,-i]
}
} # row-wise update
Berr = sum(sapply(1:K, function(k) {
norm(Bs[[k]] - Bs.prevs[[2]][[k]], type = "f") / (1 + norm(Bs.prevs[[2]][[k]], type = "f"))
}))
Ferr = sum(sapply(1:K, function(k) {
sum(sapply(1:Ng, function(i) {
norm(fs[[k]][[i]] - fs.prev[[k]][[i]], type = "f")
})) / (1 + sum(sapply(1:Ng, function(i) {
norm(fs.prev[[k]][[i]], type = "f")
})))
}))
err = Berr + Ferr
sigma2 = sigma2_gen(Xs, Ys, Bs, fs, Ng, Ns, K)
Ls = logLik(detIBs, Bs, wBs, rB, lambda, rho, Ns, K) + Ng * sum(Ns) / 2 * log(sigma2)
Lerr = abs(Ls.prev - Ls) / (1 + abs(Ls.prev))
# inert = ifelse(Lerr < 1e-8, FALSE, acc)
if (verbose >= 2) {
cat(
sprintf(
"SML: iteration = %d, error = %f, logLik = %f, sigma2 = %f, inert = %s\n",
niter,
err,
Ls,
sigma2,
inert
)
)
}
niter = niter + 1
Bs.prevs = list(Bs.prevs[[2]], Bs)
opt.cond = if (use.strict) {
(err < threshold && Lerr < threshold)
} else {
(err < threshold || Lerr < threshold)
}
if (opt.cond || niter > maxit || is.nan(err)) {
mu = lapply(1:K, function(k) {
(diag(Ng) - Bs[[k]]) %*% meanYs[[k]] - sapply(1:Ng, function(i) {
meanXs[[k]][[i]] %*% fs[[k]][[i]]
})
})
sigma2 = sigma2_gen(Xs, Ys, Bs, fs, Ng, Ns, K)
if (sparse) {
Bs = lapply(Bs, Matrix, sparse = T)
}
break
}
} # while(niter <= maxit)
list(
B = Bs,
f = fs,
mu = mu,
sigma2 = sigma2,
niter = niter,
err = err,
detIB = detIBs
)
}
######################
# comparable method
######################
## cross validation for hyper-parameter tuning
#' @description 5-fold cross-validation
#' @param dyn dynamic updated rho.max by given lambda
#' @example
#' cv.params = cv_genSML(Bs = params.init$B, fs = params.init$F, Ys = Ys, Xs = Xs, sigma2 = params.init$sigma2[1], Ng = data$var$Ng, nlambda = 20, nrho = 20)
#' @description 5-fold cross-validation over a fixed (lambda, rho) grid for
#' genSML_iPALM. Along each rho row, solutions are warm-started from the
#' previous lambda on the path; the first lambda of a row is a cold start.
#' @param Bs,fs initial network coefficient matrices / per-gene eQTL effects
#'   (one entry per condition)
#' @param Ys list of K = 2 expression matrices (samples in columns)
#' @param Xs list of K lists of per-gene genotype matrices
#' @param sigma2 initial noise variance
#' @param Ng number of genes
#' @param nlambda,nrho sizes of the lasso / fused-lasso grids
#' @param weighted if TRUE use adaptive (inverse-magnitude) penalty weights
#' @return list(opt.hyperparams, cverrs, loglik): the (lambda, rho) pairs and,
#'   per grid cell, the vectors of CV errors / test log-likelihoods over folds
cv_genSML = function(Bs,
                     fs,
                     Ys,
                     Xs,
                     sigma2,
                     Ng,
                     nlambda = 20,
                     nrho = 20,
                     weighted = TRUE,
                     threshold = 1e-4,
                     use.strict = FALSE,
                     verbose = 1) {
  lambda.max = gen_lambda.max(Bs, Ys, Xs, Ng, weighted)
  lambda.factors = 10 ^ seq(0, -5, length.out = nlambda) * lambda.max
  if (weighted) {
    wBs = inverse(Bs)
    rB = flinv(Bs)
  } else {
    wBs = invone(Bs)
    rB = flone(Bs)
  }
  ## NOTE(review): this matches the 7-argument gen_rho.max defined just below;
  ## a later redefinition of gen_rho.max takes lambda as the 5th argument and
  ## would misbind sigma2 here - confirm which version is meant to be in scope.
  rho.max = gen_rho.max(Bs, fs, Ys, Xs, sigma2, Ng, weighted)
  rho.factors = 10 ^ seq(0, -5, length.out = nrho) * rho.max
  ncv = 5
  Ns = sapply(Ys, ncol)
  K = length(Ys)
  Ytrain = vector("list", ncv)
  Xtrain = vector("list", ncv)
  Ytest = vector("list", ncv)
  Xtest = vector("list", ncv)
  ## independent fold assignment per condition (sample sizes may differ)
  cv.fold = list()
  cv.fold[[1]] = sample(seq(1, ncv), size = Ns[1], replace = T)
  cv.fold[[2]] = sample(seq(1, ncv), size = Ns[2], replace = T)
  for (i in 1:ncv) {
    Ytrain[[i]] = lapply(1:2, function(ix) {
      Ys[[ix]][, cv.fold[[ix]] != i, drop = F]
    })
    Xtrain[[i]] = lapply(1:2, function(ix) {
      lapply(Xs[[ix]], function(x) {
        x[, cv.fold[[ix]] != i, drop = F]
      })
    })
    Ytest[[i]] = lapply(1:2, function(ix) {
      Ys[[ix]][, cv.fold[[ix]] == i, drop = F]
    })
    Xtest[[i]] = lapply(1:2, function(ix) {
      lapply(Xs[[ix]], function(x) {
        x[, cv.fold[[ix]] == i, drop = F]
      })
    })
  }
  cverrs = vector("list", nrho * nlambda)
  cvlls = vector("list", nrho * nlambda)
  hyper.params = list()
  for (cv in 1:ncv) {
    params.opt = list()
    Nc = sapply(Ytest[[cv]], ncol)
    ix = 1
    for (rho in rho.factors) {
      for (lambda in lambda.factors) {
        cat(sprintf("lambda = %4f, rho = %4f, foldid = %d\n", lambda, rho, cv))
        if (ix %% nlambda == 1) {
          ## first lambda of this rho row: cold start from initial estimates
          params.opt[[ix]] = genSML_iPALM(
            Bs, fs, Ytrain[[cv]], Xtrain[[cv]], sigma2, Ng, lambda, rho,
            wBs, rB, maxit = 1000, threshold = threshold, acc = TRUE,
            sparse = FALSE, use.strict = use.strict, verbose = verbose
          )
        } else {
          ## warm start from the previous lambda on the path
          params.opt[[ix]] = genSML_iPALM(
            params.opt[[ix - 1]]$B, params.opt[[ix - 1]]$f,
            Ytrain[[cv]], Xtrain[[cv]], params.opt[[ix - 1]]$sigma2,
            Ng, lambda, rho, wBs, rB, maxit = 1000, threshold = threshold,
            acc = TRUE, sparse = FALSE, use.strict = use.strict,
            verbose = verbose
          )
        }
        ## held-out log-likelihood and prediction error for this grid cell
        loglik = gen_logLik(
          Xtest[[cv]], Ytest[[cv]], params.opt[[ix]]$B, params.opt[[ix]]$f,
          Ng, Nc, K, params.opt[[ix]]$detIB, params.opt[[ix]]$sigma2
        )[1]
        err = gen_error(Xtest[[cv]], Ytest[[cv]], params.opt[[ix]]$B,
                        params.opt[[ix]]$f, Ng, Nc, K)[1]
        cverrs[[ix]] = c(cverrs[[ix]], err)
        cvlls[[ix]] = c(cvlls[[ix]], loglik)
        if (cv == 1) {
          hyper.params[[ix]] = c(lambda, rho)
        }
        ix = ix + 1
      }
    }
  }
  ## bug fix: the original returned `loglik = rbind, cvlls`, which stored the
  ## rbind function under $loglik and appended cvlls as an unnamed element
  list(opt.hyperparams = hyper.params,
       cverrs = cverrs,
       loglik = cvlls)
}
#' @description Smallest lasso penalty that zeroes the whole network: computed
#' from the marginal (B = 0) residuals of regressing each gene on its own
#' eQTLs, scaled by the adaptive weights.
#' @param Bs initial coefficient matrices (used only to build the weights)
#' @param Ys list of K expression matrices; Xs list of K per-gene genotype lists
#' @param Ng number of genes
#' @param weighted if TRUE use adaptive (inverse) weights, else unit weights
#' @return scalar lambda.max
gen_lambda.max = function(Bs, Ys, Xs, Ng, weighted = TRUE) {
  std = centralize_gen(Xs, Ys)
  Xs = std$X ## per condition: list of Ng (n x sk) genotype matrices
  Ys = std$Y ## per condition: n x Ng expression matrix
  K = length(Ys)
  Ns = sapply(Ys, nrow)
  R = vector("list", K) ## residual matrices, Ng x n
  w = if (weighted) {
    inverse(Bs)
  } else {
    invone(Bs)
  }
  for (k in 1:K) {
    R[[k]] = matrix(0, nrow = Ng, ncol = Ns[k])
  }
  for (i in 1:Ng) {
    for (k in 1:K) {
      Xi = Xs[[k]][[i]]
      ## least-squares projector; reused for fi (the original recomputed
      ## solve(crossprod(Xi)) %*% t(Xi) a second time and left Pi unused)
      Pi = solve(crossprod(Xi)) %*% t(Xi)
      yi = Ys[[k]][, i, drop = F] # n x 1
      fi = Pi %*% yi
      Xf = Xi %*% fi # n x 1, eQTL-only fit of gene i
      R[[k]][i, ] = yi - Xf
    }
  }
  ## pooled residual variance under B = 0
  err = 0
  for (k in 1:K) {
    err = err + norm(R[[k]], type = "f") ^ 2
  }
  sigma2 = err / (Ng * sum(Ns))
  Ry = vector("list", K)
  for (k in 1:K) {
    Ry[[k]] = R[[k]] %*% Ys[[k]]
    Ry[[k]] = abs(Ry[[k]] / sigma2 - Ns[[k]]) / w[[k]]
  }
  max(sapply(Ry, max))
}
## cross-validation and EBIC for hyper-parameter tuning
## rho max can be estimated, rho is the fused lasso
## regularized hyper parameter
#' @description gen_rho.max
#' @example
#' rhomax = gen_rho.max(params.init$B, params.init$F, Ys, Xs, params.init$sigma2[1], data$var$Ng)
## Smallest effective fused-lasso penalty: fit with lambda = 0, rho = Inf
## (fully fused networks), then take the largest weighted gradient entry of
## the remaining objective at that solution.
gen_rho.max = function(Bs, fs, Ys, Xs, sigma2, Ng, weighted = TRUE) {
## adaptive (inverse-magnitude) or unit penalty weights
if(weighted) {
wBs = inverse(Bs)
rB = flinv(Bs)
} else {
wBs = invone(Bs)
rB = flone(Bs)
}
## fully fused fit: rho = Inf forces the two conditions' networks together
params.rho = genSML_iPALM(
Bs,
fs,
Ys,
Xs,
sigma2,
Ng,
lambda = 0,
rho = Inf,
wBs = wBs,
rB = rB,
maxit = 2000,
threshold = 1e-4,
use.strict = F,
sparse = T,
verbose = 1
)
## fused-lasso weights are taken at the *initial* Bs, before refitting
weight.rho = flinv(Bs)
## NOTE(review): only condition 1's estimate is kept; assumes rho = Inf makes
## both conditions' networks identical - confirm
Bs = params.rho$B[[1]]
fs = params.rho$f
sigma2 = params.rho$sigma2
std = centralize_gen(Xs, Ys)
Xs = std$X ## Ng (n x sk)
Ys = std$Y ## n x ng
## multiple
K = length(Ys)
Ns = sapply(Ys, nrow)
Bc = vector("list", K)
YY = vector("list", K)
FX = vector("list", K)
Dx = vector("list", K)
for (k in 1:K) {
## gradient of -Ns[k] * log det(I - B) at the fused solution
Bc[[k]] = -Ns[k] * t(solve(diag(Ng) - Bs))
YY[[k]] = crossprod(Ys[[k]]) ## Y %*% t(Y)
FX[[k]] = matrix(0, nrow = Ng, ncol = Ns[k]) ## F %*% X (p x k x k x n = p x n)
for (i in 1:Ng) {
FX[[k]][i, ] = as.numeric(Xs[[k]][[i]] %*% fs[[k]][[i]])
}
## absolute weighted gradient; diagonal excluded (no self-loops)
Dx[[k]] = abs(Bc[[k]] + ((diag(Ng) - Bs) %*% YY[[k]] - FX[[k]] %*% Ys[[k]]) / sigma2 / weight.rho)
diag(Dx[[k]]) = -Inf
}
## Dxy = abs((diag(Ng) - Bs) %*% (YY[[2]] - YY[[1]]) - (FX[[2]] %*% Ys[[2]] - FX[[1]] %*% Ys[[1]])) / sigma2 / 2 / weight.rho
## diag(Dxy) = -Inf
max(c(max(Dx[[1]]), max(Dx[[2]])))
## max(Dxy)
}
## Penalty-free (negative) log-likelihood of data (Xs, Ys) under the SEM fit
## (Bs, fs): log-determinant terms plus the scaled residual sum of squares.
## Returns a 1 x 1 matrix; callers take [1].
gen_logLik = function(Xs, Ys, Bs, fs, Ng, Ns, K, detIBs, sigma2) {
  std = centralize_gen(Xs, Ys)
  Xt = lapply(seq_len(K), function(k) {
    lapply(std$X[[k]], t)  # per gene: sk x N
  })
  Yt = lapply(std$Y, t)    # per condition: Ng x N
  ll = 0
  rss = 0
  for (k in seq_len(K)) {
    ## contribution of log |det(I - B_k)|
    ll = ll - Ns[k] / 2 * log(detIBs[k] ^ 2)
    for (g in seq_len(Ng)) {
      ## residual of gene g: observed minus network plus eQTL prediction
      resid = Yt[[k]][g, , drop = F] -
        Bs[[k]][g, -g, drop = F] %*% Yt[[k]][-g, , drop = F] -
        crossprod(fs[[k]][[g]], Xt[[k]][[g]])
      rss = rss + tcrossprod(resid)
    }
  }
  ll + rss / (2 * sigma2) + Ng * sum(Ns) / 2 * log(sigma2)
}
## Held-out prediction error (residual sum of squares) of the SEM fit: each
## gene is predicted from the other genes (via B) and its own eQTLs (via f),
## per condition. Returns a 1 x 1 matrix; callers take [1].
gen_error = function(Xs, Ys, Bs, fs, Ng, Ns, K) {
  std = centralize_gen(Xs, Ys)
  Xt = lapply(seq_len(K), function(k) {
    lapply(std$X[[k]], t)  # per gene: sk x N
  })
  Yt = lapply(std$Y, t)    # per condition: Ng x N
  rss = 0
  for (k in seq_len(K)) {
    for (g in seq_len(Ng)) {
      pred = Bs[[k]][g, -g, drop = F] %*% Yt[[k]][-g, , drop = F] +
        crossprod(fs[[k]][[g]], Xt[[k]][[g]])
      rss = rss + tcrossprod(Yt[[k]][g, , drop = F] - pred)
    }
  }
  rss
}
## single SML for fused lasso method
#' @description SML method for single problem and fused lasso
## Single-condition SML with plain lasso (comparison method): 5-fold CV over
## lambda only; each condition is fitted independently with the same lambda.
cv_SMLasso = function(Bs,
fs,
Ys,
Xs,
sigma2,
Ng,
nlambda = 20,
threshold = 1e-4) {
lambda.max = get_lambda.max(Bs, Ys, Xs, Ng)
lambda.factors = 10 ^ seq(0,-4, length.out = nlambda) * lambda.max
wBs = inverse(Bs)
ncv = 5
Ns = sapply(Ys, ncol)
K = length(Ys)
Ytrain = vector("list", ncv)
Xtrain = vector("list", ncv)
Ytest = vector("list", ncv)
Xtest = vector("list", ncv)
## one fold labeling shared by all conditions
## NOTE(review): assumes every condition has Ns[1] samples - confirm
cv.fold = sample(seq(1, ncv), size = Ns[1], replace = T)
for (i in 1:ncv) {
Ytrain[[i]] = lapply(Ys, function(y) {
y[, cv.fold != i, drop = F]
})
Xtrain[[i]] = lapply(Xs, function(x) {
x[, cv.fold != i, drop = F]
})
Ytest[[i]] = lapply(Ys, function(y) {
y[, cv.fold == i, drop = F]
})
Xtest[[i]] = lapply(Xs, function(x) {
x[, cv.fold == i, drop = F]
})
}
cverrs = vector("list", nlambda)
hyper.params = NULL
for (cv in 1:ncv) {
params.opt = list()
Nt = sapply(Ytrain[[cv]], ncol)
ix = 1
for (lambda in lambda.factors) {
cat(sprintf("lambda = %4f, foldid = %d\n", lambda, cv))
## fit the two conditions separately at this lambda
params.opt[[ix]] = vector("list", 2)
params.opt[[ix]][[1]] = sparse_maximum_likehood_iPALM(
B = Bs[[1]],
f = fs[[1]],
Y = Ytrain[[cv]][[1]],
X = Xtrain[[cv]], # NOTE(review): whole per-fold X list, not Xtrain[[cv]][[1]] - confirm expected shape
sigma2 = sigma2[1],
N = Nt[1],
Ng = Ng,
lambda = lambda,
maxit = 50,
verbose = 1,
threshold = 1e-3
)
params.opt[[ix]][[2]] = sparse_maximum_likehood_iPALM(
B = Bs[[2]],
f = fs[[2]],
Y = Ytrain[[cv]][[2]],
X = Xtrain[[cv]], # NOTE(review): same as above
sigma2 = sigma2[1],
N = Nt[2],
Ng = Ng,
lambda = lambda,
maxit = 50,
verbose = 1,
threshold = 1e-3
)
Nc = sapply(Ytest[[cv]], ncol)
## held-out prediction error pooled over both conditions
err = SML_error(
Xtest[[cv]],
Ytest[[cv]],
list(params.opt[[ix]][[1]]$B, params.opt[[ix]][[2]]$B),
list(params.opt[[ix]][[1]]$f, params.opt[[ix]][[2]]$f),
Ng,
Nc,
K
)[1]
cverrs[[ix]] = c(cverrs[[ix]], err)
if (cv == 1) {
hyper.params = c(hyper.params, lambda)
}
ix = ix + 1
}
}
## rows of cverrs = lambda values, columns = folds
list(opt.hyperparams = hyper.params,
cverrs = do.call(rbind, cverrs))
}
## Pick the lasso penalty from cross-validation results: either the largest
## lambda whose CV mean is within one standard error of the minimum
## (se = TRUE, the parsimony rule) or the CV minimizer itself (se = FALSE).
optLasso_cv = function(cvparams, se = TRUE) {
  cvtab = data.frame(
    lambda = cvparams$opt.hyperparams,
    cvmean = apply(cvparams$cverrs, 1, mean),
    cvsd = apply(cvparams$cverrs, 1, sd)
  )
  imin = which.min(cvtab$cvmean)
  if (!se) {
    return(cvtab[imin, 1])
  }
  ## one-standard-error rule: sparsest model within 1 SE of the best score
  bound = cvtab[imin, 2] + cvtab[imin, 3]
  within.1se = cvtab[cvtab$cvmean <= bound, c(1, 2, 3)]
  max(within.1se[, 1])
}
## stability selection
## Stability selection for the generalized SML (FSSEM) estimator: refit on
## Nbootstrap random subsamples and count, per condition, how often each edge
## survives (N.ss) and how often each edge is differential (D.ss).
#' @param Bs,fs,sigma2,Ng initial estimates / problem size, passed to genSML_iPALM
#' @param lambda,rho penalty hyper-parameters for each refit
#' @param Nbootstrap number of subsample refits
#' @param Nsample fraction of samples drawn (without replacement) per refit
#' @return list(fit, Ns, Ds): the per-bootstrap fits and the selection counts
ssSML_iPALM = function(Bs,
                       fs,
                       Ys,
                       Xs,
                       sigma2,
                       Ng,
                       lambda,
                       rho,
                       wBs = inverse(Bs),
                       rB = flinv(Bs),
                       maxit = 100,
                       acc = TRUE,
                       inertial = inertial_pars("lin"),
                       threshold = 1e-3,
                       sparse = FALSE,
                       use.strict = TRUE,
                       Nbootstrap = 100,
                       Nsample = 0.75,
                       verbose = 2) {
  N = ncol(Ys[[1]])
  ss.fold = lapply(1:Nbootstrap, function(n) {
    sample(seq(1, N), ceiling(N * Nsample), replace = F)
  })
  ss.fit = list()
  N.ss = vector("list", 2)
  D.ss = NULL
  for (i in 1:Nbootstrap) {
    Yss = lapply(Ys, function(Y) {
      Y[, ss.fold[[i]]]
    })
    Xss = list()
    for (k in 1:length(Xs)) {
      ## bug fix: subsample by the bootstrap index i, not the condition
      ## index k (the original used ss.fold[[k]])
      Xss[[k]] = lapply(Xs[[k]], function(X) {
        X[, ss.fold[[i]], drop = F]
      })
    }
    ## bug fix: use the function's own arguments instead of the globals
    ## params.init / data / cvlambda.opt the original body captured
    ss.fit[[i]] = genSML_iPALM(
      Bs = Bs,
      fs = fs,
      Ys = Yss,
      Xs = Xss,
      sigma2 = sigma2[1],
      Ng = Ng,
      lambda = lambda,
      rho = rho,
      wBs = wBs,
      rB = rB,
      maxit = maxit,
      threshold = threshold,
      use.strict = use.strict,
      acc = acc,
      sparse = sparse
    )
    err2abs = ss.fit[[i]]$err
    ## per-condition selection counts: edge kept if it exceeds the fit error
    for (k in 1:2) {
      hit = ifelse(abs(ss.fit[[i]]$B[[k]]) > err2abs, 1, 0)
      N.ss[[k]] = if (is.null(N.ss[[k]])) hit else N.ss[[k]] + hit
    }
    ## differential edges: |B1 - B2| must exceed both the smaller edge
    ## magnitude and the 20%-quantile of nonzero magnitudes
    nonzero = c(as.numeric(ss.fit[[i]]$B[[1]]), as.numeric(ss.fit[[i]]$B[[2]]))
    nonzero = nonzero[nonzero != 0]
    thresh.2 = sort(abs(nonzero))[round(0.2 * length(nonzero)) + 1]
    dB = abs(ss.fit[[i]]$B[[1]] - ss.fit[[i]]$B[[2]])
    dhit = ifelse(dB > pmin(abs(ss.fit[[i]]$B[[1]]), abs(ss.fit[[i]]$B[[2]])) &
                    dB > thresh.2, 1, 0)
    D.ss = if (is.null(D.ss)) dhit else D.ss + dhit
  }
  list(fit = ss.fit, Ns = N.ss, Ds = D.ss)
}
################################################################# adaptive hyper-parameter version ##########################
## cross-validation for adaptive hyper-parameter ###
## version 2, added Jun 25 ###
## ###
#############################################################################################################################
## Shrinkage fused lasso regularizer parameter
#--------------------------------------------------------------------
## cross-validation on adaptive rho(fused lasso params) of lambda
## same function names but different scheme
#--------------------------------------------------------------------
## Smallest effective fused-lasso penalty given a fixed lasso penalty lambda:
## fit with rho = Inf (fully fused), then take the largest weighted entry of
## the subgradient of the remaining (likelihood + lasso) objective there.
get_rho.max = function(Bs, fs, Ys, Xs, lambda, sigma2, Ng, weighted = TRUE) {
## fully fused fit at this lambda
params.rho = multiSML_iPALM(
Bs,
fs,
Ys,
Xs,
sigma2,
Ng,
lambda = lambda,
rho = Inf,
maxit = 2000,
threshold = 1e-4,
use.strict = F,
sparse = T,
verbose = 1
)
## penalty weights are taken at the *initial* Bs, before the refit
weight.rho = flinv(Bs)
weight.lambda = inverse(Bs)[[1]]
## NOTE(review): only condition 1's network is kept; assumes rho = Inf fuses
## both conditions to a common B - confirm
Bs = params.rho$B[[1]]
fs = params.rho$f
sigma2 = params.rho$sigma2
std = centralize_mult(Xs, Ys)
Xs = std$X ## Ng (n x sk)
Ys = std$Y ## n x ng
## multiple
K = length(Ys)
Ns = sapply(Ys, nrow)
Bc = vector("list", K)
YY = vector("list", K)
FX = vector("list", K)
Dx = vector("list", K)
for (k in 1:K) {
## gradient of -Ns[k] * log det(I - B) at the fused solution
Bc[[k]] = -Ns[k] * t(solve(diag(Ng) - Bs))
YY[[k]] = crossprod(Ys[[k]]) ## Y %*% t(Y)
FX[[k]] = matrix(0, nrow = Ng, ncol = Ns[k]) ## F %*% X (p x k x k x n = p x n)
for (i in 1:Ng) {
## NOTE(review): Xs is indexed per gene (Xs[[i]]), i.e. centralize_mult
## appears to return one genotype list shared across conditions (unlike
## centralize_gen, which is per condition) - confirm
FX[[k]][i, ] = as.numeric(Xs[[i]] %*% fs[[k]][[i]])
}
## weighted subgradient magnitude; diagonal excluded (no self-loops)
Dx[[k]] = abs(Bc[[k]] + ((diag(Ng) - Bs) %*% YY[[k]] - FX[[k]] %*% Ys[[k]]) / sigma2 + lambda * weight.lambda * sign(Bs)) / weight.rho
diag(Dx[[k]]) = -Inf
}
max(c(max(Dx[[1]]), max(Dx[[2]])))
}
### debug switch
## 5-fold cross-validation for multiSML_iPALM with an adaptive rho grid:
## rho.max is recomputed for every lambda (get_rho.max), so the fused-lasso
## path is scaled to the lasso penalty in force; solutions are warm-started
## along each lambda's rho path.
## NOTE(review): the `logLik` argument is never used in the body - confirm
## whether it was meant to gate the log-likelihood computation.
cv_multiSML = function(Bs,
fs,
Ys,
Xs,
sigma2,
Ng,
nlambda = 20,
nrho = 20,
threshold = 1e-4,
logLik = TRUE,
verbose = 1) {
lambda.max = get_lambda.max(Bs, Ys, Xs, Ng)
lambda.factors = 10 ^ seq(0,-4, length.out = nlambda) * lambda.max
## adaptive lasso / fused-lasso weights from the initial estimate
wBs = inverse(Bs)
rB = flinv(Bs)
ncv = 5
Ns = sapply(Ys, ncol)
K = length(Ys)
Ytrain = vector("list", ncv)
Xtrain = vector("list", ncv)
Ytest = vector("list", ncv)
Xtest = vector("list", ncv)
## single fold labeling shared by both conditions
## NOTE(review): assumes all conditions have Ns[1] samples - confirm
cv.fold = sample(seq(1, ncv), size = Ns[1], replace = T)
for (i in 1:ncv) {
Ytrain[[i]] = lapply(Ys, function(y) {
y[, cv.fold != i, drop = F]
})
Xtrain[[i]] = lapply(Xs, function(x) {
x[, cv.fold != i, drop = F]
})
Ytest[[i]] = lapply(Ys, function(y) {
y[, cv.fold == i, drop = F]
})
Xtest[[i]] = lapply(Xs, function(x) {
x[, cv.fold == i, drop = F]
})
}
cverrs = vector("list", nrho * nlambda)
cvlls = vector("list", nrho * nlambda)
params = NULL
rho.factors = list()
il = 1
## precompute the adaptive rho grid (and the hyper-parameter table) per lambda
for (lambda in lambda.factors) {
rho.max = get_rho.max(Bs, fs, Ys, Xs, lambda, sigma2, Ng)
rho.factors[[il]] = 10 ^ seq(0,-4, length.out = nrho) * rho.max
params = c(params, lapply(rho.factors[[il]], function(rho) {
c(lambda, rho)
}))
il = il + 1
}
for (cv in 1:ncv) {
params.opt = list()
Nc = sapply(Ytest[[cv]], ncol)
ix = 1
il = 1
for (lambda in lambda.factors) {
for (rho in rho.factors[[il]]) {
cat(sprintf("lambda = %4f, rho = %4f, foldid = %d\n", lambda, rho, cv))
## cold start at the first rho of each lambda, warm start otherwise
if (ix %% nrho == 1) {
params.opt[[ix]] = multiSML_iPALM(
Bs,
fs,
Ytrain[[cv]],
Xtrain[[cv]],
sigma2,
Ng,
lambda,
rho,
wBs,
rB,
maxit = 1000,
threshold = threshold,
acc = TRUE,
sparse = FALSE,
use.strict = FALSE,
verbose = verbose
)
} else {
params.opt[[ix]] = multiSML_iPALM(
params.opt[[ix - 1]]$B,
params.opt[[ix - 1]]$f,
Ytrain[[cv]],
Xtrain[[cv]],
params.opt[[ix - 1]]$sigma2,
Ng,
lambda,
rho,
wBs,
rB,
maxit = 1000,
threshold = threshold,
acc = TRUE,
sparse = FALSE,
use.strict = FALSE,
verbose = verbose
)
}
## held-out log-likelihood and prediction error for this grid cell
loglik = SML_logLik(
Xtest[[cv]],
Ytest[[cv]],
params.opt[[ix]]$B,
params.opt[[ix]]$f,
Ng,
Nc,
K,
params.opt[[ix]]$detIB,
params.opt[[ix]]$sigma2
)[1]
err = SML_error(Xtest[[cv]],
Ytest[[cv]],
params.opt[[ix]]$B,
params.opt[[ix]]$f,
Ng,
Nc,
K)[1]
cverrs[[ix]] = c(cverrs[[ix]], err)
cvlls[[ix]] = c(cvlls[[ix]], loglik)
ix = ix + 1
} ## rho
il = il + 1
} ## lambda
} ## ncv
## rows = grid cells, columns = folds
list(opt.hyperparams = do.call(rbind, params),
cverrs = do.call(rbind, cverrs),
loglik = do.call(rbind, cvlls))
}
## utility functions
## pick lambda
## Pick (lambda, rho) from CV results by applying the one-standard-error
## rule to the lambda profile (means aggregated over each lambda's rho block),
## then again to the rho values inside the selected lambda's block.
optimLambda_cv = function(cvparams, type = c("err", "loglik"), se = TRUE, fused.sparse = TRUE) {
  ## bug fix: collapse the default c("err", "loglik") to one choice; the
  ## original compared a length-2 character vector with `==` inside `if`
  type = match.arg(type)
  cvm = if (type == "err") {
    cvparams$cverrs
  } else {
    cvparams$loglik
  }
  cvfuns = data.frame(
    lambda = cvparams$opt.hyperparams[, 1],
    rho = cvparams$opt.hyperparams[, 2],
    cvmean = apply(cvm, 1, mean),
    cvsd = apply(cvm, 1, sd)
  )
  cv.min = which.min(cvfuns$cvmean)
  cvlambda = split(cvfuns, cvfuns$lambda)
  cvms = data.frame(
    lambda = as.numeric(names(cvlambda)),
    cvmean = sapply(cvlambda, function(x) { mean(x$cvmean) }),
    ## bug fix: standard error over the rho block; length(x) on a data.frame
    ## is the number of columns (always 4) - use the number of rows
    cvsd = sapply(cvlambda, function(x) { sum(x$cvsd) / sqrt(nrow(x)) })
  )
  lambda.1se = min_Lambda(cvms$lambda, cvms$cvmean, cvms$cvsd)
  rhos = cvlambda[[as.character(lambda.1se)]]
  rho.1se = min_Lambda(rhos$rho, rhos$cvmean, rhos$cvsd)
  if (se) {
    list(lambda = lambda.1se, rho = rho.1se)
  } else {
    list(lambda = cvfuns[cv.min, 1], rho = cvfuns[cv.min, 2])
  }
}
## One-standard-error selection: among penalties whose CV mean lies within
## one SD (measured at the minimizer) of the minimum, return the largest.
min_Lambda = function(lambda, cvmean, cvsd) {
  best = min(cvmean, na.rm = TRUE)
  ## largest penalty attaining the minimum (ties broken towards sparsity)
  at.best = max(lambda[cvmean <= best], na.rm = TRUE)
  pos = match(at.best, lambda)
  bound = (cvmean + cvsd)[pos]
  max(lambda[cvmean <= bound], na.rm = TRUE)
}
################## adaptive version of genSML
######################
# comparable method
######################
## cross validation for hyper-parameter tuning
#' @description 5-fold cross-validation
#' @param dyn dynamic updated rho.max by given lambda
#' @example
#' cv.params = cv_genSML(Bs = params.init$B, fs = params.init$F, Ys = Ys, Xs = Xs, sigma2 = params.init$sigma2[1], Ng = data$var$Ng, nlambda = 20, nrho = 20)
#' @description 5-fold cross-validation for genSML_iPALM with an adaptive rho
#' grid: for every lambda, rho.max is recomputed via gen_rho.max so the fused
#' penalty path is scaled to that lambda. Solutions are warm-started along
#' each lambda's rho path.
#' @param Bs,fs initial coefficient matrices / eQTL effects (per condition)
#' @param Ys,Xs per-condition expression / per-gene genotype data
#' @param sigma2 initial noise variance; Ng number of genes
#' @param nlambda,nrho grid sizes; weighted use adaptive weights if TRUE
#' @return list(opt.hyperparams, cverrs, loglik) with one entry per grid cell
cv_genSML = function(Bs,
                     fs,
                     Ys,
                     Xs,
                     sigma2,
                     Ng,
                     nlambda = 20,
                     nrho = 20,
                     weighted = TRUE,
                     threshold = 1e-4,
                     use.strict = FALSE,
                     verbose = 1) {
  lambda.max = gen_lambda.max(Bs, Ys, Xs, Ng, weighted)
  lambda.factors = 10 ^ seq(0, -4, length.out = nlambda) * lambda.max
  if (weighted) {
    wBs = inverse(Bs)
    rB = flinv(Bs)
  } else {
    wBs = invone(Bs)
    rB = flone(Bs)
  }
  ncv = 5
  Ns = sapply(Ys, ncol)
  K = length(Ys)
  Ytrain = vector("list", ncv)
  Xtrain = vector("list", ncv)
  Ytest = vector("list", ncv)
  Xtest = vector("list", ncv)
  ## independent fold labels per condition (sample sizes may differ)
  cv.fold = list()
  cv.fold[[1]] = sample(seq(1, ncv), size = Ns[1], replace = T)
  cv.fold[[2]] = sample(seq(1, ncv), size = Ns[2], replace = T)
  for (i in 1:ncv) {
    Ytrain[[i]] = lapply(1:2, function(ix) {
      Ys[[ix]][, cv.fold[[ix]] != i, drop = F]
    })
    Xtrain[[i]] = lapply(1:2, function(ix) {
      lapply(Xs[[ix]], function(x) {
        x[, cv.fold[[ix]] != i, drop = F]
      })
    })
    Ytest[[i]] = lapply(1:2, function(ix) {
      Ys[[ix]][, cv.fold[[ix]] == i, drop = F]
    })
    Xtest[[i]] = lapply(1:2, function(ix) {
      lapply(Xs[[ix]], function(x) {
        x[, cv.fold[[ix]] == i, drop = F]
      })
    })
  }
  cverrs = vector("list", nrho * nlambda)
  cvlls = vector("list", nrho * nlambda)
  hyper.params = list()
  rho.factors = list()
  il = 1
  for (lambda in lambda.factors) {
    ## bug fixes: forward `weighted` to gen_rho.max, and drop the
    ## accumulation into `params`, which was never initialized (runtime
    ## error) and never used - hyper.params is collected below instead
    rho.max = gen_rho.max(Bs, fs, Ys, Xs, lambda, sigma2, Ng, weighted)
    rho.factors[[il]] = 10 ^ seq(0, -4, length.out = nrho) * rho.max
    il = il + 1
  }
  for (cv in 1:ncv) {
    params.opt = list()
    Nc = sapply(Ytest[[cv]], ncol)
    ix = 1
    il = 1
    for (lambda in lambda.factors) {
      for (rho in rho.factors[[il]]) {
        cat(sprintf("lambda = %4f, rho = %4f, foldid = %d\n", lambda, rho, cv))
        if (ix %% nrho == 1) {
          ## first rho of this lambda: cold start from the initial estimates
          params.opt[[ix]] = genSML_iPALM(
            Bs, fs, Ytrain[[cv]], Xtrain[[cv]], sigma2, Ng, lambda, rho,
            wBs, rB, maxit = 1000, threshold = threshold, acc = TRUE,
            sparse = FALSE, use.strict = use.strict, verbose = verbose
          )
        } else {
          ## warm start from the previous grid cell
          params.opt[[ix]] = genSML_iPALM(
            params.opt[[ix - 1]]$B, params.opt[[ix - 1]]$f,
            Ytrain[[cv]], Xtrain[[cv]], params.opt[[ix - 1]]$sigma2,
            Ng, lambda, rho, wBs, rB, maxit = 1000, threshold = threshold,
            acc = TRUE, sparse = FALSE, use.strict = use.strict,
            verbose = verbose
          )
        }
        ## held-out log-likelihood and prediction error for this grid cell
        loglik = gen_logLik(
          Xtest[[cv]], Ytest[[cv]], params.opt[[ix]]$B, params.opt[[ix]]$f,
          Ng, Nc, K, params.opt[[ix]]$detIB, params.opt[[ix]]$sigma2
        )[1]
        err = gen_error(Xtest[[cv]], Ytest[[cv]], params.opt[[ix]]$B,
                        params.opt[[ix]]$f, Ng, Nc, K)[1]
        cverrs[[ix]] = c(cverrs[[ix]], err)
        cvlls[[ix]] = c(cvlls[[ix]], loglik)
        if (cv == 1) {
          hyper.params[[ix]] = c(lambda, rho)
        }
        ix = ix + 1
      } ## rho
      il = il + 1
    }
  }
  list(opt.hyperparams = hyper.params,
       cverrs = cverrs,
       loglik = cvlls)
}
## cross-validation and EBIC for hyper-parameter tuning
## rho max can be estimated, rho is the fused lasso
## regularized hyper parameter
#' @description gen_rho.max
#' @example
#' rhomax = gen_rho.max(params.init$B, params.init$F, Ys, Xs, lambda, params.init$sigma2[1], data$var$Ng)
## Smallest effective fused-lasso penalty given a fixed lasso penalty lambda
## (adaptive-grid version of gen_rho.max): fit with rho = Inf, then take the
## largest weighted entry of the subgradient of the remaining objective.
gen_rho.max = function(Bs, fs, Ys, Xs, lambda, sigma2, Ng, weighted = TRUE) {
## adaptive (inverse-magnitude) or unit penalty weights
if(weighted) {
wBs = inverse(Bs)
rB = flinv(Bs)
} else {
wBs = invone(Bs)
rB = flone(Bs)
}
## fully fused fit at this lambda
params.rho = genSML_iPALM(
Bs,
fs,
Ys,
Xs,
sigma2,
Ng,
lambda = lambda,
rho = Inf,
wBs = wBs,
rB = rB,
maxit = 2000,
threshold = 1e-4,
use.strict = F,
sparse = T,
verbose = 1
)
## penalty weights are taken at the *initial* Bs, before the refit
weight.rho = flinv(Bs)
weight.lambda = inverse(Bs)[[1]]
## NOTE(review): only condition 1's network is kept; assumes rho = Inf fuses
## both conditions to a common B - confirm
Bs = params.rho$B[[1]]
fs = params.rho$f
sigma2 = params.rho$sigma2
std = centralize_gen(Xs, Ys)
Xs = std$X ## Ng (n x sk)
Ys = std$Y ## n x ng
## multiple
K = length(Ys)
Ns = sapply(Ys, nrow)
Bc = vector("list", K)
YY = vector("list", K)
FX = vector("list", K)
Dx = vector("list", K)
for (k in 1:K) {
## gradient of -Ns[k] * log det(I - B) at the fused solution
Bc[[k]] = -Ns[k] * t(solve(diag(Ng) - Bs))
YY[[k]] = crossprod(Ys[[k]]) ## Y %*% t(Y)
FX[[k]] = matrix(0, nrow = Ng, ncol = Ns[k]) ## F %*% X (p x k x k x n = p x n)
for (i in 1:Ng) {
FX[[k]][i, ] = as.numeric(Xs[[k]][[i]] %*% fs[[k]][[i]])
}
## weighted subgradient magnitude; diagonal excluded (no self-loops)
Dx[[k]] = abs(Bc[[k]] + ((diag(Ng) - Bs) %*% YY[[k]] - FX[[k]] %*% Ys[[k]]) / sigma2 + lambda * weight.lambda * sign(Bs)) / weight.rho
diag(Dx[[k]]) = -Inf
}
## Dxy = abs((diag(Ng) - Bs) %*% (YY[[2]] - YY[[1]]) - (FX[[2]] %*% Ys[[2]] - FX[[1]] %*% Ys[[1]])) / sigma2 / 2 / weight.rho
## diag(Dxy) = -Inf
max(c(max(Dx[[1]]), max(Dx[[2]])))
## max(Dxy)
}
############# new version
## utility functions
## pick lambda
## Pick (lambda, rho) from CV results (matrix-form hyper-parameter grid):
## one-standard-error rule, preferring fused sparsity (largest rho) when
## fused.sparse, otherwise lasso sparsity (largest lambda).
optimLambda_cv = function(cvparams, type = c("err", "loglik"), se = TRUE, fused.sparse = TRUE) {
  ## bug fix: collapse the default c("err", "loglik") to one choice; the
  ## original compared a length-2 character vector with `==` inside `if`
  type = match.arg(type)
  cvm = if (type == "err") {
    cvparams$cverrs
  } else {
    cvparams$loglik
  }
  cvfuns = data.frame(
    lambda = cvparams$opt.hyperparams[, 1],
    rho = cvparams$opt.hyperparams[, 2],
    cvmean = apply(cvm, 1, mean),
    cvsd = apply(cvm, 1, sd)
  )
  cv.min = which.min(cvfuns$cvmean)
  ## one-standard-error bound at the CV minimizer
  cv.1se = cvfuns[cv.min, 3] + cvfuns[cv.min, 4]
  cvfun.1se = cvfuns[cvfuns$cvmean <= cv.1se, c(1, 2, 3)]
  if (fused.sparse) {
    rho.1se = max(cvfun.1se$rho)
    ## bug fix: which.min indexes the rho-restricted subset, so index the
    ## subsetted lambda column too (the original indexed the full column)
    sel = cvfun.1se$rho == rho.1se
    lambda.1se = cvfun.1se$lambda[sel][which.min(cvfun.1se$cvmean[sel])]
  } else {
    lambda.1se = max(cvfun.1se$lambda)
    sel = cvfun.1se$lambda == lambda.1se
    rho.1se = cvfun.1se$rho[sel][which.min(cvfun.1se$cvmean[sel])]
  }
  if (se) {
    list(lambda = lambda.1se, rho = rho.1se)
  } else {
    list(lambda = cvfuns[cv.min, 1], rho = cvfuns[cv.min, 2])
  }
}
#############################################
## new version of genSML
#############################################
## solve SML problem by block coordinate descent by backtracking inert-PALM on
## different gene expression and different eQTLs
#' @param lambda lambda hyper-parameter for lasso term
#' @param rho fused lasso hyper-parameter for fused lasso term
#' @param gamma invertible matrix stablize parameter gamma
#' params.init = constrained_L2reg_gen(Xs, Ys, sigma2$rho.opt, M, N)
#' params.opt = genSML_iPALM(Bs = params.init$B, fs = params.init$F, Ys = Ys, Xs = Xs,
#' sigma2 = params.init$sigma2[1], Ng = data$var$Ng,
#' lambda = 0.1, rho = 0.1, maxit = 500)
#' @description Solve the generalized fused-sparse SEM maximum-likelihood
#' problem by inertial PALM (iPALM) block-coordinate descent, one row of the
#' network matrices at a time, for K = 2 conditions with condition-specific
#' eQTL designs.
#' @param Bs list of K initial network coefficient matrices (Ng x Ng)
#' @param fs list of K lists of per-gene eQTL effect vectors
#' @param Ys list of K expression matrices (centralized internally)
#' @param Xs list of K lists of per-gene genotype matrices
#' @param sigma2 initial noise variance
#' @param Ng number of genes
#' @param lambda lasso hyper-parameter; rho fused-lasso hyper-parameter
#' @param wBs,rB adaptive lasso / fused-lasso weight matrices
#' @param maxit maximum outer iterations; acc enable inertial acceleration
#' @param inertial function of the iteration giving the inertial coefficient
#' @param threshold convergence tolerance
#' @param sparse return B as sparse Matrix objects if TRUE
#' @param use.strict require both parameter AND likelihood convergence
#' @return list(B, f, mu, sigma2, niter, err, detIB)
genSML_iPALM = function(Bs,
                        fs,
                        Ys,
                        Xs,
                        sigma2,
                        Ng,
                        lambda,
                        rho,
                        wBs = inverse(Bs),
                        rB = flinv(Bs),
                        maxit = 100,
                        acc = TRUE,
                        inertial = inertial_pars("lin"),
                        threshold = 1e-3,
                        sparse = FALSE,
                        use.strict = TRUE,
                        verbose = 2) {
  std = centralize_gen(Xs, Ys)
  Xs = std$X
  Ys = std$Y
  meanXs = std$muX
  meanYs = std$muY
  K = length(Ys) # number of conditions = 2
  if (verbose == 2) {
    cat(sprintf("conditions must be restricted to 2.. K = %d\n", K))
  }
  ## per-gene sufficient statistics for the row-wise eQTL updates
  f0 = vector("list", K)
  f1 = vector("list", K)
  Yp = vector("list", K)
  Hy = vector("list", K)
  Yp.maxEigen = vector("list", K)
  Ns = sapply(Ys, nrow)
  for (i in 1:Ng) {
    for (k in 1:K) {
      Xi = Xs[[k]][[i]]
      Pi = solve(crossprod(Xi)) %*% t(Xi) # least-squares projector of gene i
      yi_k = Ys[[k]][, i, drop = F] # n[k] x 1 for gene i
      Yi_k = Ys[[k]][, -i] # n[k] x (ng-1)
      f0[[k]][[i]] = Pi %*% yi_k
      ## f[[k]][[i]] = f0[[k]][[i]] - f1[[k]][[i]] %*% bi^k, bi^k = B[[k]][i,-i]
      f1[[k]][[i]] = Pi %*% Yi_k
      Hi_k = diag(Ns[k]) - Xi %*% Pi # annihilator of Xi
      Yp[[k]][[i]] = t(Yi_k) %*% Hi_k %*% Yi_k # (ng - 1) x (ng - 1)
      Hy[[k]][[i]] = t(yi_k) %*% Hi_k %*% Yi_k # 1 x (ng - 1)
      ## largest eigenvalue of Yp, used in the Lipschitz bound below
      Yp.maxEigen[[k]][[i]] = eigen(Yp[[k]][[i]])$values[1]
    }
  }
  ## row-wise update of the gene networks
  niter = 1
  ImBs = lapply(Bs, function(B) {
    diag(Ng) - B
  })
  detIBs = sapply(ImBs, det)
  IBsinv = lapply(ImBs, solve)
  ## bug fix: the original used the undefined `N` here; the recomputation
  ## inside the main loop uses Ng * sum(Ns), so initialize consistently
  Ls = logLik(detIBs, Bs, wBs, rB, lambda, rho, Ns, K) + Ng * sum(Ns) / 2 * log(sigma2)
  ## two-step history for the inertial extrapolation
  Bs.prevs = list(Bs, Bs)
  inert = acc
  while (niter <= maxit) {
    inert.pars = inertial(niter)
    Bs.inert = if (inert) {
      lapply(1:K, function(k) {
        Bs.prevs[[2]][[k]] + inert.pars * (Bs.prevs[[2]][[k]] - Bs.prevs[[1]][[k]])
      })
    } else {
      Bs
    }
    fs.prev = fs
    Ls.prev = Ls
    for (i in 1:Ng) {
      ## row-i objective:
      ## sum(-Ns[k] * sigma2 * log(det(I-B[[k]])^2)) + bi^T Yp bi - Hy bi
      ci = lapply(IBsinv, function(IBi) {
        # ci / det(I - B); from (I - B)^{-1}
        IBi[-i, i, drop = F]
      })
      bi = lapply(Bs.inert, function(B.inert) {
        t(B.inert[i, -i, drop = F])
      })
      gi = lapply(1:K, function(k) {
        grad = grad_rwise_SML(Ns[k], ci[[k]], Yp[[k]][[i]], Hy[[k]][[i]], sigma2[1])
        grad(bi[[k]])
      })
      ## Lipschitz moduli for row-i
      Lis = sapply(1:K, function(k) {
        gtg = tcrossprod(ImBs[[k]][-i, ])
        oi = chol2inv(gtg)
        deti = det(gtg)
        gii = ImBs[[k]][-i, -i]
        si = ImBs[[k]][-i, i, drop = F]
        c2i = sum((ci[[k]] * detIBs[k]) ^ 2)
        lips_rwise_SML(Ns[k], oi, gii, si, c2i, deti,
                       Yp.maxEigen[[k]][[i]], sigma2, Ng)[1]
      })
      Li = max(Lis)
      Li = (1 + 2 * inert.pars) * Li / (2 * (1 - inert.pars))
      wBi = lapply(wBs, function(wB) {
        wB[i, -i]
      })
      rBi = rB[i, -i]
      ## proximal step with backtracking: reject proposals that would make
      ## (I - B) singular (zero denominator in the rank-one det update)
      detZero = TRUE
      cl = 1
      while (detZero) {
        step = Li * cl
        ui = lapply(1:K, function(k) {
          bi[[k]] - gi[[k]] / step
        })
        xi = prox_flsa(lambda, rho, step, ui, wBi, rBi)
        dIBu = sapply(1:K, function(k) {
          IBsinv[[k]][i, i] - (t(xi[[k]]) %*% IBsinv[[k]][-i, i, drop = F])[1]
        })
        ## bug fix: the original doubled `cl` but never applied it, so this
        ## loop never terminated when dIBu hit zero; shrinking the step via
        ## `step = Li * cl` changes the proposal on retry (first pass is
        ## identical to the original, cl == 1)
        cl = cl * 2
        detZero = any(dIBu == 0)
      }
      for (k in 1:K) {
        Bs[[k]][i, -i] = xi[[k]]
        ImBs[[k]] = diag(Ng) - Bs[[k]]
        ## rank-one updates of det(I - B) and (I - B)^{-1}
        ## (matrix determinant lemma / Sherman-Morrison)
        detIBs[k] = (ImBs[[k]][i, ] %*% IBsinv[[k]][, i, drop = F])[1] * detIBs[k]
        dbi = Bs.prevs[[2]][[k]][i, ] - Bs[[k]][i, ]
        IBsinv[[k]] = IBsinv[[k]] - IBsinv[[k]][, i, drop = F] %*% dbi %*% IBsinv[[k]] / (1 + dbi %*% IBsinv[[k]][, i, drop = F])[1]
        fs[[k]][[i]] = f0[[k]][[i]] - f1[[k]][[i]] %*% Bs[[k]][i, -i]
      }
    } # row-wise update
    ## relative changes in B and f over this sweep
    Berr = sum(sapply(1:K, function(k) {
      norm(Bs[[k]] - Bs.prevs[[2]][[k]], type = "f") / (1 + norm(Bs.prevs[[2]][[k]], type = "f"))
    }))
    Ferr = sum(sapply(1:K, function(k) {
      sum(sapply(1:Ng, function(i) {
        norm(fs[[k]][[i]] - fs.prev[[k]][[i]], type = "f")
      })) / (1 + sum(sapply(1:Ng, function(i) {
        norm(fs.prev[[k]][[i]], type = "f")
      })))
    }))
    err = Berr + Ferr
    sigma2 = sigma2_gen(Xs, Ys, Bs, fs, Ng, Ns, K)
    Ls = logLik(detIBs, Bs, wBs, rB, lambda, rho, Ns, K) + Ng * sum(Ns) / 2 * log(sigma2)
    Lerr = abs(Ls.prev - Ls) / (1 + abs(Ls.prev))
    # inert = ifelse(Lerr < 1e-8, FALSE, acc)
    if (verbose >= 2) {
      cat(
        sprintf(
          "SML: iteration = %d, error = %f, logLik = %f, sigma2 = %f, inert = %s\n",
          niter,
          err,
          Ls,
          sigma2,
          inert
        )
      )
    }
    niter = niter + 1
    Bs.prevs = list(Bs.prevs[[2]], Bs)
    opt.cond = if (use.strict) {
      (err < threshold && Lerr < threshold)
    } else {
      (err < threshold || Lerr < threshold)
    }
    if (opt.cond || niter > maxit || is.nan(err)) {
      ## recover intercepts on the original (uncentered) scale
      mu = lapply(1:K, function(k) {
        (diag(Ng) - Bs[[k]]) %*% meanYs[[k]] - sapply(1:Ng, function(i) {
          meanXs[[k]][[i]] %*% fs[[k]][[i]]
        })
      })
      sigma2 = sigma2_gen(Xs, Ys, Bs, fs, Ng, Ns, K)
      if (sparse) {
        Bs = lapply(Bs, Matrix, sparse = T)
      }
      break
    }
  } # while(niter <= maxit)
  list(
    B = Bs,
    f = fs,
    mu = mu,
    sigma2 = sigma2,
    niter = niter,
    err = err,
    detIB = detIBs
  )
}
############# new version
## utility functions
## pick lambda
## Pick (lambda, rho) from CV results stored as lists (one vector / error
## record per grid cell): one-standard-error rule, preferring fused sparsity
## (largest rho) when fused.sparse, otherwise lasso sparsity (largest lambda).
optimLambda_cv1 = function(cvparams,
                           type = c("err", "loglik"),
                           se = TRUE,
                           fused.sparse = TRUE) {
  ## bug fix: collapse the default c("err", "loglik") to one choice; the
  ## original compared a length-2 character vector with `==` inside `if`
  type = match.arg(type)
  cvm = if (type == "err") {
    cvparams$cverrs
  } else {
    cvparams$loglik
  }
  cvfuns = data.frame(
    lambda = sapply(cvparams$opt.hyperparams, `[`, 1),
    rho = sapply(cvparams$opt.hyperparams, `[`, 2),
    cvmean = sapply(cvm, mean),
    cvsd = sapply(cvm, sd)
  )
  cv.min = which.min(cvfuns$cvmean)
  ## one-standard-error bound at the CV minimizer
  cv.1se = cvfuns[cv.min, 3] + cvfuns[cv.min, 4]
  cvfun.1se = cvfuns[cvfuns$cvmean <= cv.1se, c(1, 2, 3)]
  if (fused.sparse) {
    rho.1se = max(cvfun.1se$rho)
    ## bug fix: which.min indexes the rho-restricted subset, so index the
    ## subsetted lambda column too (the original indexed the full column)
    sel = cvfun.1se$rho == rho.1se
    lambda.1se = cvfun.1se$lambda[sel][which.min(cvfun.1se$cvmean[sel])]
  } else {
    lambda.1se = max(cvfun.1se$lambda)
    sel = cvfun.1se$lambda == lambda.1se
    rho.1se = cvfun.1se$rho[sel][which.min(cvfun.1se$cvmean[sel])]
  }
  if (se) {
    list(lambda = lambda.1se, rho = rho.1se)
  } else {
    list(lambda = cvfuns[cv.min, 1], rho = cvfuns[cv.min, 2])
  }
}
#### select subset of hyper-parameters within 1 sd
############# new version
## utility functions
## pick lambda
## Return the ntop best (lambda, rho) pairs whose CV mean lies strictly
## within one standard error of the minimum, ordered by CV mean.
subLambda_ss = function(cvparams,
                        type = c("err", "loglik"),
                        ntop = 10) {
  ## bug fix: collapse the default c("err", "loglik") to one choice; the
  ## original compared a length-2 character vector with `==` inside `if`
  type = match.arg(type)
  cvm = if (type == "err") {
    cvparams$cverrs
  } else {
    cvparams$loglik
  }
  cvfuns = data.frame(
    lambda = sapply(cvparams$opt.hyperparams, `[`, 1),
    rho = sapply(cvparams$opt.hyperparams, `[`, 2),
    cvmean = sapply(cvm, mean),
    cvsd = sapply(cvm, sd)
  )
  cv.min = which.min(cvfuns$cvmean)
  cv.1se = cvfuns[cv.min, 3] + cvfuns[cv.min, 4]
  cvfun.1se = cvfuns[cvfuns$cvmean < cv.1se, c(1, 2, 3, 4)]
  ## bug fixes: honor the ntop argument (the original hard-coded 1:10) and
  ## avoid NA rows when fewer than ntop candidates survive the 1-SE filter
  ntake = min(ntop, nrow(cvfun.1se))
  cvfun.1se[order(cvfun.1se[, 3], decreasing = F)[seq_len(ntake)], c(1, 2)]
}
####################################################
## stability selection on a class of parameters
##################
##' @title ss_fssem
## stability selection
## Stability selection for the fused sparse SEM (FSSEM) networks.
##
## For every (lambda, rho) row of `params`, refits genSML_iPALM on
## `Nbootstrap` subsamples of the columns (samples) of Ys/Xs and counts,
## entry-wise:
##   N[[1]], N[[2]]  how often each coefficient of B[[1]] / B[[2]] exceeds
##                   the fit's convergence error (selection frequency), and
##   D               how often the two condition-specific networks differ by
##                   more than both the smaller coefficient magnitude and the
##                   20th-percentile magnitude of nonzero coefficients.
##
## Ys is a list (per condition) of Ng x N expression matrices; Xs is a list
## (per condition) of per-gene covariate matrices; Nsample is the fraction of
## samples drawn per subsample.
##
## Returns list(N = per-network selection counts, D = differential counts).
##
## NOTE(review): the body reads the globals `params.init` and `data` instead
## of the Bs/fs/sigma2/Ng arguments -- confirm this is intentional.
ss_fssem = function(Bs,
                    fs,
                    Ys,
                    Xs,
                    sigma2,
                    Ng,
                    params = NULL,
                    wBs = inverse(Bs),
                    rB = flinv(Bs),
                    maxit = 100,
                    acc = TRUE,
                    inertial = inertial_pars("lin"),
                    threshold = 1e-3,
                    sparse = FALSE,
                    use.strict = TRUE,
                    Nbootstrap = 100,
                    Nsample = 0.75,
                    verbose = 2) {
  N = ncol(Ys[[1]])
  ss.fold = vector("list", Nbootstrap)
  i = 1
  # draw subsamples in complementary pairs: fold i is a random Nsample
  # fraction of the columns, fold i+1 is its complement
  while(i <= Nbootstrap) {
    subs = sample(seq(1, N), ceiling(N * Nsample), replace = F)
    ss.fold[[i]] = sort(subs)
    ss.fold[[i+1]] = setdiff(seq(1, N), subs)
    i = i + 2
  }
  ss.fit = list()
  N.ss = vector("list", 2)
  D.ss = NULL
  for (j in 1:nrow(params)) {
    lambda = params[j, 1]
    rho = params[j, 2]
    for (i in 1:Nbootstrap) {
      Yss = lapply(Ys, function(Y) {
        Y[, ss.fold[[i]]]
      })
      Xss = list()
      for (k in 1:length(Xs)) {
        # NOTE(review): ss.fold[[k]] indexes by condition k here, while Yss
        # above is subset with fold i -- this looks like it should be
        # ss.fold[[i]]; confirm before relying on X/Y sample alignment.
        Xss[[k]] = lapply(Xs[[k]], function(X) {
          X[, ss.fold[[k]], drop = F]
        })
      }
      # NOTE(review): initial values come from the globals `params.init` and
      # `data`, not from this function's Bs/fs/sigma2/Ng arguments.
      ss.fit[[i]] = genSML_iPALM(
        Bs = params.init$B,
        fs = params.init$F,
        Ys = Yss,
        Xs = Xss,
        sigma2 = params.init$sigma2[1],
        Ng = data$var$Ng,
        lambda = lambda,
        rho = rho,
        maxit = maxit,
        threshold = threshold,
        use.strict = use.strict,
        acc = acc,
        sparse = sparse
      )
      # a coefficient counts as "selected" when its magnitude exceeds the
      # fit's convergence error
      err2abs = ss.fit[[i]]$err
      if (is.null(N.ss[[1]])) {
        N.ss[[1]] = ifelse(abs(ss.fit[[i]]$B[[1]]) > err2abs, 1, 0)
      } else {
        N.ss[[1]] = N.ss[[1]] + ifelse(abs(ss.fit[[i]]$B[[1]]) > err2abs, 1, 0)
      }
      if (is.null(N.ss[[2]])) {
        N.ss[[2]] = ifelse(abs(ss.fit[[i]]$B[[2]]) > err2abs, 1, 0)
      } else {
        N.ss[[2]] = N.ss[[2]] + ifelse(abs(ss.fit[[i]]$B[[2]]) > err2abs, 1, 0)
      }
      # differential threshold: the 20th-percentile magnitude among all
      # nonzero coefficients of both networks
      nonzero = c(as.numeric(ss.fit[[i]]$B[[1]]), as.numeric(ss.fit[[i]]$B[[2]]))
      nonzero = nonzero[nonzero != 0]
      thresh.2 = sort(abs(nonzero))[round(0.2 * length(nonzero)) + 1]
      if (is.null(D.ss)) {
        D.ss = ifelse(
          abs(ss.fit[[i]]$B[[1]] - ss.fit[[i]]$B[[2]]) > pmin(abs(ss.fit[[i]]$B[[1]]), abs(ss.fit[[i]]$B[[2]])) &
            abs(ss.fit[[i]]$B[[1]] - ss.fit[[i]]$B[[2]]) > thresh.2,
          1,
          0
        )
      } else {
        D.ss = D.ss + ifelse(
          abs(ss.fit[[i]]$B[[1]] - ss.fit[[i]]$B[[2]]) > pmin(abs(ss.fit[[i]]$B[[1]]), abs(ss.fit[[i]]$B[[2]])) &
            abs(ss.fit[[i]]$B[[1]] - ss.fit[[i]]$B[[2]]) > thresh.2,
          1,
          0
        )
      }
    }
  }
  list(N = N.ss, D = D.ss)
}
## BIC-based selection of the lasso penalty for the sparse ML SEM fit.
## For each of `nlambda` log-spaced penalties (scaled by the data-driven
## maximum), both conditions are refit with sparse_maximum_likehood_iPALM and
## scored with SML_BIC; the penalty minimizing BIC is returned with its fit.
bic_SMLasso = function(Bs,
                       fs,
                       Ys,
                       Xs,
                       sigma2,
                       Ng,
                       nlambda = 20,
                       threshold = 1e-3) {
  lambda.max = get_lambda.max(Bs, Ys, Xs, Ng)
  # log-spaced grid over three decades below lambda.max
  lambda.factors = 10 ^ seq(0,-3, length.out = nlambda) * lambda.max
  wBs = inverse(Bs)
  ncv = 5
  Ns = sapply(Ys, ncol)
  K = length(Ys)
  allfit = list()
  berr = vector("list", nlambda)
  for (idx in seq_along(lambda.factors)) {
    lambda = lambda.factors[idx]
    cat(sprintf("lambda = %4f\n", lambda))
    df = 0
    # refit each of the two conditions at this penalty
    fit = lapply(1:2, function(cond) {
      sparse_maximum_likehood_iPALM(
        B = Bs[[cond]],
        f = fs[[cond]],
        Y = Ys[[cond]],
        X = Xs,
        sigma2 = sigma2,
        N = Ns[cond],
        Ng = Ng,
        lambda = lambda,
        maxit = 30,
        verbose = 1,
        threshold = 1e-3
      )
    })
    berr[[idx]] = c(lambda, SML_BIC(Xs, Ys, fit, Ng, Ns, 2))
    allfit[[idx]] = fit
  }
  # stack (lambda, BIC) rows and pick the minimizer
  score = do.call(rbind, berr)
  best = which.min(score[, 2])
  list(lambda = score[best, 1], fit = allfit[[best]])
}
## Bayesian Information Criterion for a K-condition sparse ML SEM fit.
##
## Xs, Ys:  raw covariate / expression data (centralized internally)
## fit:     list of K fit objects, each with components B (network), f
##          (per-gene effect vectors), detIB (det(I - B)) and sigma2
## Ng, Ns:  number of genes and per-condition sample counts
## K:       number of conditions
##
## Returns the scalar BIC = sum_k [ 2 * nll_k + |B_k != 0| * log(N_k) ].
SML_BIC = function(Xs, Ys, fit, Ng, Ns, K) {
  std = centralize_mult(Xs, Ys)
  X = lapply(std$X, t)
  Y = lapply(std$Y, t)
  BIC = 0
  Bs = lapply(fit, function(x){x$B})
  fs = lapply(fit, function(x){x$f})
  for (k in 1:K) {
    # log-determinant (Jacobian) term of the SEM likelihood
    nll = - Ns[k] / 2 * log(fit[[k]]$detIB^2)
    # BUGFIX: the residual sum of squares must be accumulated per condition;
    # it was previously initialized once outside this loop and carried over
    # across k, inflating the nll of later conditions.
    err = 0
    for (i in 1:Ng) {
      Xi = X[[i]]                       # sk x N
      bi = Bs[[k]][i, -i, drop = F]     # 1 x (ng-1)
      yi = Y[[k]][i, , drop = F]        # 1 x N
      Yi = Y[[k]][-i, , drop = F]       # (ng-1) x N
      fi = fs[[k]][[i]]                 # sk x 1
      err = err + tcrossprod(yi - bi %*% Yi - crossprod(fi, Xi))
    }
    nll = nll + err / (2 * fit[[k]]$sigma2) + Ng * Ns[k] / 2 * log(2 * pi * fit[[k]]$sigma2)
    # BIC penalty: number of nonzero network coefficients times log(N_k)
    BIC = BIC + (2 * nll + sum(Bs[[k]] != 0) * log(Ns[k]))
  }
  # err (hence BIC) is a 1x1 matrix from tcrossprod(); return a plain scalar
  as.numeric(BIC[1, 1])
}
## Maximum-likelihood noise variance for a single-condition SEM fit.
## X:  list (per gene) of N x sk covariate matrices
## Y:  N x Ng expression matrix
## B:  Ng x Ng network matrix (row i uses all columns except i)
## f:  list (per gene) of sk x 1 effect vectors
## Returns the scalar residual variance RSS / (Ng * N).
sigma2_sml = function(X, Y, B, f, Ng, N) {
  Xt = lapply(X, t)                     # per gene: sk x N
  Yt = t(Y)                             # Ng x N, genes in rows
  rss = 0
  for (gene in seq_len(Ng)) {
    # residual of gene's expression after network and covariate effects
    resid = Yt[gene, , drop = F] -
      B[gene, -gene, drop = F] %*% Yt[-gene, , drop = F] -
      crossprod(f[[gene]], Xt[[gene]])
    rss = rss + tcrossprod(resid)
  }
  (rss / (Ng * N))[1]
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eml.R
\name{pid_to_eml_entity}
\alias{pid_to_eml_entity}
\title{Create EML entity with physical section from a DataONE PID}
\usage{
pid_to_eml_entity(mn, pid, entity_type = "otherEntity", ...)
}
\arguments{
\item{mn}{(MNode) Member Node where the PID is associated with an object.}
\item{pid}{(character) The PID of the object to create the sub-tree for.}
\item{entity_type}{(character) What kind of object to create from the input. One of "dataTable",
"spatialRaster", "spatialVector", "storedProcedure", "view", or "otherEntity".}
\item{...}{(optional) Additional arguments to be passed to \code{eml$entityType()}.}
}
\value{
(list) The entity object.
}
\description{
Create EML entity with physical section from a DataONE PID
}
\examples{
\dontrun{
# Generate EML otherEntity
pid_to_eml_entity(mn,
pid,
entity_type = "otherEntity",
entityName = "Entity Name",
entityDescription = "Description about entity")
}
}
| /man/pid_to_eml_entity.Rd | permissive | kristenpeach/arcticdatautils | R | false | true | 1,069 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eml.R
\name{pid_to_eml_entity}
\alias{pid_to_eml_entity}
\title{Create EML entity with physical section from a DataONE PID}
\usage{
pid_to_eml_entity(mn, pid, entity_type = "otherEntity", ...)
}
\arguments{
\item{mn}{(MNode) Member Node where the PID is associated with an object.}
\item{pid}{(character) The PID of the object to create the sub-tree for.}
\item{entity_type}{(character) What kind of object to create from the input. One of "dataTable",
"spatialRaster", "spatialVector", "storedProcedure", "view", or "otherEntity".}
\item{...}{(optional) Additional arguments to be passed to \code{eml$entityType()}.}
}
\value{
(list) The entity object.
}
\description{
Create EML entity with physical section from a DataONE PID
}
\examples{
\dontrun{
# Generate EML otherEntity
pid_to_eml_entity(mn,
pid,
entity_type = "otherEntity",
entityName = "Entity Name",
entityDescription = "Description about entity")
}
}
|
# Report
# Data = TmbData
# ParHat = Obj$env$parList()
# SD = Opt$SD
# category_order = 1:Data$n_c
# category_names = 1:Data$n_c
# plotdir = diag_dir
# figname = "Cov"
# plotTF = NULL
# plot_cor = TRUE
# mgp = c(2, 0.5, 0)
# tck = -0.02
# oma = c(0, 5, 2, 0)
# Summarize estimated among-category covariance/correlation for each random-
# effect component (omega/epsilon/beta x model components 1 and 2) of a
# VAST / FishStatsUtils model, optionally plotting the analytic correlation
# (or covariance) matrices to a PNG.
#
# Report          model report list (NOTE(review): not read in this body;
#                 apparently kept for call-compatibility -- confirm).
# Data            TMB data list; must carry `FieldConfig` (length-4 vector or
#                 3x2 matrix) and `n_c` (number of categories).
# ParHat          parameter estimates; loading vectors `L_*_z` are read.
# SD              optional sdreport; when supplied, standard errors of the
#                 covariance/correlation entries are extracted.
# category_order  order of categories in the plots.
# category_names  labels used for dimnames and plot axes.
# plotdir/figname output location; figname = NULL suppresses plotting.
# plotTF          which of the six components to plot (default: estimated ones).
# plot_cor        plot correlations (TRUE) or covariances (FALSE).
# mgp/tck/oma     graphical parameters passed to par().
#
# Returns (invisibly) a list of Cor_* / Cov_* arrays, one pair per component.
summarize_covariance = function (Report, Data, ParHat, SD = NULL,
                                 category_order = 1:Data$n_c,
                                 category_names = 1:Data$n_c,
                                 plotdir = paste0(getwd(), "/"),
                                 figname = "Cov",
                                 plotTF = NULL,
                                 plot_cor = TRUE,
                                 mgp = c(2, 0.5, 0),
                                 tck = -0.02,
                                 oma = c(0, 5, 2, 0), ...)
{
    # Coerce a length-4 FieldConfig vector to the 3x2 (Omega/Epsilon/Beta x
    # component) matrix form; Beta rows are filled with -2 (treated as off).
    if (is.vector(Data[["FieldConfig"]]) && length(Data[["FieldConfig"]]) ==
        4) {
        Data[["FieldConfig"]] = rbind(matrix(Data[["FieldConfig"]],
            ncol = 2, dimnames = list(c("Omega", "Epsilon"),
                c("Component_1", "Component_2"))), Beta = c(Beta1 = -2,
            Beta2 = -2))
    } else {
        if (!is.matrix(Data[["FieldConfig"]]) || !all(dim(Data[["FieldConfig"]]) ==
            c(3, 2))) {
            stop("`FieldConfig` has the wrong dimensions in `Summarize_Covariance`")
        }
    }
    if (is.null(plotTF)) {
        plotTF = as.vector(Data[["FieldConfig"]] > 0)
    } else {
        plotTF = as.vector(plotTF)
    }
    Return = list()
    # Loop over the six possible components; entries >= 0 are estimated.
    for (i in which(Data[["FieldConfig"]] >= 0)) {
        Par_name = c("omega1", "epsilon1", "beta1", "omega2",
            "epsilon2", "beta2")[i]
        L_name = paste0("L_", Par_name, "_z")
        # With an sdreport, extract the lower-cov/upper-cor estimates and
        # standard errors and unfold them into full Cor/Cov arrays.
        if (!is.null(SD)) {
            sd_summary = summary(SD)
            Slot_name = paste0("lowercov_uppercor_", Par_name)
            if (Slot_name %in% rownames(sd_summary)) {
                Cor = Cov = Mat = ThorsonUtilities::Extract_SE(SD = SD,
                  parname = Slot_name, columns = 1:2, Dim = c(Data$n_c,
                    Data$n_c))
                dimnames(Cor) = dimnames(Cov) = list(category_names,
                  category_names, c("Estimate", "Std.Error"))
                Cor[, , 1][lower.tri(Cor[, , 1])] = t(Mat[, ,
                  1])[lower.tri(Mat[, , 1])]
                diag(Cor[, , 1]) = 1
                Cor[, , 2][lower.tri(Cor[, , 2])] = t(Mat[, ,
                  2])[lower.tri(Mat[, , 2])]
                diag(Cor[, , 2]) = NA
                Cov[, , 1][upper.tri(Cov[, , 1])] = t(Mat[, ,
                  1])[upper.tri(Mat[, , 1])]
                Cov[, , 2][upper.tri(Cov[, , 2])] = t(Mat[, ,
                  2])[upper.tri(Mat[, , 2])]
            }
            else {
                Cov = Cor = NULL
            }
        }
        else {
            Cov = Cor = NULL
        }
        # Fall back to the analytic covariance computed from the loading
        # vector when no SD-based estimates are available (SEs stay NA).
        if (is.null(Cov) | is.null(Cor)) {
            Cov = Cor = array(NA, dim = c(Data$n_c, Data$n_c,
                2), dimnames = list(category_names, category_names,
                c("Estimate", "Std.Error")))
            Cov[, , "Estimate"] = FishStatsUtils:::calc_cov(L_z = ParHat[[L_name]],
                n_f = as.vector(Data[["FieldConfig"]])[i], n_c = Data$n_c)
            Cor[, , "Estimate"] = cov2cor(Cov[, , "Estimate"])
        }
        List = list(Cor, Cov)
        names(List) = paste0(c("Cor_", "Cov_"), Par_name)
        Return = c(Return, List)
    }
    # Optionally draw the analytic correlation/covariance matrices; the panel
    # layout Dim depends on which components are plotted.
    if (!is.null(figname)) {
        Dim = c(3, 2)
        if (sum(ifelse(plotTF > 0, 1, 0)) == 1)
            Dim = c(1, 1)
        if (all(ifelse(plotTF > 0, 1, 0) == c(1, 1, 0, 0, 0,
            0)) | all(ifelse(plotTF > 0, 1, 0) == c(0, 0, 1,
            1, 0, 0)))
            Dim = c(1, 2)
        if (all(ifelse(plotTF > 0, 1, 0) == c(1, 0, 1, 0, 0,
            0)) | all(ifelse(plotTF > 0, 1, 0) == c(0, 1, 0,
            1, 0, 0)))
            Dim = c(2, 1)
        if (plot_cor == TRUE) {
            convert = function(Cov) ifelse(is.na(cov2cor(Cov)),
                0, cov2cor(Cov))
        }
        else {
            convert = function(Cov) ifelse(is.na(Cov), 0, Cov)
        }
        ThorsonUtilities::save_fig(file = paste0(plotdir, figname,
            "--Analytic.png"), width = Dim[2] * 4 + 1, height = Dim[1] *
            4)#, ...)
        par(mfrow = Dim, mar = c(0, 1, 1, 0), mgp = mgp, tck = tck,
            oma = oma)
        for (i in 1:6) {
            if (i %in% which(plotTF > 0)) {
                Cov_cc = FishStatsUtils:::calc_cov(L_z = ParHat[c("L_omega1_z",
                  "L_epsilon1_z", "L_beta1_z", "L_omega2_z",
                  "L_epsilon2_z", "L_beta2_z")][[i]], n_f = as.vector(Data[["FieldConfig"]])[i],
                  n_c = Data$n_c)
                plot_cov(Cov = convert(Cov_cc)[category_order,
                  category_order], names = list(category_names[category_order],
                  NA)[[ifelse(i == 1 | i == 3 | Dim[2] == 1,
                  1, 2)]], names2 = list(1:nrow(Cov_cc), NA)[[ifelse(i ==
                  1 | i == 2, 1, 2)]], digits = 1, font = 2)
            }
        }
        dev.off()
    }
    return(invisible(Return))
} | /data/operating_model/diagnostics/summarize_covariance.r | no_license | garonen/Optimal_Allocation_GoA_Manuscript | R | false | false | 6,058 | r | # Report
# Data = TmbData
# ParHat = Obj$env$parList()
# SD = Opt$SD
# category_order = 1:Data$n_c
# category_names = 1:Data$n_c
# plotdir = diag_dir
# figname = "Cov"
# plotTF = NULL
# plot_cor = TRUE
# mgp = c(2, 0.5, 0)
# tck = -0.02
# oma = c(0, 5, 2, 0)
summarize_covariance = function (Report, Data, ParHat, SD = NULL,
category_order = 1:Data$n_c,
category_names = 1:Data$n_c,
plotdir = paste0(getwd(), "/"),
figname = "Cov",
plotTF = NULL,
plot_cor = TRUE,
mgp = c(2, 0.5, 0),
tck = -0.02,
oma = c(0, 5, 2, 0), ...)
{
if (is.vector(Data[["FieldConfig"]]) && length(Data[["FieldConfig"]]) ==
4) {
Data[["FieldConfig"]] = rbind(matrix(Data[["FieldConfig"]],
ncol = 2, dimnames = list(c("Omega", "Epsilon"),
c("Component_1", "Component_2"))), Beta = c(Beta1 = -2,
Beta2 = -2))
} else {
if (!is.matrix(Data[["FieldConfig"]]) || !all(dim(Data[["FieldConfig"]]) ==
c(3, 2))) {
stop("`FieldConfig` has the wrong dimensions in `Summarize_Covariance`")
}
}
if (is.null(plotTF)) {
plotTF = as.vector(Data[["FieldConfig"]] > 0)
} else {
plotTF = as.vector(plotTF)
}
Return = list()
for (i in which(Data[["FieldConfig"]] >= 0)) {
Par_name = c("omega1", "epsilon1", "beta1", "omega2",
"epsilon2", "beta2")[i]
L_name = paste0("L_", Par_name, "_z")
if (!is.null(SD)) {
sd_summary = summary(SD)
Slot_name = paste0("lowercov_uppercor_", Par_name)
if (Slot_name %in% rownames(sd_summary)) {
Cor = Cov = Mat = ThorsonUtilities::Extract_SE(SD = SD,
parname = Slot_name, columns = 1:2, Dim = c(Data$n_c,
Data$n_c))
dimnames(Cor) = dimnames(Cov) = list(category_names,
category_names, c("Estimate", "Std.Error"))
Cor[, , 1][lower.tri(Cor[, , 1])] = t(Mat[, ,
1])[lower.tri(Mat[, , 1])]
diag(Cor[, , 1]) = 1
Cor[, , 2][lower.tri(Cor[, , 2])] = t(Mat[, ,
2])[lower.tri(Mat[, , 2])]
diag(Cor[, , 2]) = NA
Cov[, , 1][upper.tri(Cov[, , 1])] = t(Mat[, ,
1])[upper.tri(Mat[, , 1])]
Cov[, , 2][upper.tri(Cov[, , 2])] = t(Mat[, ,
2])[upper.tri(Mat[, , 2])]
}
else {
Cov = Cor = NULL
}
}
else {
Cov = Cor = NULL
}
if (is.null(Cov) | is.null(Cor)) {
Cov = Cor = array(NA, dim = c(Data$n_c, Data$n_c,
2), dimnames = list(category_names, category_names,
c("Estimate", "Std.Error")))
Cov[, , "Estimate"] = FishStatsUtils:::calc_cov(L_z = ParHat[[L_name]],
n_f = as.vector(Data[["FieldConfig"]])[i], n_c = Data$n_c)
Cor[, , "Estimate"] = cov2cor(Cov[, , "Estimate"])
}
List = list(Cor, Cov)
names(List) = paste0(c("Cor_", "Cov_"), Par_name)
Return = c(Return, List)
}
if (!is.null(figname)) {
Dim = c(3, 2)
if (sum(ifelse(plotTF > 0, 1, 0)) == 1)
Dim = c(1, 1)
if (all(ifelse(plotTF > 0, 1, 0) == c(1, 1, 0, 0, 0,
0)) | all(ifelse(plotTF > 0, 1, 0) == c(0, 0, 1,
1, 0, 0)))
Dim = c(1, 2)
if (all(ifelse(plotTF > 0, 1, 0) == c(1, 0, 1, 0, 0,
0)) | all(ifelse(plotTF > 0, 1, 0) == c(0, 1, 0,
1, 0, 0)))
Dim = c(2, 1)
if (plot_cor == TRUE) {
convert = function(Cov) ifelse(is.na(cov2cor(Cov)),
0, cov2cor(Cov))
}
else {
convert = function(Cov) ifelse(is.na(Cov), 0, Cov)
}
ThorsonUtilities::save_fig(file = paste0(plotdir, figname,
"--Analytic.png"), width = Dim[2] * 4 + 1, height = Dim[1] *
4)#, ...)
par(mfrow = Dim, mar = c(0, 1, 1, 0), mgp = mgp, tck = tck,
oma = oma)
for (i in 1:6) {
if (i %in% which(plotTF > 0)) {
Cov_cc = FishStatsUtils:::calc_cov(L_z = ParHat[c("L_omega1_z",
"L_epsilon1_z", "L_beta1_z", "L_omega2_z",
"L_epsilon2_z", "L_beta2_z")][[i]], n_f = as.vector(Data[["FieldConfig"]])[i],
n_c = Data$n_c)
plot_cov(Cov = convert(Cov_cc)[category_order,
category_order], names = list(category_names[category_order],
NA)[[ifelse(i == 1 | i == 3 | Dim[2] == 1,
1, 2)]], names2 = list(1:nrow(Cov_cc), NA)[[ifelse(i ==
1 | i == 2, 1, 2)]], digits = 1, font = 2)
}
}
dev.off()
}
return(invisible(Return))
} |
\name{geneFilter}
\alias{geneFilter}
\title{geneFilter}
\description{the function to filter genes by Integrative Correlation}
\usage{geneFilter(obj, cor.cutoff = 0.5)}
\arguments{
\item{obj}{a list of ExpressionSet, matrix or SummarizedExperiment objects. If its elements are matrices,
columns represent samples, rows represent genes}
\item{cor.cutoff}{the cutoff threshold for filtering genes. Only when the integrative correlation
between every pair of sets is larger than the cutoff value, will the gene
be selected.}
}
\value{returns a list of ExpressionSet, matrix or SummarizedExperiment objects
with genes filtered}
\references{Garrett-Mayer, E., Parmigiani, G., Zhong, X., Cope, L.,
Gabrielson, E., Cross-study validation and combined analysis of gene
expression microarray data. Biostatistics. 2008 Apr;9(2):333-354.}
\author{Yuqing Zhang, Christoph Bernau, Levi Waldron}
\examples{
set.seed(8)
library(curatedOvarianData)
library(GenomicRanges)
source(system.file("extdata", "patientselection.config",
package="curatedOvarianData"))
source(system.file("extdata", "createEsetList.R", package="curatedOvarianData"))
esets.list <- lapply(esets, function(eset){
return(eset[1:1500, 1:10])
})
esets.list <- esets.list[1:5]
result.set <- geneFilter(esets.list, 0)
result.set
### as we cannot calculate correlation with one set, this function just
### delivers the same set if esets has length 1
result.oneset <- geneFilter(esets.list[1])
result.oneset
## Support matrices
X.list <- lapply(esets.list, function(eset){
return(exprs(eset)) ## Columns represent samples!
})
result.set <- geneFilter(X.list, 0)
dim(result.set[[1]])
## Support SummarizedExperiment
nrows <- 200; ncols <- 6
counts <- matrix(runif(nrows * ncols, 1, 1e4), nrows)
rowData <- GRanges(rep(c("chr1", "chr2"), c(50, 150)),
IRanges(floor(runif(200, 1e5, 1e6)), width=100),
strand=sample(c("+", "-"), 200, TRUE))
colData <- DataFrame(Treatment=rep(c("ChIP", "Input"), 3),
row.names=LETTERS[1:6])
sset <- SummarizedExperiment(assays=SimpleList(counts=counts),
rowData=rowData, colData=colData)
s.list <- list(sset, sset)
result.set <- geneFilter(s.list, 0.9)
## the same set should resemble each other, no genes filtered
dim(assay(result.set[[1]]))
}
| /man/geneFilter.Rd | no_license | lwaldron/simulatorZ | R | false | false | 2,477 | rd | \name{geneFilter}
\alias{geneFilter}
\title{geneFilter}
\description{the function to filter genes by Integrative Correlation}
\usage{geneFilter(obj, cor.cutoff = 0.5)}
\arguments{
\item{obj}{a list of ExpressionSet, matrix or SummarizedExperiment objects. If its elements are matrices,
columns represent samples, rows represent genes}
\item{cor.cutoff}{the cutoff threshold for filtering genes. Only when the integrative correlation
between every pair of sets is larger than the cutoff value, will the gene
be selected.}
}
\value{returns a list of ExpressionSet, matrix or SummarizedExperiment objects
with genes filtered}
\references{Garrett-Mayer, E., Parmigiani, G., Zhong, X., Cope, L.,
Gabrielson, E., Cross-study validation and combined analysis of gene
expression microarray data. Biostatistics. 2008 Apr;9(2):333-354.}
\author{Yuqing Zhang, Christoph Bernau, Levi Waldron}
\examples{
set.seed(8)
library(curatedOvarianData)
library(GenomicRanges)
source(system.file("extdata", "patientselection.config",
package="curatedOvarianData"))
source(system.file("extdata", "createEsetList.R", package="curatedOvarianData"))
esets.list <- lapply(esets, function(eset){
return(eset[1:1500, 1:10])
})
esets.list <- esets.list[1:5]
result.set <- geneFilter(esets.list, 0)
result.set
### as we cannot calculate correlation with one set, this function just
### delivers the same set if esets has length 1
result.oneset <- geneFilter(esets.list[1])
result.oneset
## Support matrices
X.list <- lapply(esets.list, function(eset){
return(exprs(eset)) ## Columns represent samples!
})
result.set <- geneFilter(X.list, 0)
dim(result.set[[1]])
## Support SummarizedExperiment
nrows <- 200; ncols <- 6
counts <- matrix(runif(nrows * ncols, 1, 1e4), nrows)
rowData <- GRanges(rep(c("chr1", "chr2"), c(50, 150)),
IRanges(floor(runif(200, 1e5, 1e6)), width=100),
strand=sample(c("+", "-"), 200, TRUE))
colData <- DataFrame(Treatment=rep(c("ChIP", "Input"), 3),
row.names=LETTERS[1:6])
sset <- SummarizedExperiment(assays=SimpleList(counts=counts),
rowData=rowData, colData=colData)
s.list <- list(sset, sset)
result.set <- geneFilter(s.list, 0.9)
## the same set should resemble each other, no genes filtered
dim(assay(result.set[[1]]))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_met_data.R
\name{get_met_data}
\alias{get_met_data}
\title{Get data from a meteorological station}
\usage{
get_met_data(station_id, years = NULL, full_data = FALSE,
add_fields = NULL, make_hourly = TRUE, local_file_dir = NULL)
}
\arguments{
\item{station_id}{A station identifier composed of the station's USAF and
WBAN numbers, separated by a hyphen.}
\item{years}{The years for which station met data will be collected. If not
specified then all records for all available years will be obtained for the
station.}
\item{full_data}{Include all additional meteorological data found in the
dataset's additional data section?}
\item{add_fields}{A vector of categories for additional meteorological data
to include (instead of all available categories).}
\item{make_hourly}{Transforms data to force values to the start of each hour.
All data is bucketed by hour and all missing hours are filled with \code{NA}s.
This regularizes each year of data, where the number of records per year of
data will be either 8760 or 8784 (depending on whether a year is a leap
year). By default this is \code{TRUE}.}
\item{local_file_dir}{Path to local meteorological data files. If specified,
then data files will be downloaded to and retrieved from this location and
not from the remote data store.}
}
\value{
Returns a tibble with at least 10 variables. While times are recorded
using the Universal Time Code (UTC) in the source data, they are adjusted
here to local standard time for the station's locale.
\describe{
\item{id}{A character string identifying the fixed weather station
from the USAF Master Station Catalog identifier and the WBAN identifier.}
\item{time}{A datetime value representing the observation time.}
\item{temp}{Air temperature measured in degrees Celsius. Conversions to
degrees Fahrenheit may be calculated with \code{(temp * 9) / 5 + 32}.}
\item{wd}{The angle of wind direction, measured in a clockwise direction,
between true north and the direction from which the wind is blowing. For
example, \code{wd = 90} indicates the wind is blowing from due east. \code{wd = 225}
indicates the wind is blowing from the south west. The minimum value is \code{1},
and the maximum value is \code{360}.}
\item{ws}{Wind speed in meters per second. Wind speed in feet per second can
be estimated by \code{ws * 3.28084}.}
\item{atmos_pres}{The air pressure in hectopascals relative to Mean Sea Level
(MSL).}
\item{dew_point}{The temperature in degrees Celsius to which a given parcel
of air must be cooled at constant pressure and water vapor content in order
for saturation to occur.}
\item{rh}{Relative humidity, measured as a percentage, as calculated using
the August-Roche-Magnus approximation.}
\item{ceil_hgt}{The height above ground level of the lowest cloud cover or
other obscuring phenomena amounting to at least 5/8 sky coverage. Measured in
meters. Unlimited height (no obstruction) is denoted by the value \code{22000}.}
\item{visibility}{The horizontal distance at which an object can be seen and
identified. Measured in meters. Values greater than \code{16000} are entered as
\code{16000} (which constitutes 10 mile visibility).}
}
}
\description{
Obtain one or more years of meteorological data for a particular station.
}
\examples{
\dontrun{
# Obtain two years of data from the
# met station with the ID value of
# "999999-63897"
met_data <-
get_met_data(
station_id = "999999-63897",
years = 2013:2014
)
}
}
| /man/get_met_data.Rd | permissive | dondealban/stationaRy | R | false | true | 3,529 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_met_data.R
\name{get_met_data}
\alias{get_met_data}
\title{Get data from a meteorological station}
\usage{
get_met_data(station_id, years = NULL, full_data = FALSE,
add_fields = NULL, make_hourly = TRUE, local_file_dir = NULL)
}
\arguments{
\item{station_id}{A station identifier composed of the station's USAF and
WBAN numbers, separated by a hyphen.}
\item{years}{The years for which station met data will be collected. If not
specified then all records for all available years will be obtained for the
station.}
\item{full_data}{Include all additional meteorological data found in the
dataset's additional data section?}
\item{add_fields}{A vector of categories for additional meteorological data
to include (instead of all available categories).}
\item{make_hourly}{Transforms data to force values to the start of each hour.
All data is bucketed by hour and all missing hours are filled with \code{NA}s.
This regularizes each year of data, where the number of records per year of
data will be either 8760 or 8784 (depending on whether a year is a leap
year). By default this is \code{TRUE}.}
\item{local_file_dir}{Path to local meteorological data files. If specified,
then data files will be downloaded to and retrieved from this location and
not from the remote data store.}
}
\value{
Returns a tibble with at least 10 variables. While times are recorded
using the Universal Time Code (UTC) in the source data, they are adjusted
here to local standard time for the station's locale.
\describe{
\item{id}{A character string identifying the fixed weather station
from the USAF Master Station Catalog identifier and the WBAN identifier.}
\item{time}{A datetime value representing the observation time.}
\item{temp}{Air temperature measured in degrees Celsius. Conversions to
degrees Fahrenheit may be calculated with \code{(temp * 9) / 5 + 32}.}
\item{wd}{The angle of wind direction, measured in a clockwise direction,
between true north and the direction from which the wind is blowing. For
example, \code{wd = 90} indicates the wind is blowing from due east. \code{wd = 225}
indicates the wind is blowing from the south west. The minimum value is \code{1},
and the maximum value is \code{360}.}
\item{ws}{Wind speed in meters per second. Wind speed in feet per second can
be estimated by \code{ws * 3.28084}.}
\item{atmos_pres}{The air pressure in hectopascals relative to Mean Sea Level
(MSL).}
\item{dew_point}{The temperature in degrees Celsius to which a given parcel
of air must be cooled at constant pressure and water vapor content in order
for saturation to occur.}
\item{rh}{Relative humidity, measured as a percentage, as calculated using
the August-Roche-Magnus approximation.}
\item{ceil_hgt}{The height above ground level of the lowest cloud cover or
other obscuring phenomena amounting to at least 5/8 sky coverage. Measured in
meters. Unlimited height (no obstruction) is denoted by the value \code{22000}.}
\item{visibility}{The horizontal distance at which an object can be seen and
identified. Measured in meters. Values greater than \code{16000} are entered as
\code{16000} (which constitutes 10 mile visibility).}
}
}
\description{
Obtain one or more years of meteorological data for a particular station.
}
\examples{
\dontrun{
# Obtain two years of data from the
# met station with the ID value of
# "999999-63897"
met_data <-
get_met_data(
station_id = "999999-63897",
years = 2013:2014
)
}
}
|
# Read data in R
# Note that data is separated by ;
# "?" marks missing values in the source file, so map it to NA on read
electric <- read.table("household_power_consumption.txt", sep = ";", header = TRUE,na.strings ="?")
# convert date
electric$Date <- as.Date(electric$Date,"%d/%m/%Y")
# convert time
# combine Date and Time into one string, then parse it into a POSIXlt timestamp
electric$datetime <- paste(electric$Date, electric$Time)
electric$Time <- strptime(electric$datetime, format = "%Y-%m-%d %H:%M:%S")
# Subset data to the dates 2007-02-01 and 2007-02-02
date1 <- as.Date("2007-02-01")
date2 <- as.Date("2007-02-02")
electric_subset <- subset(electric, Date >= date1 & Date <= date2)
# Create first plot
# histogram of Global Active Power for the two-day window, saved as 480x480 PNG
png(file = "plot1.png", width = 480, height = 480)
hist(electric_subset$Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", col = "red")
dev.off()
| /plot1.R | no_license | davidrip83/ExData_Plotting1 | R | false | false | 761 | r | # Read data in R
# Note that data is separated by ;
electric <- read.table("household_power_consumption.txt", sep = ";", header = TRUE,na.strings ="?")
# convert date
electric$Date <- as.Date(electric$Date,"%d/%m/%Y")
# convert time
electric$datetime <- paste(electric$Date, electric$Time)
electric$Time <- strptime(electric$datetime, format = "%Y-%m-%d %H:%M:%S")
# Subset data to the dates 2007-02-01 and 2007-02-02
date1 <- as.Date("2007-02-01")
date2 <- as.Date("2007-02-02")
electric_subset <- subset(electric, Date >= date1 & Date <= date2)
# Create first plot
png(file = "plot1.png", width = 480, height = 480)
hist(electric_subset$Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", col = "red")
dev.off()
|
m<-lm(formula=Satisfaction~Age+Gender+Airline.Status+Price.Sensitivity+Year.of.First.Flight+
No.of.Flights.p.a.+Percentage.of.Flight.with.other.Airlines+Type.of.Travel+
No.of.other.Loyalty.Cards+Shopping.Amount.at.Airport+Eating.and.Drinking.at.Airport+Class+
Day.of.Month+Scheduled.Departure.Hour+
Departure.Delay.in.Minutes+Arrival.Delay.in.Minutes+Flight.time.in.minutes+
Flight.Distance,data=satisfactionSurvey)
summary(m)
| /LinearModelling.R | no_license | fall2018-saltz/group2_ist687_ec | R | false | false | 504 | r |
m<-lm(formula=Satisfaction~Age+Gender+Airline.Status+Price.Sensitivity+Year.of.First.Flight+
No.of.Flights.p.a.+Percentage.of.Flight.with.other.Airlines+Type.of.Travel+
No.of.other.Loyalty.Cards+Shopping.Amount.at.Airport+Eating.and.Drinking.at.Airport+Class+
Day.of.Month+Scheduled.Departure.Hour+
Departure.Delay.in.Minutes+Arrival.Delay.in.Minutes+Flight.time.in.minutes+
Flight.Distance,data=satisfactionSurvey)
summary(m)
|
# Factor Sampling ---------------------------------------------------------
test_that("Sample TEF", {
set.seed(1234)
tef <- sample_tef(n = 10, params = list(1, 10, 100))
expect_is(tef, "list")
# ensure that the list has the required elements
expect_equal(names(tef), c("type", "samples", "details"))
# ensure that the samples matches the number requested
expect_equal(length(tef$samples), 10)
# ensure that TEF values are returned as integers
expect_is(tef$samples, "integer")
# ensure that values of samples is correct
expect_equal(unlist(tef$samples),
c(7, 30, 2, 34, 36, 13, 14, 14, 9, 15))
})
context("Sample DIFF")
test_that("Sample DIFF", {
set.seed(1234)
dat <- sample_diff(n = 10, params = list(50, 70, 75, 3))
expect_is(dat, "list")
# ensure that the list has the required elements
expect_equal(names(dat), c("type", "samples", "details"))
# ensure that the samples matches the number requested
expect_equal(length(dat$samples), 10)
# ensure that values of samples is correct
expect_equal(signif(unlist(dat$samples), digits = 4),
signif(c(72.5519454551502, 65.1852603020272, 59.1564180836877,
74.5816023178688, 64.1192226440207, 63.561355776164,
70.1284833577168, 69.9960887031119, 70.0802721600923,
71.4683219144408), digits = 4))
})
test_that("Multi control diff works", {
set.seed(1234)
diff_estimates <- list(list(min = 1, mode = 10, max = 20, shape = 1),
list(min = 2, mode = 15, max = 100, shape = 3))
dat <- map(diff_estimates, ~sample_diff(n = 10, params = .x))
expect_is(dat, "list")
# ensure that we received two responses back
expect_equal(length(dat), 2)
# ensure that each list has the required elements
expect_equal(names(dat[[1]]), c("type", "samples", "details"))
expect_equal(names(dat[[2]]), c("type", "samples", "details"))
# ensure that the samples matches the number requested
expect_equal(length(map(dat, "samples") %>% purrr::as_vector()), 20)
# ensure that values of samples is correct
expect_equal(signif(map(dat, "samples") %>% purrr::as_vector(), digits = 4),
signif(c(3.90393636733577, 12.0433334741972, 16.4016497257758,
1.40316316329966, 12.9556613644064, 13.407051718503,
6.90857118330148, 7.06525326308045, 6.96573684805713,
5.27395602706608, 19.325236233819, 11.5371092097717,
14.4718568402617, 30.9888681331648, 57.8265729409247,
26.8078941229072, 18.7219717579054, 12.6241144280687,
13.6123515927865, 32.76590164781
), digits = 4))
})
context("Sample TC")
test_that("Sample TC", {
set.seed(1234)
tc <- sample_tc(n = 10, params = list(50, 75, 100, 4))
expect_is(tc, "list")
# ensure that the list has the required elements
expect_equal(names(tc), c("type", "samples", "details"))
# ensure that the samples matches the number requested
expect_equal(length(tc$samples), 10)
# ensure that values of samples is correct
expect_equal(signif(unlist(tc$samples), digits = 4),
signif(c(61.7026564773373, 78.188740471894, 87.0623477417219,
53.1987199785052, 79.9184628308895, 80.7889924652588,
68.4387021948896, 68.7541469869603, 68.554057026653,
64.9764652390671), digits = 4))
})
context("Select Loss Opportunities")
test_that("Select loss opportunites handles iterations with zero loss events", {
  # every difficulty value exceeds its paired threat strength, so no
  # threat event should become a loss event
  threat_strengths <- c(0.2, 0.3, 0.4)
  diff_strengths <- c(0.3, 0.4, 0.5)
  dat <- select_loss_opportunities(threat_strengths, diff_strengths)
  # difficulty exceeds threat strength by 0.1 in every iteration
  expect_equal(dat$details$mean_diff_exceedance, 0.1)
  # samples are logical event flags, all FALSE here
  expect_equal(all(is.logical(dat$samples)), TRUE)
  expect_equal(sum(dat$samples), 0)
})
context("Sample VULN")
test_that("Sample VULN works with binom", {
set.seed(1234)
dat <- sample_vuln(n = 10, params = list(1, .5))
expect_is(dat, "list")
# ensure that the list has the required elements
expect_equal(names(dat), c("type", "samples", "details"))
# ensure that the samples matches the number requested
expect_equal(length(dat$samples), 10)
# ensure that values of samples is correct
expect_equal(sum(dat$samples), 7)
})
test_that("Sample VULN works with TC and DIFF", {
set.seed(1234)
tc <- sample_tc(n = 10, params = list(50, 70, 85, 2))$samples
diff <- sample_diff(n = 10, params = list(50, 70, 85, 2))$samples
dat <- sample_vuln(n = 10, .func = "evaluator::select_loss_opportunities", params = list(tc = tc, diff = diff))
expect_is(dat, "list")
# ensure that the list has the required elements
expect_equivalent(names(dat), c("type", "samples", "details"))
# ensure that the samples matches the number requested
expect_equivalent(length(dat$samples), 10)
# ensure that values of samples is correct
expect_equivalent(sum(dat$samples), 5)
# ensure that mean_tc_exceedance is set correctly
expect_equivalent(floor(dat$details$mean_tc_exceedance), 7)
# ensure that mean_diff_exceedance is set correctly
expect_equivalent(floor(dat$details$mean_diff_exceedance), 8)
})
test_that("TC and DIFF exceedance handles NA threat events", {
set.seed(1234)
tc <- c(NA)
diff <- sample_diff(n = 2, params = list(50, 70, 85, 2))$samples
dat <- sample_vuln(n = 2, .func = "evaluator::select_loss_opportunities", params = list(tc = tc, diff = diff))
expect_is(dat, "list")
# ensure that mean_tc_exceedance is set correctly
expect_equivalent(dat$details$mean_tc_exceedance, NA)
# ensure that mean_diff_exceedance is set correctly
expect_equivalent(dat$details$mean_diff_exceedance, NA)
})
context("Sample LM")
test_that("Sample LM", {
set.seed(1234)
lm <- sample_lm(n = 10, params = list(min = 1*10^4, mode = 5*10^4,
max = 1*10^7, shape = 3))
expect_is(lm, "list")
# ensure that the list has the required elements
expect_equal(names(lm), c("type", "samples", "details"))
# ensure that the samples matches the number requested
expect_equal(length(lm$samples), 10)
# ensure that values of samples is correct
expect_equal(signif(unlist(lm$samples), digits = 4),
signif(c(332422.727880636, 2831751.79415706, 35602.2608120876,
3349352.73654269, 3632631.71769846, 927503.010814968,
966756.805719722, 941718.366417413, 569057.598433507,
1069488.76293628), digits = 4))
})
test_that("Non-standard distributions work as expected", {
set.seed(1234)
lm <- sample_lm(.func = "EnvStats::rlnormTrunc", n = 10,
params = list(meanlog = 1, sdlog = 2, min = 1, max = 2))
expect_is(lm, "list")
# ensure that the list has the required elements
expect_equal(names(lm), c("type", "samples", "details"))
# ensure that the samples matches the number requested
expect_equal(length(lm$samples), 10)
# ensure that values of samples is correct
expect_equal(signif(unlist(lm$samples), digits = 4),
signif(c(1.087017, 1.552746, 1.539039, 1.553887, 1.823434,
1.571874, 1.007058, 1.184094, 1.599599, 1.442124),
digits = 4))
})
context("Sample LEF")
test_that("Sample LEF works with composition function", {
set.seed(1234)
tef <- sample_tef(n = 10, params = list(1, 10, 20))
vuln <- sample_vuln(n = 10, params = list(1, .6))
dat <- sample_lef(n = 10, .func = "evaluator::compare_tef_vuln",
params = list(tef = tef$samples, vuln = vuln$samples))
expect_is(dat, "list")
# ensure that the list has the required elements
expect_equal(names(dat), c("type", "samples", "details"))
# ensure that the samples matches the number requested
expect_equal(length(dat$samples), 10)
# ensure that LEF samples are always integers
expect_is(dat$samples, "integer")
# ensure that values of samples is correct
expect_equal(dat$samples, c(5, 11, 15, 2, 12, 0, 8, 0, 0, 6))
})
# Model Tests -------------------------------------------------------------
context("LM simulation model")
test_that("Default simulation model returns expected results", {
scen <- tidyrisk_scenario(
tef_params = list(func = "mc2d::rpert", min = 1, mode = 10, max = 100, shape = 4),
tc_params = list(func = "mc2d::rpert", min = 1, mode = 10, max = 75, shape = 100),
lm_params = list(func = "mc2d::rpert", min = 1, mode = 100, max = 10000, shape = 54),
diff_params = list(list(func = "mc2d::rpert", min = 1, mode = 10, max = 50, shape = 4)))
results <- run_simulation(scen, iterations = 100)
expect_s3_class(results, "tbl_df")
expect_equal(nrow(results), 100)
expect_equal(length(results), 11)
expect_equal(sum(results$threat_events), 2287)
expect_equal(sum(results$loss_events), 786)
})
context("PLM-SR simulation model")
test_that("SR model works as expected", {
scenario <- structure(list(scenario_id = "1",
scenario = "Inadequate human resources are available to execute the informaton security strategic security plan.",
tcomm = "Organizational Leadership", domain_id = "ORG",
controls = "1, 5, 7, 32, 14, 15, 16",
diff_params = list(list(list(func = "mc2d::rpert", min = 70L, mode = 85L, max = 98L, shape = 4L),
list(func = "mc2d::rpert", min = 50L, mode = 70L, max = 84L, shape = 4L),
list(func = "mc2d::rpert", min = 0L, mode = 10L, max = 30L, shape = 4L),
list(func = "mc2d::rpert", min = 50L, mode = 70L, max = 84L, shape = 4L),
list(func = "mc2d::rpert", min = 20L, mode = 30L, max = 50L, shape = 4L),
list(func = "mc2d::rpert", min = 20L, mode = 30L, max = 50L, shape = 4L),
list(func = "mc2d::rpert", min = 50L, mode = 70L, max = 84L, shape = 4L))),
tef_params = list(list(func = "mc2d::rpert", min = 10L, mode = 24, max = 52L, shape = 4L)),
tc_params = list(list(func = "mc2d::rpert", min = 33L, mode = 50, max = 60L, shape = 3L)),
plm_params = list(list(func = "mc2d::rpert", min = 10000L, mode = 20000, max = 500000L, shape = 4L)),
sr_params = list(list(func = "mc2d::rpert", min = 10000L, mode = 20000, max = 500000L, shape = 4L)),
model = "openfair_tef_tc_diff_plm_sr"), row.names = c(NA, -1L),
class = c("tbl_df", "tbl", "data.frame"))
scenario <- scenario %>%
mutate(scenario = pmap(list(tef_params = tef_params, tc_params = tc_params,
diff_params = diff_params, plm_params = plm_params,
sr_params = sr_params, model = model), tidyrisk_scenario))
results <- run_simulation(scenario[[1, "scenario"]], 100L)
expect_s3_class(results, "tbl_df")
expect_equal(nrow(results), 100)
expect_equal(length(results), 11)
expect_equal(sum(results$threat_events), 2686)
expect_equivalent(stats::quantile(results$ale, 0.95), 2792183, tolerance = 0.1)
expect_equal(sum(results$loss_events), 772)
})
| /tests/testthat/test-openfair.R | permissive | abdullahalbyati/evaluator | R | false | false | 11,489 | r |
# Factor Sampling ---------------------------------------------------------
test_that("Sample TEF", {
set.seed(1234)
tef <- sample_tef(n = 10, params = list(1, 10, 100))
expect_is(tef, "list")
# ensure that the list has the required elements
expect_equal(names(tef), c("type", "samples", "details"))
# ensure that the samples matches the number requested
expect_equal(length(tef$samples), 10)
# ensure that TEF values are returned as integers
expect_is(tef$samples, "integer")
# ensure that values of samples is correct
expect_equal(unlist(tef$samples),
c(7, 30, 2, 34, 36, 13, 14, 14, 9, 15))
})
context("Sample DIFF")
test_that("Sample DIFF", {
set.seed(1234)
dat <- sample_diff(n = 10, params = list(50, 70, 75, 3))
expect_is(dat, "list")
# ensure that the list has the required elements
expect_equal(names(dat), c("type", "samples", "details"))
# ensure that the samples matches the number requested
expect_equal(length(dat$samples), 10)
# ensure that values of samples is correct
expect_equal(signif(unlist(dat$samples), digits = 4),
signif(c(72.5519454551502, 65.1852603020272, 59.1564180836877,
74.5816023178688, 64.1192226440207, 63.561355776164,
70.1284833577168, 69.9960887031119, 70.0802721600923,
71.4683219144408), digits = 4))
})
test_that("Multi control diff works", {
set.seed(1234)
diff_estimates <- list(list(min = 1, mode = 10, max = 20, shape = 1),
list(min = 2, mode = 15, max = 100, shape = 3))
dat <- map(diff_estimates, ~sample_diff(n = 10, params = .x))
expect_is(dat, "list")
# ensure that we received two responses back
expect_equal(length(dat), 2)
# ensure that each list has the required elements
expect_equal(names(dat[[1]]), c("type", "samples", "details"))
expect_equal(names(dat[[2]]), c("type", "samples", "details"))
# ensure that the samples matches the number requested
expect_equal(length(map(dat, "samples") %>% purrr::as_vector()), 20)
# ensure that values of samples is correct
expect_equal(signif(map(dat, "samples") %>% purrr::as_vector(), digits = 4),
signif(c(3.90393636733577, 12.0433334741972, 16.4016497257758,
1.40316316329966, 12.9556613644064, 13.407051718503,
6.90857118330148, 7.06525326308045, 6.96573684805713,
5.27395602706608, 19.325236233819, 11.5371092097717,
14.4718568402617, 30.9888681331648, 57.8265729409247,
26.8078941229072, 18.7219717579054, 12.6241144280687,
13.6123515927865, 32.76590164781
), digits = 4))
})
context("Sample TC")
test_that("Sample TC", {
set.seed(1234)
tc <- sample_tc(n = 10, params = list(50, 75, 100, 4))
expect_is(tc, "list")
# ensure that the list has the required elements
expect_equal(names(tc), c("type", "samples", "details"))
# ensure that the samples matches the number requested
expect_equal(length(tc$samples), 10)
# ensure that values of samples is correct
expect_equal(signif(unlist(tc$samples), digits = 4),
signif(c(61.7026564773373, 78.188740471894, 87.0623477417219,
53.1987199785052, 79.9184628308895, 80.7889924652588,
68.4387021948896, 68.7541469869603, 68.554057026653,
64.9764652390671), digits = 4))
})
context("Select Loss Opportunities")
test_that("Select loss opportunities handles iterations with zero loss events", {
  # Every threat strength is strictly below its matching difficulty
  # strength, so no loss events should be selected in this fixture.
  threat_strengths <- c(0.2, 0.3, 0.4)
  diff_strengths <- c(0.3, 0.4, 0.5)
  dat <- select_loss_opportunities(threat_strengths, diff_strengths)
  # Difficulty exceeds threat by 0.1 on average across the three draws
  expect_equal(dat$details$mean_diff_exceedance, 0.1)
  # Samples are a logical vector flagging loss events ...
  expect_true(all(is.logical(dat$samples)))
  # ... and none of them should be TRUE here
  expect_equal(sum(dat$samples), 0)
})
context("Sample VULN")
test_that("Sample VULN works with binom", {
set.seed(1234)
dat <- sample_vuln(n = 10, params = list(1, .5))
expect_is(dat, "list")
# ensure that the list has the required elements
expect_equal(names(dat), c("type", "samples", "details"))
# ensure that the samples matches the number requested
expect_equal(length(dat$samples), 10)
# ensure that values of samples is correct
expect_equal(sum(dat$samples), 7)
})
test_that("Sample VULN works with TC and DIFF", {
set.seed(1234)
tc <- sample_tc(n = 10, params = list(50, 70, 85, 2))$samples
diff <- sample_diff(n = 10, params = list(50, 70, 85, 2))$samples
dat <- sample_vuln(n = 10, .func = "evaluator::select_loss_opportunities", params = list(tc = tc, diff = diff))
expect_is(dat, "list")
# ensure that the list has the required elements
expect_equivalent(names(dat), c("type", "samples", "details"))
# ensure that the samples matches the number requested
expect_equivalent(length(dat$samples), 10)
# ensure that values of samples is correct
expect_equivalent(sum(dat$samples), 5)
# ensure that mean_tc_exceedance is set correctly
expect_equivalent(floor(dat$details$mean_tc_exceedance), 7)
# ensure that mean_diff_exceedance is set correctly
expect_equivalent(floor(dat$details$mean_diff_exceedance), 8)
})
test_that("TC and DIFF exceedance handles NA threat events", {
set.seed(1234)
tc <- c(NA)
diff <- sample_diff(n = 2, params = list(50, 70, 85, 2))$samples
dat <- sample_vuln(n = 2, .func = "evaluator::select_loss_opportunities", params = list(tc = tc, diff = diff))
expect_is(dat, "list")
# ensure that mean_tc_exceedance is set correctly
expect_equivalent(dat$details$mean_tc_exceedance, NA)
# ensure that mean_diff_exceedance is set correctly
expect_equivalent(dat$details$mean_diff_exceedance, NA)
})
context("Sample LM")
test_that("Sample LM", {
set.seed(1234)
lm <- sample_lm(n = 10, params = list(min = 1*10^4, mode = 5*10^4,
max = 1*10^7, shape = 3))
expect_is(lm, "list")
# ensure that the list has the required elements
expect_equal(names(lm), c("type", "samples", "details"))
# ensure that the samples matches the number requested
expect_equal(length(lm$samples), 10)
# ensure that values of samples is correct
expect_equal(signif(unlist(lm$samples), digits = 4),
signif(c(332422.727880636, 2831751.79415706, 35602.2608120876,
3349352.73654269, 3632631.71769846, 927503.010814968,
966756.805719722, 941718.366417413, 569057.598433507,
1069488.76293628), digits = 4))
})
test_that("Non-standard distributions work as expected", {
set.seed(1234)
lm <- sample_lm(.func = "EnvStats::rlnormTrunc", n = 10,
params = list(meanlog = 1, sdlog = 2, min = 1, max = 2))
expect_is(lm, "list")
# ensure that the list has the required elements
expect_equal(names(lm), c("type", "samples", "details"))
# ensure that the samples matches the number requested
expect_equal(length(lm$samples), 10)
# ensure that values of samples is correct
expect_equal(signif(unlist(lm$samples), digits = 4),
signif(c(1.087017, 1.552746, 1.539039, 1.553887, 1.823434,
1.571874, 1.007058, 1.184094, 1.599599, 1.442124),
digits = 4))
})
context("Sample LEF")
test_that("Sample LEF works with composition function", {
set.seed(1234)
tef <- sample_tef(n = 10, params = list(1, 10, 20))
vuln <- sample_vuln(n = 10, params = list(1, .6))
dat <- sample_lef(n = 10, .func = "evaluator::compare_tef_vuln",
params = list(tef = tef$samples, vuln = vuln$samples))
expect_is(dat, "list")
# ensure that the list has the required elements
expect_equal(names(dat), c("type", "samples", "details"))
# ensure that the samples matches the number requested
expect_equal(length(dat$samples), 10)
# ensure that LEF samples are always integers
expect_is(dat$samples, "integer")
# ensure that values of samples is correct
expect_equal(dat$samples, c(5, 11, 15, 2, 12, 0, 8, 0, 0, 6))
})
# Model Tests -------------------------------------------------------------
context("LM simulation model")
test_that("Default simulation model returns expected results", {
scen <- tidyrisk_scenario(
tef_params = list(func = "mc2d::rpert", min = 1, mode = 10, max = 100, shape = 4),
tc_params = list(func = "mc2d::rpert", min = 1, mode = 10, max = 75, shape = 100),
lm_params = list(func = "mc2d::rpert", min = 1, mode = 100, max = 10000, shape = 54),
diff_params = list(list(func = "mc2d::rpert", min = 1, mode = 10, max = 50, shape = 4)))
results <- run_simulation(scen, iterations = 100)
expect_s3_class(results, "tbl_df")
expect_equal(nrow(results), 100)
expect_equal(length(results), 11)
expect_equal(sum(results$threat_events), 2287)
expect_equal(sum(results$loss_events), 786)
})
context("PLM-SR simulation model")
test_that("SR model works as expected", {
scenario <- structure(list(scenario_id = "1",
scenario = "Inadequate human resources are available to execute the informaton security strategic security plan.",
tcomm = "Organizational Leadership", domain_id = "ORG",
controls = "1, 5, 7, 32, 14, 15, 16",
diff_params = list(list(list(func = "mc2d::rpert", min = 70L, mode = 85L, max = 98L, shape = 4L),
list(func = "mc2d::rpert", min = 50L, mode = 70L, max = 84L, shape = 4L),
list(func = "mc2d::rpert", min = 0L, mode = 10L, max = 30L, shape = 4L),
list(func = "mc2d::rpert", min = 50L, mode = 70L, max = 84L, shape = 4L),
list(func = "mc2d::rpert", min = 20L, mode = 30L, max = 50L, shape = 4L),
list(func = "mc2d::rpert", min = 20L, mode = 30L, max = 50L, shape = 4L),
list(func = "mc2d::rpert", min = 50L, mode = 70L, max = 84L, shape = 4L))),
tef_params = list(list(func = "mc2d::rpert", min = 10L, mode = 24, max = 52L, shape = 4L)),
tc_params = list(list(func = "mc2d::rpert", min = 33L, mode = 50, max = 60L, shape = 3L)),
plm_params = list(list(func = "mc2d::rpert", min = 10000L, mode = 20000, max = 500000L, shape = 4L)),
sr_params = list(list(func = "mc2d::rpert", min = 10000L, mode = 20000, max = 500000L, shape = 4L)),
model = "openfair_tef_tc_diff_plm_sr"), row.names = c(NA, -1L),
class = c("tbl_df", "tbl", "data.frame"))
scenario <- scenario %>%
mutate(scenario = pmap(list(tef_params = tef_params, tc_params = tc_params,
diff_params = diff_params, plm_params = plm_params,
sr_params = sr_params, model = model), tidyrisk_scenario))
results <- run_simulation(scenario[[1, "scenario"]], 100L)
expect_s3_class(results, "tbl_df")
expect_equal(nrow(results), 100)
expect_equal(length(results), 11)
expect_equal(sum(results$threat_events), 2686)
expect_equivalent(stats::quantile(results$ale, 0.95), 2792183, tolerance = 0.1)
expect_equal(sum(results$loss_events), 772)
})
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/plot.versionTest.R
\name{plot.versionTest}
\alias{plot.versionTest}
\title{plot.versionTest}
\usage{
\method{plot}{versionTest}(x, versions = setdiff(names(x), "test"))
}
\arguments{
\item{x}{a data frame of the type returned by compareTests()}
\item{versions}{Which software versions to include (default is all).}
}
\description{
plot.versionTest
}
\details{
Plots the results of compareTests() for an individual piece of software.
}
| /man/plot.versionTest.Rd | no_license | davetgerrard/kanute | R | false | false | 523 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/plot.versionTest.R
\name{plot.versionTest}
\alias{plot.versionTest}
\title{plot.versionTest}
\usage{
\method{plot}{versionTest}(x, versions = setdiff(names(x), "test"))
}
\arguments{
\item{x}{a data frame of the type returned by compareTests()}
\item{versions}{Which software versions to include (default is all).}
}
\description{
plot.versionTest
}
\details{
Plots the results of compareTests() for an individual piece of software.
}
|
# Residual (semi-parametric) bootstrap for an adaptive lasso fit.
#
# Fits an adaptive lasso of y on the remaining columns of `data`, then
# resamples the centred residuals `iter` times, refits the model on each
# synthetic response, and collects the bootstrap coefficients.
#
# NOTE(review): relies on a `lasso()` function defined elsewhere in the
# project -- its return value is assumed to carry $coef, $y and $res.
#
# Args:
#   data: data frame whose first column is the response `y`.
#   iter: number of bootstrap replications.
#
# Returns a list with:
#   [[1]] matrix of bootstrap coefficients (intercept row dropped),
#   [[2]] original adaptive-lasso coefficients (intercept dropped),
#   [[3]] variance of the original centred residuals,
#   [[4]] vector of residual variances from each bootstrap fit.
funboot <- function(data, iter) {
  n <- nrow(data)
  # Preallocate: resampled errors, synthetic responses, coefficients,
  # and per-replication residual variances.
  estar <- matrix(NA, n, iter)
  ystar <- matrix(NA, n, iter)
  u <- matrix(NA, ncol(data), iter)
  bsresvar <- rep(NA, iter)

  # Adaptive-lasso weights: inverse absolute coefficients of a first-pass
  # fit (intercept dropped). If every weight is infinite (all first-pass
  # coefficients were zero), fall back to an unweighted fit.
  w <- (1 / abs(lasso(y ~ ., data)$coef))[-1]
  if (sum(w != Inf) == 0) w <- NULL

  LASSO <- lasso(y ~ ., data = data, weights = w)
  coef <- (LASSO$coef)[-1]
  prediction <- as.matrix(LASSO$y - LASSO$res)
  # Centre the residuals so resampled errors have mean zero
  residu <- (LASSO$res - mean(LASSO$res))

  for (i in seq_len(iter)) {
    # Resample residuals with replacement and build a synthetic response
    estar[, i] <- as.matrix(sample(residu, length(residu), replace = TRUE))
    ystar[, i] <- prediction + estar[, i]
    z <- ystar[, i]
    data2 <- data.frame(z, data[, -1])
    # Recompute adaptive weights on the bootstrap sample
    v <- (1 / abs(lasso(z ~ ., data2)$coef))[-1]
    if (sum(v != Inf) == 0) v <- NULL
    lstar <- lasso(z ~ ., data2, weights = v)
    u[, i] <- as.matrix(lstar$coef)
    bsresvar[i] <- var(lstar$res)
  }

  return(list(u[-1, ], coef, var(residu), bsresvar))
}
| /Functions/fun.R | no_license | Allisterh/Forecasting-ProjetEZ | R | false | false | 851 | r |
# Residual bootstrap for an adaptive lasso: fit on `data` (first column
# is the response y), then resample the centred residuals `iter` times,
# refit on each synthetic response, and collect bootstrap coefficients.
# NOTE(review): relies on a `lasso()` function defined elsewhere in the
# project; its result is assumed to carry $coef, $y and $res.
funboot<-function(data,iter){
# Preallocate: resampled errors, synthetic responses, coefficients
estar=matrix(NA,length(data[,1]),iter)
ystar=matrix(NA,length(data[,1]),iter)
u=matrix(NA,dim(data)[2],iter)
bsresvar <- rep(NA,iter)
# Adaptive-lasso weights from a first-pass fit (intercept dropped);
# if all weights are infinite, fall back to an unweighted fit
w<-(1/abs(lasso(y~.,data)$coef))[-1]
if(sum(w!=Inf)==0) w<-NULL
LASSO<-lasso( y ~ . , data=data, weights=w )
coef<-(LASSO$coef)[-1]
prediction<-as.matrix(LASSO$y-LASSO$res)
# Centre residuals so resampled errors have mean zero
residu<-(LASSO$res-mean(LASSO$res))
for (i in 1:iter){
# Resample residuals with replacement; build a synthetic response
estar[,i]<-as.matrix(sample(residu,length(residu),replace = T))
ystar[,i]<-prediction+estar[,i]
z<-ystar[,i]
data2<-data.frame(z,data[,-1])
# Recompute adaptive weights on the bootstrap sample
v<-(1/abs(lasso(z~ . , data2)$coef))[-1]
if(sum(v!=Inf)==0) v<-NULL
lstar <- lasso(z ~ . , data2 , weights=v)
u[,i]<-as.matrix(lstar$coef)
bsresvar[i] <- var(lstar$res)
}
# [[1]] bootstrap coefs (intercept row dropped), [[2]] original coefs,
# [[3]] residual variance, [[4]] per-replication residual variances
return(list(u[-1,],coef,var(residu),bsresvar))
}
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
source("funcoes.R", encoding = 'UTF-8')
source("texto.R", encoding = 'UTF-8')
# UI definition.
#
# Two top-level tabs: Exploratory Graph Analysis (EGA) and Exploratory
# Factor Analysis (EFA); each has an interactive panel plus an
# explanatory text panel.
#
# Fix: the EFA tab previously reused the input IDs "model" and
# "algorithm" already taken by the EGA tab. Shiny input IDs must be
# unique across the whole UI, so the EFA controls now use
# "efa_"-prefixed IDs (their conditionalPanel conditions follow). The
# server reads input$model / input$algorithm only for the EGA plot, so
# no server change is required.
ui <- fluidPage(
  navbarPage(
    title = 'Estimando Variáveis Latentes',
    tabPanel(
      'Análise Exploratória com Grafos',
      tabsetPanel(
        tabPanel(
          'Explore a AEG',
          fluidRow(
            sidebarPanel(
              wellPanel(
                h4('Estimação do Modelo'),
                selectInput("model",
                            "Estimação do modelo",
                            c('glasso', 'TMFG'),
                            'glasso'),
                selectInput('algorithm',
                            'Algoritmo',
                            c('walktrap', 'louvain'),
                            'walktrap'),
                conditionalPanel("input.model == 'glasso'",
                                 h5(strong('Você escolheu glasso')),
                                 h6(strong('glasso é um algoritmo...'))),
                conditionalPanel("input.model == 'TMFG'",
                                 h5(strong('Você escolheu TMFG')),
                                 h6(strong('TMFG é um algoritmo...')))
              )
            ),
            mainPanel(plotOutput('graph'))
          )
        ),
        tabPanel('O que é a AEG?',
                 mainPanel(texto_ega))
      )
    ),
    tabPanel(
      'Análise Fatorial Exploratória',
      tabsetPanel(
        # Panel 1 - interactive figure
        tabPanel(
          'Explore a AFE',
          fluidRow(
            sidebarPanel(
              wellPanel(
                h4('Estimação da EFA'),
                # NOTE(review): selected = 'glasso' is not among the
                # choices ('isso', 'aquilo'); Shiny falls back to the
                # first choice -- confirm the intended default.
                selectInput("efa_model",
                            "Estimação do modelo",
                            c('isso', 'aquilo'),
                            'glasso'),
                selectInput('efa_algorithm',
                            'Algoritmo',
                            c('walktrap', 'louvain'),
                            'walktrap'),
                conditionalPanel("input.efa_model == 'isso'",
                                 h5(strong('Você escolheu isso')),
                                 h6(strong('isso é um algoritmo...'))),
                conditionalPanel("input.efa_model == 'aquilo'",
                                 h5(strong('Você escolheu aquilo')),
                                 h6(strong('aquilo é um algoritmo...')))
              )
            ),
            mainPanel(plotOutput('efa'))
          )
        ),
        # Panel 2 - explanatory text
        tabPanel('O que é a AFE?',
                 mainPanel('textotextotexto'))
      )
    )
  )
)
# Define server logic required to draw a histogram
# Server: wires the two plot outputs to their helper functions
# (efa_plot / ega_plot, sourced from funcoes.R).
server <- function(input, output) {
  # EGA ==== re-renders whenever the model/algorithm selections change
  output$graph <- renderPlot({
    ega_plot(
      modelo = input$model,
      algoritmo = input$algorithm
    )
  })

  # EFA ==== efa_plot() takes no reactive inputs
  output$efa <- renderPlot({
    efa_plot()
  })
}
# Run the application
shinyApp(ui = ui, server = server) | /testes_de_app.R | permissive | GabrielReisR/est_lat_var_por_shiny | R | false | false | 4,377 | r | #
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
source("funcoes.R", encoding = 'UTF-8')
source("texto.R", encoding = 'UTF-8')
# UI definition.
#
# Two top-level tabs: Exploratory Graph Analysis (EGA) and Exploratory
# Factor Analysis (EFA); each has an interactive panel plus an
# explanatory text panel.
#
# Fix: the EFA tab previously reused the input IDs "model" and
# "algorithm" already taken by the EGA tab. Shiny input IDs must be
# unique across the whole UI, so the EFA controls now use
# "efa_"-prefixed IDs (their conditionalPanel conditions follow). The
# server reads input$model / input$algorithm only for the EGA plot, so
# no server change is required.
ui <- fluidPage(
  navbarPage(
    title = 'Estimando Variáveis Latentes',
    tabPanel(
      'Análise Exploratória com Grafos',
      tabsetPanel(
        tabPanel(
          'Explore a AEG',
          fluidRow(
            sidebarPanel(
              wellPanel(
                h4('Estimação do Modelo'),
                selectInput("model",
                            "Estimação do modelo",
                            c('glasso', 'TMFG'),
                            'glasso'),
                selectInput('algorithm',
                            'Algoritmo',
                            c('walktrap', 'louvain'),
                            'walktrap'),
                conditionalPanel("input.model == 'glasso'",
                                 h5(strong('Você escolheu glasso')),
                                 h6(strong('glasso é um algoritmo...'))),
                conditionalPanel("input.model == 'TMFG'",
                                 h5(strong('Você escolheu TMFG')),
                                 h6(strong('TMFG é um algoritmo...')))
              )
            ),
            mainPanel(plotOutput('graph'))
          )
        ),
        tabPanel('O que é a AEG?',
                 mainPanel(texto_ega))
      )
    ),
    tabPanel(
      'Análise Fatorial Exploratória',
      tabsetPanel(
        # Panel 1 - interactive figure
        tabPanel(
          'Explore a AFE',
          fluidRow(
            sidebarPanel(
              wellPanel(
                h4('Estimação da EFA'),
                # NOTE(review): selected = 'glasso' is not among the
                # choices ('isso', 'aquilo'); Shiny falls back to the
                # first choice -- confirm the intended default.
                selectInput("efa_model",
                            "Estimação do modelo",
                            c('isso', 'aquilo'),
                            'glasso'),
                selectInput('efa_algorithm',
                            'Algoritmo',
                            c('walktrap', 'louvain'),
                            'walktrap'),
                conditionalPanel("input.efa_model == 'isso'",
                                 h5(strong('Você escolheu isso')),
                                 h6(strong('isso é um algoritmo...'))),
                conditionalPanel("input.efa_model == 'aquilo'",
                                 h5(strong('Você escolheu aquilo')),
                                 h6(strong('aquilo é um algoritmo...')))
              )
            ),
            mainPanel(plotOutput('efa'))
          )
        ),
        # Panel 2 - explanatory text
        tabPanel('O que é a AFE?',
                 mainPanel('textotextotexto'))
      )
    )
  )
)
# Define server logic required to draw a histogram
# Server: wires the two plot outputs to their helper functions
# (efa_plot / ega_plot, sourced from funcoes.R).
server <- function(input, output) {
  # EGA ==== re-renders whenever the model/algorithm selections change
  output$graph <- renderPlot({
    ega_plot(
      modelo = input$model,
      algoritmo = input$algorithm
    )
  })

  # EFA ==== efa_plot() takes no reactive inputs
  output$efa <- renderPlot({
    efa_plot()
  })
}
# Run the application
shinyApp(ui = ui, server = server) |
# Cumulative-cashflow boxplots for the no-intervention ("base") group.
# Reads the simulation output, derives per-phase net cashflow
# (revenue - cost) and its running sum, then plots one box per phase.

df <- read.csv("./output/example.csv")

# Only use observations with no intervention
tmp <- subset(df, df$group == 'base')

# Net cashflow per phase: cash<i> = revenue<i> - cost<i>, i = 0..15
# (replaces 16 hand-unrolled assignments with a loop over the index)
for (i in 0:15) {
  tmp[[paste0("cash", i)]] <- tmp[[paste0("revenue", i)]] - tmp[[paste0("cost", i)]]
}

# Cumulative cashflow per phase: running sum of cash0..cash15
tmp[["ccash0"]] <- tmp[["cash0"]]
for (i in 1:15) {
  tmp[[paste0("ccash", i)]] <- tmp[[paste0("cash", i)]] + tmp[[paste0("ccash", i - 1)]]
}

# One box per cumulative-cashflow column; the list is unnamed so the
# default group positions/labels match the original 16-argument call.
boxplot(unname(as.list(tmp[paste0("ccash", 0:15)])),
        las = 1,
        xlab = 'Development followed by sales',
        ylab = 'Cumulative cashflow')
# NOTE(review): 16 boxes are drawn but 17 ticks/labels are supplied
# ('PC','P1'..'P4' plus 0..11); the 17th tick falls past the data --
# confirm whether one phase label is extraneous.
axis(1, at = seq(1, 17), labels = c('PC', 'P1', 'P2', 'P3', 'P4', seq(0, 11)))
| /examples/multiple/stats/cumulative-cashflow-01.R | permissive | chrokh/mc-rndsim | R | false | false | 1,803 | r | df <- read.csv("./output/example.csv")
# Cumulative-cashflow boxplots for the no-intervention ("base") group.
# `df` is expected to have been read from the simulation output already
# (the read.csv call sits immediately above this block).

# Only use observations with no intervention
tmp <- subset(df, df$group == 'base')

# Net cashflow per phase: cash<i> = revenue<i> - cost<i>, i = 0..15
# (replaces 16 hand-unrolled assignments with a loop over the index)
for (i in 0:15) {
  tmp[[paste0("cash", i)]] <- tmp[[paste0("revenue", i)]] - tmp[[paste0("cost", i)]]
}

# Cumulative cashflow per phase: running sum of cash0..cash15
tmp[["ccash0"]] <- tmp[["cash0"]]
for (i in 1:15) {
  tmp[[paste0("ccash", i)]] <- tmp[[paste0("cash", i)]] + tmp[[paste0("ccash", i - 1)]]
}

# One box per cumulative-cashflow column; the list is unnamed so the
# default group positions/labels match the original 16-argument call.
boxplot(unname(as.list(tmp[paste0("ccash", 0:15)])),
        las = 1,
        xlab = 'Development followed by sales',
        ylab = 'Cumulative cashflow')
# NOTE(review): 16 boxes are drawn but 17 ticks/labels are supplied
# ('PC','P1'..'P4' plus 0..11); the 17th tick falls past the data --
# confirm whether one phase label is extraneous.
axis(1, at = seq(1, 17), labels = c('PC', 'P1', 'P2', 'P3', 'P4', seq(0, 11)))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nodes.R
\name{build_ranged_derived_field_apply_node}
\alias{build_ranged_derived_field_apply_node}
\title{Build Apply node with interval nodes for DerivedField node.}
\usage{
build_ranged_derived_field_apply_node(var_details_row, db_name)
}
\arguments{
\item{var_details_row}{Variable details sheet row.}
\item{db_name}{Database name.}
}
\value{
Apply node with intervals for DerivedField node.
}
\description{
Build Apply node with interval nodes for DerivedField node.
}
| /man/build_ranged_derived_field_apply_node.Rd | permissive | cran/recodeflow | R | false | true | 571 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nodes.R
\name{build_ranged_derived_field_apply_node}
\alias{build_ranged_derived_field_apply_node}
\title{Build Apply node with interval nodes for DerivedField node.}
\usage{
build_ranged_derived_field_apply_node(var_details_row, db_name)
}
\arguments{
\item{var_details_row}{Variable details sheet row.}
\item{db_name}{Database name.}
}
\value{
Apply node with intervals for DerivedField node.
}
\description{
Build Apply node with interval nodes for DerivedField node.
}
|
# Social capital and the success of economic sanctions
#
# Data Collection
# Variables - IV and DV
# Analysis
#
# Taehee whang
# 02/09/2016
# 06/12/2017
library(foreign)
library(haven)
# NOTE(review): stargazer() is called repeatedly below but
# library(stargazer) is never loaded in this script -- confirm it is
# attached elsewhere (e.g. via .Rprofile) or add the library() call.
## Data = sanctions + sc + KimYW_NGO data = sanctions_sc_KimYW_merge.dta
sanctions_sc_KimYW_merge <- read_dta("C:/Users/thwha/Dropbox/KimH_prj/data/sanctions_sc_KimYW_merge.dta")
# attach() is load-bearing here: later glm() formulas reference bare
# column names (e.g. `sanctionoutcome`) that only resolve through this
# attached data frame when `data=` does not supply them. Do not remove
# without rewriting those model calls.
attach(sanctions_sc_KimYW_merge)
## Analysis
## Descriptive statistics: All observations
summary.data.frame(sanctions_sc_KimYW_merge)
dim(sanctions_sc_KimYW_merge)
newdata <- data.frame(sanctions_sc_KimYW_merge$sanctionoutcome,
sanctions_sc_KimYW_merge$trust,
sanctions_sc_KimYW_merge$m_polparty1,
sanctions_sc_KimYW_merge$m_profassociation1,
sanctions_sc_KimYW_merge$m_humanrights1,
sanctions_sc_KimYW_merge$m_religious1,
sanctions_sc_KimYW_merge$c_government1,
sanctions_sc_KimYW_merge$c_parliament1,
sanctions_sc_KimYW_merge$c_polparty1,
sanctions_sc_KimYW_merge$c_justicelegalsyscourts1,
sanctions_sc_KimYW_merge$c_armedforces1,
sanctions_sc_KimYW_merge$c_churches1,
sanctions_sc_KimYW_merge$c_police1,
sanctions_sc_KimYW_merge$contig,
sanctions_sc_KimYW_merge$distance,
sanctions_sc_KimYW_merge$lntarget_gdppc_gle,
sanctions_sc_KimYW_merge$salience_dummy2,
sanctions_sc_KimYW_merge$alliance2,
sanctions_sc_KimYW_merge$targetdem)
stargazer(newdata, type="text", out="newdata.txt")
cor(newdata, use="complete.obs")
summary(sanctions_sc_KimYW_merge$trust)
mean(sanctions_sc_KimYW_merge$trust, na.rm=T)
###########
## TRUST ##
###########
# Stata equivalents kept for cross-checking:
# probit sanctionoutcome trust contig distance lntarget_gdppc_gle salience_dummy2 alliance2 targetdem , robust /*full model*/
# margins, at(trust=(4.9(3)59.4))
# Logit of sanction success on generalized trust plus controls.
# NOTE(review): `data=newdata` cannot supply these bare names --
# newdata's columns carry the `sanctions_sc_KimYW_merge.` prefix -- so
# the variables resolve via the attach() above; confirm this is intended.
trust <- glm(sanctionoutcome ~ trust + contig + distance + lntarget_gdppc_gle + salience_dummy2 + alliance2 + targetdem, family=binomial(link='logit'), data=newdata)
summary(trust)
stargazer(trust, type="text", out="trust.txt")
# probit version
trust_p <- glm(sanctionoutcome ~ trust + contig + distance + lntarget_gdppc_gle + salience_dummy2 + alliance2 + targetdem, family=binomial(link='probit'), data=sanctions_sc_KimYW_merge)
summary(trust_p)
# NOTE(review): this overwrites the logit table written to trust.txt
# just above -- consider a distinct file name (e.g. trust_p.txt).
stargazer(trust_p, type="text", out="trust.txt")
################
## MEMBERSHIP ##
################
# probit sanctionoutcome m_polparty1 contig distance lntarget_gdppc_gle salience_dummy2 alliance2 targetdem, robust
# margins, at(m_polparty1=(0(3)45.5))
# Probit sanctionoutcome m_profassociation1 contig distance lntarget_gdppc_gle salience_dummy2 alliance2 targetdem, robust
# margins, at(m_profassociation1=(0(1.5)25.7))
membership1 <- glm(sanctionoutcome ~ m_polparty1 + contig + distance + lntarget_gdppc_gle + salience_dummy2 + alliance2 + targetdem, family=binomial(link='logit'), data=sanctions_sc_KimYW_merge)
summary(membership1)
stargazer(membership1, type="text", out="membership1.txt")
membership2 <- glm(sanctionoutcome ~ m_profassociation1 + contig + distance + lntarget_gdppc_gle + salience_dummy2 + alliance2 + targetdem, family=binomial(link='logit'), data=sanctions_sc_KimYW_merge)
summary(membership2)
stargazer(membership2, type="text", out="membership2.txt")
stargazer(trust, membership1, membership2, type="text", out="trust_membership.txt")
################
## CONFIDENCE ##
################
# probit sanctionoutcome c_polparty1 contig distance lntarget_gdppc_gle salience_dummy2 alliance2 targetdem, robust
# margins, at(c_polparty1=(0.9(3)31.1))
#
# probit sanctionoutcome c_government1 contig distance lntarget_gdppc_gle salience_dummy2 alliance2 targetdem, robust
# margins, at(c_government1=(1.9(3)54.5))
#
# probit sanctionoutcome c_parliament1 contig distance lntarget_gdppc_gle salience_dummy2 alliance2 targetdem, robust
# margins, at(c_parliament1=(0.9(3)46.1))
#
# probit sanctionoutcome c_justicelegalsyscourts1 contig distance lntarget_gdppc_gle salience_dummy2 alliance2 targetdem, robust
# margins, at(c_justicelegalsyscourts1=(6.7(2)37.3))
confidence1 <- glm(sanctionoutcome ~ c_polparty1 + contig + distance + lntarget_gdppc_gle + salience_dummy2 + alliance2 + targetdem, family=binomial(link='logit'), data=sanctions_sc_KimYW_merge)
summary(confidence1)
confidence2 <- glm(sanctionoutcome ~ c_government1 + contig + distance + lntarget_gdppc_gle + salience_dummy2 + alliance2 + targetdem, family=binomial(link='logit'), data=sanctions_sc_KimYW_merge)
summary(confidence2)
confidence3 <- glm(sanctionoutcome ~ c_parliament1 + contig + distance + lntarget_gdppc_gle + salience_dummy2 + alliance2 + targetdem, family=binomial(link='logit'), data=sanctions_sc_KimYW_merge)
summary(confidence3)
confidence4 <- glm(sanctionoutcome ~ c_justicelegalsyscourts1 + contig + distance + lntarget_gdppc_gle + salience_dummy2 + alliance2 + targetdem, family=binomial(link='logit'), data=sanctions_sc_KimYW_merge)
summary(confidence4)
stargazer(confidence1, confidence2, confidence3, confidence4, type="text", out="confidence5.txt")
###############################################
## PREDICTED PROBABILITIES (for data points) ##
###############################################
# NOTE(review): predict() has no `data` argument -- `data=newdata` is
# silently swallowed by `...`, so these are fitted probabilities for the
# estimation sample (which appears to be the intent here; `newdata=`
# would be needed to score new observations)
pred_pr_trust <- predict(trust, data=newdata, type="response", se.fit=TRUE)
pred_pr_membership1 <- predict(membership1, data=newdata, type="response", se.fit=TRUE)
pred_pr_membership2 <- predict(membership2, data=newdata, type="response", se.fit=TRUE)
pred_pr_confidence1 <- predict(confidence1, data=newdata, type="response", se.fit=TRUE)
pred_pr_confidence2 <- predict(confidence2, data=newdata, type="response", se.fit=TRUE)
pred_pr_confidence3 <- predict(confidence3, data=newdata, type="response", se.fit=TRUE)
pred_pr_confidence4 <- predict(confidence4, data=newdata, type="response", se.fit=TRUE)
cbind(pred_pr_trust$fit, pred_pr_trust$se.fit)
####################################################################################
## PREDICTED PROBABILITIES (for key variables; here I show the results for trust) ##
####################################################################################
# Hold all covariates at their sample means while trust sweeps its
# observed range (4.9 to 59.4) over 100 points.
allmean <- data.frame(trust = seq(4.9, 59.4, length.out=100),
contig = rep(mean(newdata$sanctions_sc_KimYW_merge.contig,na.rm = T),100),
distance = rep(mean(newdata$sanctions_sc_KimYW_merge.distance,na.rm = T),100),
lntarget_gdppc_gle = rep(mean(newdata$sanctions_sc_KimYW_merge.lntarget_gdppc_gle,na.rm = T),100),
salience_dummy2 = rep(mean(newdata$sanctions_sc_KimYW_merge.salience_dummy2,na.rm = T),100),
alliance2 = rep(mean(newdata$sanctions_sc_KimYW_merge.alliance2,na.rm = T),100),
targetdem = rep(mean(newdata$sanctions_sc_KimYW_merge.targetdem,na.rm = T),100))
# NOTE(review): `trust` is both the fitted glm object and one of its
# predictors -- confusing but functional
pred_pr_trust <- predict(trust, newdata=allmean, type="response", se.fit=TRUE)
pred_pr_trust_result <- cbind(allmean, pred_pr_trust$fit, pred_pr_trust$se.fit)
plot(pred_pr_trust_result$trust,pred_pr_trust$fit, xlim = c(4.9,59.4), ylim = c(0,0.4), lty=1, xlab="trust", ylab = "Pr(success)")
| /rdo/Rcode.R | no_license | pherephobia/SCEconSanction | R | false | false | 7,498 | r | # Social capital and the success of economic sanctions
#
# Data Collection
# Variables - IV and DV
# Analysis
#
# Taehee whang
# 02/09/2016
# 06/12/2017
library(foreign)
library(haven)
## Data = sanctions + sc + KimYW_NGO data = sanctions_sc_KimYW_merge.dta
# NOTE(review): absolute Windows path -- runs only on the author's machine
sanctions_sc_KimYW_merge <- read_dta("C:/Users/thwha/Dropbox/KimH_prj/data/sanctions_sc_KimYW_merge.dta")
# NOTE(review): attach() is discouraged (masking risk); later model calls
# also pass data= explicitly, but some formulas rely on the attached columns
attach(sanctions_sc_KimYW_merge)
## Analysis
## Descriptive statistics: All observations
summary.data.frame(sanctions_sc_KimYW_merge)
dim(sanctions_sc_KimYW_merge)
newdata <- data.frame(sanctions_sc_KimYW_merge$sanctionoutcome,
sanctions_sc_KimYW_merge$trust,
sanctions_sc_KimYW_merge$m_polparty1,
sanctions_sc_KimYW_merge$m_profassociation1,
sanctions_sc_KimYW_merge$m_humanrights1,
sanctions_sc_KimYW_merge$m_religious1,
sanctions_sc_KimYW_merge$c_government1,
sanctions_sc_KimYW_merge$c_parliament1,
sanctions_sc_KimYW_merge$c_polparty1,
sanctions_sc_KimYW_merge$c_justicelegalsyscourts1,
sanctions_sc_KimYW_merge$c_armedforces1,
sanctions_sc_KimYW_merge$c_churches1,
sanctions_sc_KimYW_merge$c_police1,
sanctions_sc_KimYW_merge$contig,
sanctions_sc_KimYW_merge$distance,
sanctions_sc_KimYW_merge$lntarget_gdppc_gle,
sanctions_sc_KimYW_merge$salience_dummy2,
sanctions_sc_KimYW_merge$alliance2,
sanctions_sc_KimYW_merge$targetdem)
stargazer(newdata, type="text", out="newdata.txt")
cor(newdata, use="complete.obs")
summary(sanctions_sc_KimYW_merge$trust)
mean(sanctions_sc_KimYW_merge$trust, na.rm=T)
###########
## TRUST ##
###########
# probit sanctionoutcome trust contig distance lntarget_gdppc_gle salience_dummy2 alliance2 targetdem , robust /*full model*/
# margins, at(trust=(4.9(3)59.4))
trust <- glm(sanctionoutcome ~ trust + contig + distance + lntarget_gdppc_gle + salience_dummy2 + alliance2 + targetdem, family=binomial(link='logit'), data=newdata)
summary(trust)
stargazer(trust, type="text", out="trust.txt")
# probit version
trust_p <- glm(sanctionoutcome ~ trust + contig + distance + lntarget_gdppc_gle + salience_dummy2 + alliance2 + targetdem, family=binomial(link='probit'), data=sanctions_sc_KimYW_merge)
summary(trust_p)
stargazer(trust_p, type="text", out="trust.txt")
################
## MEMBERSHIP ##
################
# probit sanctionoutcome m_polparty1 contig distance lntarget_gdppc_gle salience_dummy2 alliance2 targetdem, robust
# margins, at(m_polparty1=(0(3)45.5))
# Probit sanctionoutcome m_profassociation1 contig distance lntarget_gdppc_gle salience_dummy2 alliance2 targetdem, robust
# margins, at(m_profassociation1=(0(1.5)25.7))
membership1 <- glm(sanctionoutcome ~ m_polparty1 + contig + distance + lntarget_gdppc_gle + salience_dummy2 + alliance2 + targetdem, family=binomial(link='logit'), data=sanctions_sc_KimYW_merge)
summary(membership1)
stargazer(membership1, type="text", out="membership1.txt")
membership2 <- glm(sanctionoutcome ~ m_profassociation1 + contig + distance + lntarget_gdppc_gle + salience_dummy2 + alliance2 + targetdem, family=binomial(link='logit'), data=sanctions_sc_KimYW_merge)
summary(membership2)
stargazer(membership2, type="text", out="membership2.txt")
stargazer(trust, membership1, membership2, type="text", out="trust_membership.txt")
################
## CONFIDENCE ##
################
# probit sanctionoutcome c_polparty1 contig distance lntarget_gdppc_gle salience_dummy2 alliance2 targetdem, robust
# margins, at(c_polparty1=(0.9(3)31.1))
#
# probit sanctionoutcome c_government1 contig distance lntarget_gdppc_gle salience_dummy2 alliance2 targetdem, robust
# margins, at(c_government1=(1.9(3)54.5))
#
# probit sanctionoutcome c_parliament1 contig distance lntarget_gdppc_gle salience_dummy2 alliance2 targetdem, robust
# margins, at(c_parliament1=(0.9(3)46.1))
#
# probit sanctionoutcome c_justicelegalsyscourts1 contig distance lntarget_gdppc_gle salience_dummy2 alliance2 targetdem, robust
# margins, at(c_justicelegalsyscourts1=(6.7(2)37.3))
confidence1 <- glm(sanctionoutcome ~ c_polparty1 + contig + distance + lntarget_gdppc_gle + salience_dummy2 + alliance2 + targetdem, family=binomial(link='logit'), data=sanctions_sc_KimYW_merge)
summary(confidence1)
confidence2 <- glm(sanctionoutcome ~ c_government1 + contig + distance + lntarget_gdppc_gle + salience_dummy2 + alliance2 + targetdem, family=binomial(link='logit'), data=sanctions_sc_KimYW_merge)
summary(confidence2)
confidence3 <- glm(sanctionoutcome ~ c_parliament1 + contig + distance + lntarget_gdppc_gle + salience_dummy2 + alliance2 + targetdem, family=binomial(link='logit'), data=sanctions_sc_KimYW_merge)
summary(confidence3)
confidence4 <- glm(sanctionoutcome ~ c_justicelegalsyscourts1 + contig + distance + lntarget_gdppc_gle + salience_dummy2 + alliance2 + targetdem, family=binomial(link='logit'), data=sanctions_sc_KimYW_merge)
summary(confidence4)
stargazer(confidence1, confidence2, confidence3, confidence4, type="text", out="confidence5.txt")
###############################################
## PREDICTED PROBABILITIES (for data points) ##
###############################################
# NOTE(review): predict() takes `newdata=`, not `data=`; the `data=newdata`
# below is silently ignored, so these are in-sample fitted probabilities
# (apparently the intent, given the section title)
pred_pr_trust <- predict(trust, data=newdata, type="response", se.fit=TRUE)
pred_pr_membership1 <- predict(membership1, data=newdata, type="response", se.fit=TRUE)
pred_pr_membership2 <- predict(membership2, data=newdata, type="response", se.fit=TRUE)
pred_pr_confidence1 <- predict(confidence1, data=newdata, type="response", se.fit=TRUE)
pred_pr_confidence2 <- predict(confidence2, data=newdata, type="response", se.fit=TRUE)
pred_pr_confidence3 <- predict(confidence3, data=newdata, type="response", se.fit=TRUE)
pred_pr_confidence4 <- predict(confidence4, data=newdata, type="response", se.fit=TRUE)
cbind(pred_pr_trust$fit, pred_pr_trust$se.fit)
####################################################################################
## PREDICTED PROBABILITIES (for key variables; here I show the results for trust) ##
####################################################################################
# Covariates fixed at their sample means; trust varies over its range.
# Column names like sanctions_sc_KimYW_merge.contig are the auto-generated
# names data.frame() gave the columns of `newdata`.
allmean <- data.frame(trust = seq(4.9, 59.4, length.out=100),
contig = rep(mean(newdata$sanctions_sc_KimYW_merge.contig,na.rm = T),100),
distance = rep(mean(newdata$sanctions_sc_KimYW_merge.distance,na.rm = T),100),
lntarget_gdppc_gle = rep(mean(newdata$sanctions_sc_KimYW_merge.lntarget_gdppc_gle,na.rm = T),100),
salience_dummy2 = rep(mean(newdata$sanctions_sc_KimYW_merge.salience_dummy2,na.rm = T),100),
alliance2 = rep(mean(newdata$sanctions_sc_KimYW_merge.alliance2,na.rm = T),100),
targetdem = rep(mean(newdata$sanctions_sc_KimYW_merge.targetdem,na.rm = T),100))
pred_pr_trust <- predict(trust, newdata=allmean, type="response", se.fit=TRUE)
pred_pr_trust_result <- cbind(allmean, pred_pr_trust$fit, pred_pr_trust$se.fit)
plot(pred_pr_trust_result$trust,pred_pr_trust$fit, xlim = c(4.9,59.4), ylim = c(0,0.4), lty=1, xlab="trust", ylab = "Pr(success)")
|
# Display the CSP/ERGo NED landforms layer on an interactive map via rgee.
library(rgee)
# ee_reattach() # reattach ee as a reserved word
ee_Initialize()  # start / authenticate the Earth Engine session
# single-band image of US landform class codes
dataset <- ee$Image("CSP/ERGo/1_0/US/landforms")
landforms <- dataset$select("constant")
# visualization parameters: class codes 11-42 mapped onto a fixed palette
landformsVis <- list(
  min = 11.0,
  max = 42.0,
  palette = c(
    "141414", "383838", "808080", "EBEB8F", "F7D311", "AA0000", "D89382",
    "DDC9C9", "DCCDCE", "1C6330", "68AA63", "B5C98E", "E1F0E5", "a975ba",
    "6f198c"
  )
)
# center near the Colorado Front Range at zoom level 11 and add the layer
Map$setCenter(-105.58, 40.5498, 11)
Map$addLayer(landforms, landformsVis, "NED Landforms")
| /examples/Datasets/Terrain/us_ned_landforms.R | permissive | benardonyango/rgee | R | false | false | 494 | r | library(rgee)
# ee_reattach() # reattach ee as a reserved word
ee_Initialize()  # start / authenticate the Earth Engine session
# NED landforms image: one band ("constant") of landform class codes
dataset <- ee$Image("CSP/ERGo/1_0/US/landforms")
landforms <- dataset$select("constant")
# class codes 11-42 rendered with a fixed 15-color palette
landformsVis <- list(
  min = 11.0,
  max = 42.0,
  palette = c(
    "141414", "383838", "808080", "EBEB8F", "F7D311", "AA0000", "D89382",
    "DDC9C9", "DCCDCE", "1C6330", "68AA63", "B5C98E", "E1F0E5", "a975ba",
    "6f198c"
  )
)
Map$setCenter(-105.58, 40.5498, 11)  # zoom to the Colorado Front Range
Map$addLayer(landforms, landformsVis, "NED Landforms")
|
library(dplyr)
library(jsonlite)
library(Hmisc)
library(stringr)
# Reformat a Hmisc::cut2()-style interval label (e.g. "[100000,300000)")
# into a human-readable range such as "[100,000 to 300,000)".
#
# x    - a single interval label (factor level or character scalar)
# nsep - separator placed between the two formatted endpoint values
#
# Returns a character scalar: the original opening/closing bracket
# characters around the comma-formatted endpoint numbers.
trans_level <- function(x, nsep = " to ") {
  x <- as.character(x)
  # base-R equivalent of stringr::str_extract_all(x, "\\d+")[[1]];
  # drops the stringr dependency for this helper
  n <- regmatches(x, gregexpr("[0-9]+", x))[[1]]  ## extract numbers
  # insert thousands separators; format = "d" keeps whole numbers
  v <- formatC(as.numeric(n), big.mark = ",", format = "d")
  paste0(
    substring(x, 1, 1),               # opening bracket, "[" or "("
    paste(v, collapse = nsep),        # e.g. "100,000 to 300,000"
    substring(x, nchar(x), nchar(x))) # closing bracket, ")" or "]"
}
# Build a US state choropleth of contribution dollars for a single bill.
#
# choroData - data frame with (at least) columns: bill, id (state key), dollar
# states    - state lookup table. NOTE(review): this argument is never used --
#             it is immediately shadowed by the read.csv() call below; either
#             drop the parameter or use it instead of re-reading the file.
# billName  - bill to plot; NULL short-circuits with an empty JSON string
#
# Returns the grid.arrange() result (draws the plot as a side effect).
# Relies on dplyr, Hmisc (cut2), choroplethr (StateChoropleth), ggplot2
# and gridExtra being attached by the surrounding app.
choroOutputBill <- function(choroData, states, billName) {
  if(is.null(billName)) {
    return('{}')  # empty JSON object for the client
  }
  # keep only this bill's rows, then drop the bill column
  choroDataBill <- choroData %>%
    filter(bill == billName) %>%
    ungroup() %>%
    select(-bill)
  # reload the state table from disk (shadows the `states` parameter)
  states <- read.csv('data/state_table.csv', stringsAsFactors = FALSE)
  states <- select(states, -id)
  names(states)[9] <- 'id'  # 9th remaining column becomes the join key
  ccc <- inner_join(choroDataBill, states, by = 'id')
  ccc <- select(ccc, name, dollar)
  colnames(ccc) <- c('region', 'value')  # choroplethr expects region/value
  ccc$region <- tolower(ccc$region)
  # bucket dollar totals. NOTE(review): the cut points are not in
  # ascending order (1000000 follows 3000000) -- confirm intended breaks
  ccc$value <- cut2(ccc$value,
                    c(100000, 300000, 800000, 3000000, 1000000))
  # relabel interval levels into readable ranges via trans_level()
  ccc$value2 <- ccc$value
  levels(ccc$value2) <- sapply(levels(ccc$value),
                               trans_level)
  ccc <- select(ccc, region, value2)
  colnames(ccc) <- c('region', 'value')
  choro <- StateChoropleth$new(ccc)
  choro$title <- "Contributions Denisty in US States"
  choro$ggplot_scale <- scale_fill_brewer(name="Dollars",
                                          palette = 6, drop = FALSE)
  p <- choro$render() +
    theme(legend.position = "bottom",
          title = element_text(size = 16),
          legend.title = element_blank(),
          legend.text = element_text(size = 8)) +
    guides(col = guide_legend(nrow = 2, byrow = TRUE))
  grid.arrange(p, nrow = 1)
}
| /shinyApp/gman-viz/choroOutput.R | no_license | michaelDemertzi/GMAN | R | false | false | 1,682 | r | library(dplyr)
library(jsonlite)
library(Hmisc)
library(stringr)
# Turn a cut2() interval label like "[100000,300000)" into a readable
# range like "[100,000 to 300,000)"; `nsep` separates the two endpoints.
trans_level <- function(x, nsep = " to ") {
  digits <- str_extract_all(x, "\\d+")[[1]]           # endpoint numbers
  pretty <- formatC(as.numeric(digits),
                    big.mark = ",", format = "d")     # add thousands commas
  label <- as.character(x)
  open_ch <- substring(label, 1, 1)                   # "[" or "("
  close_ch <- substring(label, nchar(label), nchar(label))  # ")" or "]"
  paste0(open_ch, paste(pretty, collapse = nsep), close_ch)
}
# Draw a state-level choropleth of contribution dollars for one bill.
# Short-circuits with '{}' when billName is NULL. Depends on dplyr,
# Hmisc::cut2, choroplethr, ggplot2 and gridExtra from the app environment.
# NOTE(review): the `states` parameter is shadowed by read.csv() below,
# and the cut2 break points are not sorted (1000000 after 3000000).
choroOutputBill <- function(choroData, states, billName) {
  if(is.null(billName)) {
    return('{}')  # empty JSON object
  }
  # restrict to the requested bill
  choroDataBill <- choroData %>%
    filter(bill == billName) %>%
    ungroup() %>%
    select(-bill)
  # re-read the state lookup table; 9th remaining column is the join key
  states <- read.csv('data/state_table.csv', stringsAsFactors = FALSE)
  states <- select(states, -id)
  names(states)[9] <- 'id'
  ccc <- inner_join(choroDataBill, states, by = 'id')
  ccc <- select(ccc, name, dollar)
  colnames(ccc) <- c('region', 'value')  # names choroplethr requires
  ccc$region <- tolower(ccc$region)
  # bucket dollars and relabel the buckets as readable ranges
  ccc$value <- cut2(ccc$value,
                    c(100000, 300000, 800000, 3000000, 1000000))
  ccc$value2 <- ccc$value
  levels(ccc$value2) <- sapply(levels(ccc$value),
                               trans_level)
  ccc <- select(ccc, region, value2)
  colnames(ccc) <- c('region', 'value')
  # render the map with a brewer palette and bottom legend
  choro <- StateChoropleth$new(ccc)
  choro$title <- "Contributions Denisty in US States"
  choro$ggplot_scale <- scale_fill_brewer(name="Dollars",
                                          palette = 6, drop = FALSE)
  p <- choro$render() +
    theme(legend.position = "bottom",
          title = element_text(size = 16),
          legend.title = element_blank(),
          legend.text = element_text(size = 8)) +
    guides(col = guide_legend(nrow = 2, byrow = TRUE))
  grid.arrange(p, nrow = 1)
}
|
# Estimate an evolutionary distance per alignment by maximum likelihood,
# given a stationary distribution (Pi) and a rate matrix (Q) read from disk.
library(pracma)
# on the cluster
# library(pracma,lib='~/R')
Pi=read.table('Pi',header=FALSE)
# NOTE(review): `F` shadows the built-in FALSE alias -- rename if possible
F=diag(c(t(Pi)))
Q=read.table('Q',header=FALSE)
# eigendecomposition of Q so that expm(Q*t) = S diag(exp(Lambda*t)) S^-1
ev <- eigen(Q)
Lambda=ev$values
S=ev$vectors
inv_S=solve(S)
# flat vector holding one 400x400 substitution-count matrix per alignment
n_mats = scan('t.mat',what = double(0))
num_aln = 100
# distance for the first alignment is fixed at 0; the loop starts at i=2,
# so block 1 of t.mat (elements 1:160000) is never read
list_dis=c(0)
for (i in 2:num_aln){
  j=i*160000
  n_mat = array(n_mats[(j-159999):j],dim=c(400,400))
  n_mat = n_mat + 1  # pseudocount so log() never sees a zero probability
  # negative log-likelihood of the observed counts at distance t
  loglikeN <- function (t) {
    -sum(n_mat * log(F %*% (S %*% diag(exp(Lambda *t )) %*% inv_S)) )
  }
  # bounded 1-D minimization of the negative log-likelihood over t
  min_dis=fminbnd(loglikeN,1,1000,maxiter=50)
  list_dis=c(list_dis,min_dis$xmin)
}
write.table(list_dis,file='t.dis',row.names = FALSE, col.names = FALSE)
| /cath/sum_cao_dis.R | no_license | heichiyidui/dev | R | false | false | 672 | r | library(pracma)
# on the cluster
# library(pracma,lib='~/R')
# ML estimation of per-alignment distances from Pi (stationary
# distribution) and Q (rate matrix), both read from the working directory.
Pi=read.table('Pi',header=FALSE)
F=diag(c(t(Pi)))  # NOTE(review): `F` shadows the FALSE alias
Q=read.table('Q',header=FALSE)
# eigendecomposition: expm(Q*t) = S diag(exp(Lambda*t)) S^-1
ev <- eigen(Q)
Lambda=ev$values
S=ev$vectors
inv_S=solve(S)
# one flattened 400x400 count matrix per alignment
n_mats = scan('t.mat',what = double(0))
num_aln = 100
list_dis=c(0)  # alignment 1 is pinned at distance 0 and never fitted
for (i in 2:num_aln){
  j=i*160000
  n_mat = array(n_mats[(j-159999):j],dim=c(400,400))
  n_mat = n_mat + 1  # pseudocount to avoid log(0)
  # negative log-likelihood of the counts at distance t
  loglikeN <- function (t) {
    -sum(n_mat * log(F %*% (S %*% diag(exp(Lambda *t )) %*% inv_S)) )
  }
  min_dis=fminbnd(loglikeN,1,1000,maxiter=50)  # bounded 1-D optimizer
  list_dis=c(list_dis,min_dis$xmin)
}
write.table(list_dis,file='t.dis',row.names = FALSE, col.names = FALSE)
|
\name{check_hyperpar_gaussian}
\alias{check_hyperpar_gaussian}
\title{check hyperpar gaussian}
\usage{check_hyperpar_gaussian(hyperpar)}
\arguments{
\item{hyperpar}{The hyperparameter value(s) to be checked.}
}
\author{Thiago Guerrera Martins}
| /functions/priorSens/man/check_hyperpar_gaussian.Rd | permissive | kassteele/Contact-patterns | R | false | false | 232 | rd | \name{check_hyperpar_gaussian}
\alias{check_hyperpar_gaussian}
\title{check hyperpar gaussian}
\usage{check_hyperpar_gaussian(hyperpar)}
\arguments{
\item{hyperpar}{
}
}
\author{Thiago Guerrera Martins}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tools.R
\name{get.gr.lagsat}
\alias{get.gr.lagsat}
\title{Extract exponential growth rate assuming lagged exponential growth that saturates}
\usage{
get.gr.lagsat(x, y, plotQ = F, fpath = NA, id = "")
}
\arguments{
\item{x}{Time steps}
\item{y}{ln(abundance)}
\item{plotQ}{logical; should the fit be plotted?}
\item{fpath}{character; path specifying where plot should be saved, if generated}
\item{id}{Label corresponding to the population/strain/species of interest}
}
\value{
This function returns a nonlinear least-squares regression model
}
\description{
This function fits a smoothed piecewise linear model to ln(abundance) data, with
the assumption that abundances are nearly constant for several time points, before
exponential growth kicks in; subsequently, growth saturates and abundances become
constant again.
}
| /man/get.gr.lagsat.Rd | permissive | ctkremer/growthTools | R | false | true | 908 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tools.R
\name{get.gr.lagsat}
\alias{get.gr.lagsat}
\title{Extract exponential growth rate assuming lagged exponential growth that saturates}
\usage{
get.gr.lagsat(x, y, plotQ = F, fpath = NA, id = "")
}
\arguments{
\item{x}{Time steps}
\item{y}{ln(abundance)}
\item{plotQ}{logical; should the fit be plotted?}
\item{fpath}{character; path specifying where plot should be saved, if generated}
\item{id}{Label corresponding to the population/strain/species of interest}
}
\value{
This function returns a nonlinear least-squares regression model
}
\description{
This function fits a smoothed piecewise linear model to ln(abundance) data, with
the assumption that abundances are nearly constant for several time points, before
exponential growth kicks in; subsequently, growth saturates and abundances become
constant again.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dasl.R
\docType{data}
\name{dasl.erp_effectiveness}
\alias{dasl.erp_effectiveness}
\title{ERP Effectiveness}
\format{8 observations}
\source{
DASL – The Data And Story Library: \href{https://dasl.datadescription.com/datafile/erp-effectiveness/?sf_paged=13}{ERP Effectiveness}
}
\description{
When implementing a packaged Enterprise Resource Planning (ERP) system, many companies report that the module they first install is Financial Accounting. Among the measures used to gauge the effectiveness of their ERP system implementation is acceleration of the financial close process. The data hold a sample of 8 companies that report their average time (in weeks) to financial close before and after the implementation of their ERP system.
}
\details{
\url{https://github.com/sigbertklinke/wwwdata/tree/master/wwwdata/dasl}
}
\references{
unknown
}
\concept{Comparing Two Groups}
\concept{Nonparametric Methods}
| /man/dasl.erp_effectiveness.Rd | no_license | sigbertklinke/mmstat.data | R | false | true | 988 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dasl.R
\docType{data}
\name{dasl.erp_effectiveness}
\alias{dasl.erp_effectiveness}
\title{ERP Effectiveness}
\format{8 observations}
\source{
DASL – The Data And Story Library: \href{https://dasl.datadescription.com/datafile/erp-effectiveness/?sf_paged=13}{ERP Effectiveness}
}
\description{
When implementing a packaged Enterprise Resource Planning (ERP) system, many companies report that the module they first install is Financial Accounting. Among the measures used to gauge the effectiveness of their ERP system implementation is acceleration of the financial close process. The data hold a sample of 8 companies that report their average time (in weeks) to financial close before and after the implementation of their ERP system.
}
\details{
\url{https://github.com/sigbertklinke/wwwdata/tree/master/wwwdata/dasl}
}
\references{
unknown
}
\concept{Comparing Two Groups}
\concept{Nonparametric Methods}
|
# leave-one-out and 10-fold cross-validation prediction error for
# the mcycle data set (motorcycle impact data from MASS)
library(boot)  # cv.glm() and glm.diag() live in boot (never loaded before)
data(mcycle, package="MASS")
# glm() needs a formula, not bare vectors: model acceleration on time
mcycle.glm <- glm(accel ~ times, data = mcycle)
(cv.err <- cv.glm(mcycle, mcycle.glm)$delta)            # leave-one-out
(cv.err.10 <- cv.glm(mcycle, mcycle.glm, K = 10)$delta) # 10-fold
# As this is a linear model we could calculate the leave-one-out
# cross-validation estimate without any extra model-fitting:
# mean of squared (residual / (1 - leverage)).
muhat <- fitted(mcycle.glm)
mcycle.diag <- glm.diag(mcycle.glm)
# residuals come from mcycle$accel, leverage is mcycle.diag$h
(cv.err <- mean((mcycle$accel - muhat)^2/(1 - mcycle.diag$h)^2))
## Source https://stat.ethz.ch/R-manual/R-devel/library/boot/html/cv.glm.html
| /AsignaturaMineriadeDatos/tarea/foo.r | no_license | pbarbero/MasterModelizacionMatematica | R | false | false | 624 | r | # leave-one-out and 10-fold cross-validation prediction error for
# the mcycle data set (motorcycle impact data from MASS)
# NOTE(review): this example is broken as written:
#   * cv.glm()/glm.diag() need library(boot), which is never loaded
#   * glm() is called with bare vectors; it needs a formula, e.g.
#     glm(accel ~ times, data = mcycle)
data(mcycle, package="MASS")
mcycle.glm <- glm(mcycle$accel, mcycle$times, data = mcycle)
(cv.err <- cv.glm(mcycle, mcycle.glm)$delta)
(cv.err.10 <- cv.glm(mcycle, mcycle.glm, K = 10)$delta)
# As this is a linear model we could calculate the leave-one-out
# cross-validation estimate without any extra model-fitting.
muhat <- fitted(mcycle.glm)
mcycle.diag <- glm.diag(mcycle.glm)
# NOTE(review): should be mcycle$accel and mcycle.diag$h (leverage);
# neither mcycle.glm$accel nor mcycle.diag$times exists
(cv.err <- mean((mcycle.glm$accel - muhat)^2/(1 - mcycle.diag$times)^2))
## Source https://stat.ethz.ch/R-manual/R-devel/library/boot/html/cv.glm.html
|
# Descriptive statistics for Submittable submitter data: pulls from
# Redshift over JDBC and writes plots/LaTeX tables for the report.
library(RJDBC)
library(tidyverse)
library(scales)
library(psych)
library(countrycode)
library(xtable)
# NOTE(review): setwd() with an absolute path makes the script
# machine-specific -- prefer project-relative paths
setwd("C:/Users/Anna/Documents/MSBA/Capstone/SubmitterProfile_Capstone/LaTeX_Files")
driver <- JDBC("com.amazon.redshift.jdbc42.Driver",
               "~/RedshiftJDBC42-1.2.12.1017.jar", identifier.quote="`")
# placeholder credentials ("any"); assumes a local tunnel on port 5439
url <- "jdbc:redshift://localhost:5439/any?ssl=false&UID=any&PWD=any"
conn <- dbConnect(driver, url)
#users by createdate and subdate
createq <- "select count(distinct u.userid), extract(year from u.createdate) as year
from submittable_db.smmuser u
group by 2"
create_users <- dbGetQuery(conn, createq)
subq <- "select count(distinct userid), extract(year from createdon) as year
from submittable_db.submission
group by 2"
submit_users <- dbGetQuery(conn, subq)
jpeg("userCreate_plot.jpg")
create_users%>%
filter(!is.na(year), year != 2019)%>%
ggplot(aes(x = factor(year), y = count))+
geom_bar(stat = "identity")+
labs(title = "New Submitters by Year",
x = "Year",
y = "New Users")
dev.off()
total_users <- sum(create_users$count) # 4,276,490 submitters
jpeg("userSubmit_plot.jpg")
submit_users%>%
filter(!is.na(year), year != 2019, year >=2010)%>%
ggplot(aes(x = factor(year), y = count))+
geom_bar(stat = "identity")+
labs(title = "Number of Active Submitters by Year",
x = "Year",
y = "Active Submitters")+
scale_y_continuous(labels = comma)
dev.off()
#user submission stats
num_subq <- "select s.userid, extract(year from s.createdon) as year,
extract(month from s.createdon) as month,
count(s.submissionid)
from submittable_db.submission s
join submittable_db.smmuser u on s.userid = u.userid
where u.email not like '%@submittable.com'
group by 1,2,3"
subs <- dbGetQuery(conn, num_subq)
avg_subs <- subs%>%
group_by(userid)%>%
summarise(total_subs = sum(count))
mean(avg_subs$total_subs)#avg 5 subs per user
sub_describe <- describe(avg_subs$total_subs)
sub_count <- avg_subs%>%
group_by(total_subs)%>%
count()
#62% of users only submit once
percent_one_sub <- sub_count$n[sub_count$total_subs == 1]/sum(sub_count$n)
#user demographics
genderq <- "select gender, count(userid)
from submittable_db.smmuser
group by 1"
gender <- dbGetQuery(conn, genderq)
ageq <- "select extract(year from dob), count(userid)
from submittable_db.smmuser
group by 1"
age <- dbGetQuery(conn, ageq)
real_age <- age%>%
filter(date_part <= 2000, !is.na(date_part), date_part >=1930)
sum(real_age$count)
age_buckets <- data.frame(bucket = c(replicate(18,"<18"), replicate(7,"19-25"), replicate(10,"26-35"),
replicate(10, "36-45"), replicate(10, "46-55"), replicate(10, "56-65"),
replicate(35, "66+")),
age = c(1:18, 19:25,26:35,36:45, 46:55, 56:65,66:100))
real_age <- real_age%>%
mutate(age = 2019 - date_part)%>%
inner_join(age_buckets, by = c("age" = "age"))
bucket_order <- c("<18", "19-25", "26-35", "36-45", "46-55", "56-65", "66+")
jpeg("userAge_plot.jpg")
real_age%>%
group_by(bucket)%>%
summarize(total = sum(count))%>%
ggplot(aes(x = factor(bucket, levels = bucket_order), y = total))+
geom_bar(stat = "identity")+
labs(title = "Self-Reported User Age Distribution",
x = "User Age", y = "Number of Users")
dev.off()
addressq <- "select a.country, count(u.userid)
from submittable_db.smmuser u
join submittable_db.address a on u.addressid = a.addressid
where a.country similar to '[A-Z][A-Z]'
group by 1"
address <- dbGetQuery(conn, addressq)
sum(address$count)
address <- address%>%
mutate(fullname = countrycode(country, "iso2c", "country.name"))
top_address <- address%>%
top_n(10, count)%>%
select(fullname, count)%>%
arrange(desc(count))
colnames(top_address) <- c("Country", "Number of Submitters")
sink("userCountry.txt")
xtable(top_address)
sink()
top_address$`Number of Submitters`[top_address$Country == 'United States']/sum(address$count)
#user descriptions and interests
descq <- "
select count(userid)
from submittable_db.smmuser
where description is not null and description != ''"
desc <- dbGetQuery(conn, descq)
#forms and submissions
productq <- "select count(distinct p.productid)
from submittable_db.product p
join submittable_db.publisher pub on p.publisherid = pub.publisherid
where pub.accounttypeid not in (11,16,64)"
formCount <- dbGetQuery(conn, productq)
productYearq <- "select extract(year from p.createdate) as year, count(p.productid)
from submittable_db.product p
join submittable_db.publisher pub on p.publisherid = pub.publisherid
where pub.accounttypeid not in (11,16,64)
group by 1"
productYear <- dbGetQuery(conn, productYearq)
jpeg("formsByYear_plot.jpg")
productYear%>%
filter(year != 2019)%>%
ggplot(aes(x = year, y = count))+
geom_bar(stat = "identity")+
scale_x_continuous(breaks = c(2010:2019))+
labs(title = "New Opportunities by Year",
x ="Year",
y = "Number of New Opportunities")
dev.off()
subcountq <- "select count(s.submissionid)
from submittable_db.submission s
join submittable_db.product p on s.productid = p.productid
join submittable_db.publisher pub on p.publisherid = pub.publisherid
where pub.accounttypeid not in (11,16,64)"
subCount <- dbGetQuery(conn, subcountq)
subYearq <- "select extract(year from s.createdon) as year, count(s.submissionid)
from submittable_db.submission s
join submittable_db.product p on s.productid = p.productid
join submittable_db.publisher pub on p.publisherid = pub.publisherid
where pub.accounttypeid not in (11,16,64)
group by 1"
subYear <- dbGetQuery(conn, subYearq)
jpeg("subsByYear_plot.jpg")
subYear%>%
filter(year != 2019, year >= 2010)%>%
ggplot(aes(x = year, y = count))+
geom_bar(stat = "identity")+
scale_x_continuous(breaks = c(2010:2019))+
scale_y_continuous(labels = comma)+
labs(title ="Total Submissions by Year",
x ="Year",
y = "Total Number of Submissions")
dev.off()
subYear <- subYear%>%
arrange(year)%>%
mutate(change = count - lag(count))
numSubsq <- "
select s.productid, count(distinct s.submissionid)
from submittable_db.submission s
join submittable_db.product p on s.productid = p.productid
join submittable_db.publisher pub on p.publisherid =pub.publisherid
where pub.accounttypeid not in (11,16,64)
group by 1"
numsubs <- dbGetQuery(conn, numSubsq)
mean(numsubs$count)
range(numsubs$count)
overOne <- numsubs%>%
filter(count > 1)
overFive <- numsubs%>%
filter(count >5)
overTwenty <- numsubs%>%
filter(count >20)
# Giving WAY low numbers--not sure why...
# activeFormq <- "
# select extract(year from s.createdon) as year, count(distinct s.productid)
# from submittable_db.submission s
# left join submittable_db.product p on s.productid = p.productid
# left join submittable_db.publisher pub on p.productid = pub.publisherid
# where pub.accounttypeid not in (11,16,64)
# group by 1
# "
# activeForm <- dbGetQuery(conn, activeFormq)
#
#
# activeForm%>%
# filter(year != 2019, year >= 2010)%>%
# ggplot(aes(x = year, y = count))+
# geom_bar(stat = "identity")+
# scale_x_continuous(breaks = c(2010:2018))+
# labs(title = "Number of Active Opportunities by Year",
# x = "Year",
# y = "Number of Opportunities Receiving at least One Submission")
numSubsYearq <- "
select extract(year from s.createdon) as year, s.productid, count(distinct s.submissionid)
from submittable_db.submission s
join submittable_db.product p on s.productid = p.productid
join submittable_db.publisher pub on p.publisherid =pub.publisherid
where pub.accounttypeid not in (11,16,64)
group by 1, 2"
numSubsYear <- dbGetQuery(conn, numSubsYearq)
jpeg("activeForms_plot.jpg")
numSubsYear%>%
filter(year != 2019, year >=2010)%>%
group_by(year)%>%
count()%>%
ggplot(aes(x = year, y = n))+
geom_bar(stat = "identity")+
scale_x_continuous(breaks = c(2010:2018))+
labs(title = "Number of Active Opportunities by Year",
x = "Year",
y ="Number of Opportunities Receiving at least One Submission")
dev.off()
jpeg("avgSubs_plot.jpg")
numSubsYear%>%
filter(year != 2019, year >= 2010)%>%
group_by(year)%>%
summarize(avg = mean(count))%>%
ggplot(aes(x = year, y = avg))+
geom_line()+
geom_point()+
scale_y_continuous(limits = c(50, 100))+
scale_x_continuous(breaks = c(2010:2018))+
labs(title = "Average Submissions per Opportunity by Year",
x = "Year",
y = "Average Number of Submissions per Opportunity")
dev.off()
#usecases
usecaseq <- "
select hd.properties__use_case__value, count(p.productid)
from hubspot.deals hd
join submittable_db.product p on hd.properties__admin_id__value__string = p.publisherid
group by 1"
usecases <- dbGetQuery(conn, usecaseq)
usecases <- usecases%>%
filter(!grepl("Do Not Use", properties__use_case__value), !is.na(properties__use_case__value))
top_usecases <- usecases%>%
top_n(10, count)%>%
arrange(desc(count))
sink("top_usecases.txt")
xtable(top_usecases)
sink()
#discover tags
discoverq <- '
select t.name, count(distinct p.productid)
from submittable_db."tag" t
join submittable_db.producttag2 pt on t.id = pt.tagid
join submittable_db.product p on pt.productid = p.productid
join submittable_db.publisher pub on p.publisherid = pub.publisherid
where pub.accounttypeid not in (11,16,64)
group by 1
'
discover <- dbGetQuery(conn, discoverq)
sum(discover$count)
newtags <- read.csv("discovertags.csv")
newtags <- newtags%>%
mutate(name = as.character(name),
newlabel = ifelse(newlabel=='', NA, as.character(newlabel)))%>%
filter(!is.na(newlabel))
discovertags <- paste(unique(newtags$name))
discovertags <- paste(discovertags, collapse = "','")
discovertags <- paste("'", discovertags, collapse = "")
discovertags <- paste(discovertags, "'", collapse = "")
discoverq2 <- paste(
'select p.productid, t.name
from submittable_db."tag" t
join submittable_db.producttag2 pt on t.id = pt.tagid
join submittable_db.product p on pt.productid = p.productid
join submittable_db.publisher pub on p.publisherid = pub.publisherid
where pub.accounttypeid not in (11,16,64)
and t.name in (', discovertags,
')', collapse = " ")
discover2 <- dbGetQuery(conn, discoverq2)
discover_new <- discover2%>%
left_join(newtags, by = c("name" = "name"))
discover_grouped <- discover_new%>%
group_by(newlabel)%>%
summarise(count = n_distinct(productid))
sum(discover_grouped$count)
colnames(discover_grouped) <- c("Discover Group", "Count")
sink("discovergroup.txt")
xtable(discover_grouped)
sink()
discoverlist <- data.frame(`Discover Label` = discover$name)
usecaselist <- data.frame(Usecase = usecases$properties__use_case__value)
sink("discoverlist.txt")
xtable(discoverlist)
sink()
sink("usecaselist.txt")
xtable(usecaselist)
sink()
| /Descriptive Stats.R | no_license | amarbut/SubmitterProfile_Capstone | R | false | false | 11,007 | r | library(RJDBC)
library(tidyverse)
library(scales)
library(psych)
library(countrycode)
library(xtable)
setwd("C:/Users/Anna/Documents/MSBA/Capstone/SubmitterProfile_Capstone/LaTeX_Files")
driver <- JDBC("com.amazon.redshift.jdbc42.Driver",
"~/RedshiftJDBC42-1.2.12.1017.jar", identifier.quote="`")
url <- "jdbc:redshift://localhost:5439/any?ssl=false&UID=any&PWD=any"
conn <- dbConnect(driver, url)
#users by createdate and subdate
# Yearly counts of newly created accounts and of accounts that submitted.
createq <- "select count(distinct u.userid), extract(year from u.createdate) as year
from submittable_db.smmuser u
group by 2"
create_users <- dbGetQuery(conn, createq)
subq <- "select count(distinct userid), extract(year from createdon) as year
from submittable_db.submission
group by 2"
submit_users <- dbGetQuery(conn, subq)
# Bar chart of new submitters per year (2019 dropped as a partial year).
# NOTE(review): when this script is source()d the ggplot object is not
# auto-printed, so the jpeg may come out empty; wrap the chain in print()
# if that happens.
jpeg("userCreate_plot.jpg")
create_users%>%
filter(!is.na(year), year != 2019)%>%
ggplot(aes(x = factor(year), y = count))+
geom_bar(stat = "identity")+
labs(title = "New Submitters by Year",
x = "Year",
y = "New Users")
dev.off()
total_users <- sum(create_users$count) # 4,276,490 submitters
# Bar chart of users active (submitting) in each year since 2010.
jpeg("userSubmit_plot.jpg")
submit_users%>%
filter(!is.na(year), year != 2019, year >=2010)%>%
ggplot(aes(x = factor(year), y = count))+
geom_bar(stat = "identity")+
labs(title = "Number of Active Submitters by Year",
x = "Year",
y = "Active Submitters")+
scale_y_continuous(labels = comma)
dev.off()
#user submission stats
num_subq <- "select s.userid, extract(year from s.createdon) as year,
extract(month from s.createdon) as month,
count(s.submissionid)
from submittable_db.submission s
join submittable_db.smmuser u on s.userid = u.userid
where u.email not like '%@submittable.com'
group by 1,2,3"
subs <- dbGetQuery(conn, num_subq)
avg_subs <- subs%>%
group_by(userid)%>%
summarise(total_subs = sum(count))
mean(avg_subs$total_subs)#avg 5 subs per user
sub_describe <- describe(avg_subs$total_subs)
sub_count <- avg_subs%>%
group_by(total_subs)%>%
count()
#62% of users only submit once
percent_one_sub <- sub_count$n[sub_count$total_subs == 1]/sum(sub_count$n)
#user demographics
genderq <- "select gender, count(userid)
from submittable_db.smmuser
group by 1"
gender <- dbGetQuery(conn, genderq)
ageq <- "select extract(year from dob), count(userid)
from submittable_db.smmuser
group by 1"
age <- dbGetQuery(conn, ageq)
real_age <- age%>%
filter(date_part <= 2000, !is.na(date_part), date_part >=1930)
sum(real_age$count)
age_buckets <- data.frame(bucket = c(replicate(18,"<18"), replicate(7,"19-25"), replicate(10,"26-35"),
replicate(10, "36-45"), replicate(10, "46-55"), replicate(10, "56-65"),
replicate(35, "66+")),
age = c(1:18, 19:25,26:35,36:45, 46:55, 56:65,66:100))
real_age <- real_age%>%
mutate(age = 2019 - date_part)%>%
inner_join(age_buckets, by = c("age" = "age"))
bucket_order <- c("<18", "19-25", "26-35", "36-45", "46-55", "56-65", "66+")
jpeg("userAge_plot.jpg")
real_age%>%
group_by(bucket)%>%
summarize(total = sum(count))%>%
ggplot(aes(x = factor(bucket, levels = bucket_order), y = total))+
geom_bar(stat = "identity")+
labs(title = "Self-Reported User Age Distribution",
x = "User Age", y = "Number of Users")
dev.off()
addressq <- "select a.country, count(u.userid)
from submittable_db.smmuser u
join submittable_db.address a on u.addressid = a.addressid
where a.country similar to '[A-Z][A-Z]'
group by 1"
address <- dbGetQuery(conn, addressq)
sum(address$count)
address <- address%>%
mutate(fullname = countrycode(country, "iso2c", "country.name"))
top_address <- address%>%
top_n(10, count)%>%
select(fullname, count)%>%
arrange(desc(count))
colnames(top_address) <- c("Country", "Number of Submitters")
sink("userCountry.txt")
xtable(top_address)
sink()
top_address$`Number of Submitters`[top_address$Country == 'United States']/sum(address$count)
#user descriptions and interests
descq <- "
select count(userid)
from submittable_db.smmuser
where description is not null and description != ''"
desc <- dbGetQuery(conn, descq)
#forms and submissions
productq <- "select count(distinct p.productid)
from submittable_db.product p
join submittable_db.publisher pub on p.publisherid = pub.publisherid
where pub.accounttypeid not in (11,16,64)"
formCount <- dbGetQuery(conn, productq)
productYearq <- "select extract(year from p.createdate) as year, count(p.productid)
from submittable_db.product p
join submittable_db.publisher pub on p.publisherid = pub.publisherid
where pub.accounttypeid not in (11,16,64)
group by 1"
productYear <- dbGetQuery(conn, productYearq)
jpeg("formsByYear_plot.jpg")
productYear%>%
filter(year != 2019)%>%
ggplot(aes(x = year, y = count))+
geom_bar(stat = "identity")+
scale_x_continuous(breaks = c(2010:2019))+
labs(title = "New Opportunities by Year",
x ="Year",
y = "Number of New Opportunities")
dev.off()
subcountq <- "select count(s.submissionid)
from submittable_db.submission s
join submittable_db.product p on s.productid = p.productid
join submittable_db.publisher pub on p.publisherid = pub.publisherid
where pub.accounttypeid not in (11,16,64)"
subCount <- dbGetQuery(conn, subcountq)
subYearq <- "select extract(year from s.createdon) as year, count(s.submissionid)
from submittable_db.submission s
join submittable_db.product p on s.productid = p.productid
join submittable_db.publisher pub on p.publisherid = pub.publisherid
where pub.accounttypeid not in (11,16,64)
group by 1"
subYear <- dbGetQuery(conn, subYearq)
jpeg("subsByYear_plot.jpg")
subYear%>%
filter(year != 2019, year >= 2010)%>%
ggplot(aes(x = year, y = count))+
geom_bar(stat = "identity")+
scale_x_continuous(breaks = c(2010:2019))+
scale_y_continuous(labels = comma)+
labs(title ="Total Submissions by Year",
x ="Year",
y = "Total Number of Submissions")
dev.off()
subYear <- subYear%>%
arrange(year)%>%
mutate(change = count - lag(count))
numSubsq <- "
select s.productid, count(distinct s.submissionid)
from submittable_db.submission s
join submittable_db.product p on s.productid = p.productid
join submittable_db.publisher pub on p.publisherid =pub.publisherid
where pub.accounttypeid not in (11,16,64)
group by 1"
numsubs <- dbGetQuery(conn, numSubsq)
mean(numsubs$count)
range(numsubs$count)
overOne <- numsubs%>%
filter(count > 1)
overFive <- numsubs%>%
filter(count >5)
overTwenty <- numsubs%>%
filter(count >20)
# Giving WAY low numbers--not sure why...
# activeFormq <- "
# select extract(year from s.createdon) as year, count(distinct s.productid)
# from submittable_db.submission s
# left join submittable_db.product p on s.productid = p.productid
# left join submittable_db.publisher pub on p.productid = pub.publisherid
# where pub.accounttypeid not in (11,16,64)
# group by 1
# "
# activeForm <- dbGetQuery(conn, activeFormq)
#
#
# activeForm%>%
# filter(year != 2019, year >= 2010)%>%
# ggplot(aes(x = year, y = count))+
# geom_bar(stat = "identity")+
# scale_x_continuous(breaks = c(2010:2018))+
# labs(title = "Number of Active Opportunities by Year",
# x = "Year",
# y = "Number of Opportunities Receiving at least One Submission")
numSubsYearq <- "
select extract(year from s.createdon) as year, s.productid, count(distinct s.submissionid)
from submittable_db.submission s
join submittable_db.product p on s.productid = p.productid
join submittable_db.publisher pub on p.publisherid =pub.publisherid
where pub.accounttypeid not in (11,16,64)
group by 1, 2"
numSubsYear <- dbGetQuery(conn, numSubsYearq)
jpeg("activeForms_plot.jpg")
numSubsYear%>%
filter(year != 2019, year >=2010)%>%
group_by(year)%>%
count()%>%
ggplot(aes(x = year, y = n))+
geom_bar(stat = "identity")+
scale_x_continuous(breaks = c(2010:2018))+
labs(title = "Number of Active Opportunities by Year",
x = "Year",
y ="Number of Opportunities Receiving at least One Submission")
dev.off()
jpeg("avgSubs_plot.jpg")
numSubsYear%>%
filter(year != 2019, year >= 2010)%>%
group_by(year)%>%
summarize(avg = mean(count))%>%
ggplot(aes(x = year, y = avg))+
geom_line()+
geom_point()+
scale_y_continuous(limits = c(50, 100))+
scale_x_continuous(breaks = c(2010:2018))+
labs(title = "Average Submissions per Opportunity by Year",
x = "Year",
y = "Average Number of Submissions per Opportunity")
dev.off()
#usecases
usecaseq <- "
select hd.properties__use_case__value, count(p.productid)
from hubspot.deals hd
join submittable_db.product p on hd.properties__admin_id__value__string = p.publisherid
group by 1"
usecases <- dbGetQuery(conn, usecaseq)
usecases <- usecases%>%
filter(!grepl("Do Not Use", properties__use_case__value), !is.na(properties__use_case__value))
top_usecases <- usecases%>%
top_n(10, count)%>%
arrange(desc(count))
sink("top_usecases.txt")
xtable(top_usecases)
sink()
#discover tags
# Count distinct products per raw Discover tag, excluding internal/test
# publisher account types (11, 16, 64).
discoverq <- '
select t.name, count(distinct p.productid)
from submittable_db."tag" t
join submittable_db.producttag2 pt on t.id = pt.tagid
join submittable_db.product p on pt.productid = p.productid
join submittable_db.publisher pub on p.publisherid = pub.publisherid
where pub.accounttypeid not in (11,16,64)
group by 1
'
discover <- dbGetQuery(conn, discoverq)
# Printed sanity check only; the total is not stored.
sum(discover$count)
# Manually curated mapping from raw tag name to a coarser group (newlabel);
# rows with an empty newlabel are dropped from the analysis.
newtags <- read.csv("discovertags.csv")
newtags <- newtags%>%
mutate(name = as.character(name),
newlabel = ifelse(newlabel=='', NA, as.character(newlabel)))%>%
filter(!is.na(newlabel))
# Build a single-quoted, comma-separated SQL list from the curated tag names,
# e.g. c("a", "b") -> "'a','b'".
# Fix: the original wrapped the list using paste(..., collapse = "") on
# length-1 inputs, where `collapse` is a no-op and the default sep = " "
# injected a stray space inside the first and last quoted values
# (e.g. "' a'"), so those tags could never match in the IN (...) clause.
# NOTE(review): a tag name containing a single quote would still break the
# query; consider a parameterized query if that can occur.
discovertags <- paste0("'", paste(unique(newtags$name), collapse = "','"), "'")
# Product/tag pairs restricted to the curated tags, excluding internal/test
# publisher account types (11, 16, 64).
discoverq2 <- paste0(
'select p.productid, t.name
from submittable_db."tag" t
join submittable_db.producttag2 pt on t.id = pt.tagid
join submittable_db.product p on pt.productid = p.productid
join submittable_db.publisher pub on p.publisherid = pub.publisherid
where pub.accounttypeid not in (11,16,64)
and t.name in (', discovertags,
')')
# Pull product/tag pairs for the curated Discover tags from Redshift.
discover2 <- dbGetQuery(conn, discoverq2)
# Attach the manually curated group label (newlabel) to each tag name.
discover_new <- discover2%>%
left_join(newtags, by = c("name" = "name"))
# Count distinct products per Discover group; a product can carry several
# tags, so n_distinct avoids double counting within a group.
discover_grouped <- discover_new%>%
group_by(newlabel)%>%
summarise(count = n_distinct(productid))
# Printed for a quick sanity check only; the value is not stored.
sum(discover_grouped$count)
colnames(discover_grouped) <- c("Discover Group", "Count")
# Export LaTeX tables for the report: sink() redirects xtable's printed
# output into the .txt file, and the bare sink() restores the console.
sink("discovergroup.txt")
xtable(discover_grouped)
sink()
# One-column listings of all raw Discover labels and all use cases.
discoverlist <- data.frame(`Discover Label` = discover$name)
usecaselist <- data.frame(Usecase = usecases$properties__use_case__value)
sink("discoverlist.txt")
xtable(discoverlist)
sink()
sink("usecaselist.txt")
xtable(usecaselist)
sink()
|
# Fit a linear model predicting price from product attributes.
# NOTE(review): the predictors (carat, color, clarity, cut) look like
# diamond attributes, yet the data argument is `train.tmsalary` (defined
# elsewhere) -- confirm the formula/data pairing is intended.
build.lm.model <- lm(price ~ carat + color + clarity +
cut + channel + store,
data = train.tmsalary)
cut + channel + store,
data = train.tmsalary) |
# Type I error simulation for the epistasis variance-component test.
# Requires RBJ.txt and GPRC5B.txt genotype files in the working directory.
library(stats)
library(MASS)
library(CompQuadForm)
# NOTE(review): rm(list = ls()) wipes the caller's entire workspace -- avoid
# in scripts that may be source()d from an interactive session.
rm(list= ls())
set.seed(4)
#--------------------------- Load functions ----------------------------------
RealisticSimulation <- function(Sample_size, SNP_posi, Risk_Model, risk_effect){
  # Simulate genotypes and a quantitative phenotype for two genes (RBJ and
  # GPRC5B) under one of six risk models, resampling individuals from real
  # genotype panels read from RBJ.txt / GPRC5B.txt (0/1/2 coded).
  #
  # Args:
  #   Sample_size: number of individuals to simulate.
  #   SNP_posi:    four column indices -- two causal SNPs within RBJ followed
  #                by two causal SNPs within GPRC5B (gene-local positions).
  #   Risk_Model:  integer 0-5 selecting the main/epistatic effect model
  #                (0 = null model: no genetic effect).
  #   risk_effect: effect size multiplying the causal terms.
  #
  # Returns: list(Y = phenotype [n x 1], X = intercept column [n x 1],
  #               geno1 = RBJ genotypes, geno2 = GPRC5B genotypes).
  #
  # 1.1 Load the reference genotype panels.
  Gene_RBJ <- as.matrix(read.table("RBJ.txt", header = FALSE))
  Gene_GPRC5B <- as.matrix(read.table("GPRC5B.txt", header = FALSE))
  N_snp_RBJ <- ncol(Gene_RBJ)
  N_sample_RBJ <- nrow(Gene_RBJ)
  N_sample_GPRC5B <- nrow(Gene_GPRC5B)
  # 1.2 Causal SNP columns; GPRC5B positions are offset by the number of RBJ
  # SNPs because the two genes are concatenated column-wise below.
  causal_posi <- c(SNP_posi[1], SNP_posi[2],
                   SNP_posi[3] + N_snp_RBJ, SNP_posi[4] + N_snp_RBJ)
  # 1.3 Resample individuals with replacement from each panel.
  # Fix: the original drew rows with round(runif(n, 1, N)), which gives rows
  # 1 and N only half the probability of interior rows; sample.int() draws
  # uniformly.
  GeneA <- Gene_RBJ[sample.int(N_sample_RBJ, Sample_size, replace = TRUE), ]
  GeneB <- Gene_GPRC5B[sample.int(N_sample_GPRC5B, Sample_size, replace = TRUE), ]
  genotype <- cbind(GeneA, GeneB)
  Causal_SNP <- genotype[, causal_posi]
  # Main and epistatic genetic effects per risk model.
  if (Risk_Model == 0){
    Main_effect <- 0
    Epi_effect <- 0
  } else if (Risk_Model == 1){
    Main_effect <- risk_effect*(Causal_SNP[,1]+Causal_SNP[,3])
    Epi_effect <- 0
  } else if (Risk_Model == 2){
    Main_effect <- 0
    Epi_effect <- risk_effect*(Causal_SNP[,1]*Causal_SNP[,2]+Causal_SNP[,3]*Causal_SNP[,4])
  } else if (Risk_Model == 3){
    Main_effect <- risk_effect*Causal_SNP[,1]
    Epi_effect <- risk_effect*(Causal_SNP[,2]*Causal_SNP[,4])
  } else if (Risk_Model == 4){
    Main_effect <- risk_effect*Causal_SNP[,1]
    Epi_effect <- risk_effect*(Causal_SNP[,2]*Causal_SNP[,4]+Causal_SNP[,3]*Causal_SNP[,4])
  } else if (Risk_Model == 5){
    Main_effect <- 0
    Epi_effect <- risk_effect*(Causal_SNP[,1]*Causal_SNP[,3]+Causal_SNP[,2]*Causal_SNP[,4])
  } else {
    # Fail fast; the original silently left the effects undefined here.
    stop("Risk_Model must be an integer between 0 and 5")
  }
  # 1.4 Phenotype = genetic effects + N(0, 1) noise (sd = 1, as before).
  error <- rnorm(Sample_size, 0, 1)
  Phenotype <- Main_effect + Epi_effect + error
  dim(Phenotype) <- c(Sample_size, 1)
  X <- array(1, dim = c(Sample_size, 1))  # intercept-only fixed-effect design
  geno1 <- genotype[, 1:N_snp_RBJ]
  geno2 <- genotype[, -(1:N_snp_RBJ)]
  return(list(Y = Phenotype, X = X, geno1 = geno1, geno2 = geno2))
}
AverageIBS_genotype = function (geno){
  # Average identity-by-state similarity for a 2-allelic, 0/1/2-coded
  # genotype matrix (rows = individuals, columns = markers; AA:0, Aa:1,
  # aa:2), plus the thresholded eigen-decomposition of that matrix.
  #
  # Returns: list(S = similarity matrix,
  #               R = diagonal matrix of the retained eigenvalues,
  #               H = matrix of the corresponding eigenvectors,
  #               L = number of retained eigenvalues, i.e. rank of S).
  n_markers <- ncol(geno)
  count_A <- geno        # copies of allele "A" at each marker
  count_a <- 2 - geno    # copies of allele "a" at each marker
  # Shared allele counts over all markers, scaled into [0, 1].
  similarity <- (tcrossprod(count_A) + tcrossprod(count_a)) / (4 * n_markers)
  decomp <- eigen(similarity, symmetric = TRUE)
  vals <- decomp$values
  top <- vals[1]               # largest eigenvalue, used for relative scaling
  if (top == 0) {
    top <- 1                   # guard against dividing by zero below
  }
  keep <- (vals / top) > 1e-7  # drop eigenvalues that are numerically zero
  rank_S <- sum(keep)
  list(S = similarity,
       R = diag(vals[keep]),
       H = decomp$vectors[, keep],
       L = rank_S)
}
# Variance-component score test for gene-gene interaction (epistasis).
#
# Args:
#   Y             n x 1 phenotype matrix.
#   X             n x p fixed-effect design matrix (intercept column here).
#   Simi1, Simi2  lists from AverageIBS_genotype(): $S = similarity matrix,
#                 $L = its rank.
# Returns: p-value for the interaction variance component, obtained via
# Liu's moment-matching approximation (CompQuadForm::liu).
EpiTest = function(Y,X,Simi1,Simi2){
N = nrow(Y) # the number of individual
S1 = Simi1$S
q1 = Simi1$L
S2 = Simi2$S
q2 = Simi2$L
S12 = S1 * S2 # The similarity matrix for the interaction
p = ncol(X) # The number of fix effect
P_X = X %*% solve(crossprod(X,X))%*% t(X) # The projection of X
#----------------------- Initialize Tau and sigma ----------------------------
Tau_A_new = 0.5
Tau_B_new = 0.5
sigma_new = 1
#------------------------------- updata --------------------------------------
# Iteratively estimate the two per-gene variance components (Tau_A, Tau_B)
# and the residual variance (sigma) under the null of no interaction.
# NOTE(review): the loop has no iteration cap; it will spin forever if the
# relative changes never drop below 1e-3.
repeat{
Tau_A = Tau_A_new
Tau_B = Tau_B_new
sigma = sigma_new
# Null covariance V0 and the corresponding projection matrix P.
V0 = Tau_A*S1 + Tau_B*S2 + sigma*diag(N)
inv_V0 = solve(V0)
inv_V0X = inv_V0%*%X # To speed up the computation
P = inv_V0-inv_V0X %*% solve(t(X)%*%inv_V0X)%*%t(inv_V0X)
Tau_A_new = as.numeric(1/q1 * (Tau_A^2 * t(Y) %*% P %*% S1 %*% P %*% Y + Tau_A*q1 - sum(diag(Tau_A^2 * P %*% S1))))
Tau_B_new = as.numeric(1/q2 * (Tau_B^2 * t(Y) %*% P %*% S2 %*% P %*% Y + Tau_B*q2 - sum(diag(Tau_B^2 * P %*% S2))))
Y_star = Y - Tau_A_new * S1 %*% P %*% Y - Tau_B_new * S2 %*% P %*% Y
IP_X = diag(N)-P_X
VA =Tau_A*S1+Tau_B*S2-(Tau_A*S1+Tau_B*S2) %*% P %*% (Tau_A*S1+Tau_B*S2)
sigma_new = as.numeric(1/(N-p) * (t(Y_star) %*% IP_X %*% Y_star + sum(diag(IP_X%*%VA))))
diff_A = abs((Tau_A_new-Tau_A)/Tau_A_new) # use the relative difference rather than the absoult difference
diff_B = abs((Tau_B_new-Tau_B)/Tau_B_new)
diff_s = abs((sigma_new-sigma)/sigma_new)
if ((diff_A<0.001) & (diff_B<0.001) & (diff_s<0.001)) break
}
P0 = P
T0 = 1/2*t(Y) %*% P0 %*% S12 %*% P0 %*% Y #Get T0 under null hypothesis
# Under the null, T0 is a weighted sum of chi-square(1) variables; the
# weights are the eigenvalues of (1/2) * V0^{1/2} P0 S12 P0 V0^{1/2}.
e = eigen(V0, symmetric=TRUE)
V_eigen = e$vectors
V_square_root = V_eigen %*% diag(sqrt(e$values)) %*% t(V_eigen)
Weights_all = eigen(1/2*V_square_root %*% P0 %*% S12 %*% P0 %*% V_square_root, symmetric=TRUE, only.value=TRUE)
temp = Weights_all$values
temp2 = sort(temp,decreasing=TRUE)
dim(temp2) = c(N,1)
big_enough = sum(temp>10^-3) # Get the number of big eigen values. here, the threshold for "big" is 10^-3
Weights = array(temp2[1:big_enough,1],dim=c(big_enough,1))
# Liu's moment-matching approximation of the weighted chi-square tail.
p = liu(T0, Weights, h = rep(1, length(Weights)), delta = rep(0, length(Weights)))
return(P=p)
}
#---------------------------- Main part -------------------------------------
# Empirical type I error: simulate under the null (Risk_Model = 0) and count
# how often the interaction test rejects at alpha = 0.05.
N = 300
N.iter= 500
SNP_posi=array(c(1,2,1,2),dim=c(1,4))
p = 0
for (i in 1:N.iter){
SData = RealisticSimulation(Sample_size=N,SNP_posi,Risk_Model=0, risk_effect=0)
Y = SData$Y
X = SData$X
gene1 = SData$geno1
gene2 = SData$geno2
Simi1 = AverageIBS_genotype(gene1)
Simi2 = AverageIBS_genotype(gene2)
# NOTE(review): Simi2/Simi1 are passed in swapped order relative to the
# EpiTest(Y, X, Simi1, Simi2) signature; the interaction term S1*S2 is
# symmetric in the two genes, but confirm the swap is intentional.
pvalue = EpiTest(Y,X,Simi2,Simi1)
cat("round=",i,"\n")
if (pvalue<0.05) p=p+1
}
type.I.error = p/N.iter
cat("The type I error for Epi test is ",type.I.error,"\n") | /Codes/Simulation/type I error/HPC2011-10-21/HPC_epi_average.r | no_license | seahearman/Jung-Ying | R | false | false | 7,484 | r | library(stats)
library(MASS)
library(CompQuadForm)
rm(list= ls())
set.seed(4)
#--------------------------- Load functions ----------------------------------
RealisticSimulation <- function(Sample_size, SNP_posi, Risk_Model, risk_effect){
  # Simulate genotypes and a quantitative phenotype for two genes (RBJ and
  # GPRC5B) under one of six risk models, resampling individuals from real
  # genotype panels read from RBJ.txt / GPRC5B.txt (0/1/2 coded).
  #
  # Args:
  #   Sample_size: number of individuals to simulate.
  #   SNP_posi:    four column indices -- two causal SNPs within RBJ followed
  #                by two causal SNPs within GPRC5B (gene-local positions).
  #   Risk_Model:  integer 0-5 selecting the main/epistatic effect model
  #                (0 = null model: no genetic effect).
  #   risk_effect: effect size multiplying the causal terms.
  #
  # Returns: list(Y = phenotype [n x 1], X = intercept column [n x 1],
  #               geno1 = RBJ genotypes, geno2 = GPRC5B genotypes).
  #
  # 1.1 Load the reference genotype panels.
  Gene_RBJ <- as.matrix(read.table("RBJ.txt", header = FALSE))
  Gene_GPRC5B <- as.matrix(read.table("GPRC5B.txt", header = FALSE))
  N_snp_RBJ <- ncol(Gene_RBJ)
  N_sample_RBJ <- nrow(Gene_RBJ)
  N_sample_GPRC5B <- nrow(Gene_GPRC5B)
  # 1.2 Causal SNP columns; GPRC5B positions are offset by the number of RBJ
  # SNPs because the two genes are concatenated column-wise below.
  causal_posi <- c(SNP_posi[1], SNP_posi[2],
                   SNP_posi[3] + N_snp_RBJ, SNP_posi[4] + N_snp_RBJ)
  # 1.3 Resample individuals with replacement from each panel.
  # Fix: the original drew rows with round(runif(n, 1, N)), which gives rows
  # 1 and N only half the probability of interior rows; sample.int() draws
  # uniformly.
  GeneA <- Gene_RBJ[sample.int(N_sample_RBJ, Sample_size, replace = TRUE), ]
  GeneB <- Gene_GPRC5B[sample.int(N_sample_GPRC5B, Sample_size, replace = TRUE), ]
  genotype <- cbind(GeneA, GeneB)
  Causal_SNP <- genotype[, causal_posi]
  # Main and epistatic genetic effects per risk model.
  if (Risk_Model == 0){
    Main_effect <- 0
    Epi_effect <- 0
  } else if (Risk_Model == 1){
    Main_effect <- risk_effect*(Causal_SNP[,1]+Causal_SNP[,3])
    Epi_effect <- 0
  } else if (Risk_Model == 2){
    Main_effect <- 0
    Epi_effect <- risk_effect*(Causal_SNP[,1]*Causal_SNP[,2]+Causal_SNP[,3]*Causal_SNP[,4])
  } else if (Risk_Model == 3){
    Main_effect <- risk_effect*Causal_SNP[,1]
    Epi_effect <- risk_effect*(Causal_SNP[,2]*Causal_SNP[,4])
  } else if (Risk_Model == 4){
    Main_effect <- risk_effect*Causal_SNP[,1]
    Epi_effect <- risk_effect*(Causal_SNP[,2]*Causal_SNP[,4]+Causal_SNP[,3]*Causal_SNP[,4])
  } else if (Risk_Model == 5){
    Main_effect <- 0
    Epi_effect <- risk_effect*(Causal_SNP[,1]*Causal_SNP[,3]+Causal_SNP[,2]*Causal_SNP[,4])
  } else {
    # Fail fast; the original silently left the effects undefined here.
    stop("Risk_Model must be an integer between 0 and 5")
  }
  # 1.4 Phenotype = genetic effects + N(0, 1) noise (sd = 1, as before).
  error <- rnorm(Sample_size, 0, 1)
  Phenotype <- Main_effect + Epi_effect + error
  dim(Phenotype) <- c(Sample_size, 1)
  X <- array(1, dim = c(Sample_size, 1))  # intercept-only fixed-effect design
  geno1 <- genotype[, 1:N_snp_RBJ]
  geno2 <- genotype[, -(1:N_snp_RBJ)]
  return(list(Y = Phenotype, X = X, geno1 = geno1, geno2 = geno2))
}
AverageIBS_genotype = function (geno){
  # Average identity-by-state similarity for a 2-allelic, 0/1/2-coded
  # genotype matrix (rows = individuals, columns = markers; AA:0, Aa:1,
  # aa:2), plus the thresholded eigen-decomposition of that matrix.
  #
  # Returns: list(S = similarity matrix,
  #               R = diagonal matrix of the retained eigenvalues,
  #               H = matrix of the corresponding eigenvectors,
  #               L = number of retained eigenvalues, i.e. rank of S).
  n_markers <- ncol(geno)
  count_A <- geno        # copies of allele "A" at each marker
  count_a <- 2 - geno    # copies of allele "a" at each marker
  # Shared allele counts over all markers, scaled into [0, 1].
  similarity <- (tcrossprod(count_A) + tcrossprod(count_a)) / (4 * n_markers)
  decomp <- eigen(similarity, symmetric = TRUE)
  vals <- decomp$values
  top <- vals[1]               # largest eigenvalue, used for relative scaling
  if (top == 0) {
    top <- 1                   # guard against dividing by zero below
  }
  keep <- (vals / top) > 1e-7  # drop eigenvalues that are numerically zero
  rank_S <- sum(keep)
  list(S = similarity,
       R = diag(vals[keep]),
       H = decomp$vectors[, keep],
       L = rank_S)
}
# Variance-component score test for gene-gene interaction (epistasis).
#
# Args:
#   Y             n x 1 phenotype matrix.
#   X             n x p fixed-effect design matrix (intercept column here).
#   Simi1, Simi2  lists from AverageIBS_genotype(): $S = similarity matrix,
#                 $L = its rank.
# Returns: p-value for the interaction variance component, obtained via
# Liu's moment-matching approximation (CompQuadForm::liu).
EpiTest = function(Y,X,Simi1,Simi2){
N = nrow(Y) # the number of individual
S1 = Simi1$S
q1 = Simi1$L
S2 = Simi2$S
q2 = Simi2$L
S12 = S1 * S2 # The similarity matrix for the interaction
p = ncol(X) # The number of fix effect
P_X = X %*% solve(crossprod(X,X))%*% t(X) # The projection of X
#----------------------- Initialize Tau and sigma ----------------------------
Tau_A_new = 0.5
Tau_B_new = 0.5
sigma_new = 1
#------------------------------- updata --------------------------------------
# Iteratively estimate the two per-gene variance components (Tau_A, Tau_B)
# and the residual variance (sigma) under the null of no interaction.
# NOTE(review): the loop has no iteration cap; it will spin forever if the
# relative changes never drop below 1e-3.
repeat{
Tau_A = Tau_A_new
Tau_B = Tau_B_new
sigma = sigma_new
# Null covariance V0 and the corresponding projection matrix P.
V0 = Tau_A*S1 + Tau_B*S2 + sigma*diag(N)
inv_V0 = solve(V0)
inv_V0X = inv_V0%*%X # To speed up the computation
P = inv_V0-inv_V0X %*% solve(t(X)%*%inv_V0X)%*%t(inv_V0X)
Tau_A_new = as.numeric(1/q1 * (Tau_A^2 * t(Y) %*% P %*% S1 %*% P %*% Y + Tau_A*q1 - sum(diag(Tau_A^2 * P %*% S1))))
Tau_B_new = as.numeric(1/q2 * (Tau_B^2 * t(Y) %*% P %*% S2 %*% P %*% Y + Tau_B*q2 - sum(diag(Tau_B^2 * P %*% S2))))
Y_star = Y - Tau_A_new * S1 %*% P %*% Y - Tau_B_new * S2 %*% P %*% Y
IP_X = diag(N)-P_X
VA =Tau_A*S1+Tau_B*S2-(Tau_A*S1+Tau_B*S2) %*% P %*% (Tau_A*S1+Tau_B*S2)
sigma_new = as.numeric(1/(N-p) * (t(Y_star) %*% IP_X %*% Y_star + sum(diag(IP_X%*%VA))))
diff_A = abs((Tau_A_new-Tau_A)/Tau_A_new) # use the relative difference rather than the absoult difference
diff_B = abs((Tau_B_new-Tau_B)/Tau_B_new)
diff_s = abs((sigma_new-sigma)/sigma_new)
if ((diff_A<0.001) & (diff_B<0.001) & (diff_s<0.001)) break
}
P0 = P
T0 = 1/2*t(Y) %*% P0 %*% S12 %*% P0 %*% Y #Get T0 under null hypothesis
# Under the null, T0 is a weighted sum of chi-square(1) variables; the
# weights are the eigenvalues of (1/2) * V0^{1/2} P0 S12 P0 V0^{1/2}.
e = eigen(V0, symmetric=TRUE)
V_eigen = e$vectors
V_square_root = V_eigen %*% diag(sqrt(e$values)) %*% t(V_eigen)
Weights_all = eigen(1/2*V_square_root %*% P0 %*% S12 %*% P0 %*% V_square_root, symmetric=TRUE, only.value=TRUE)
temp = Weights_all$values
temp2 = sort(temp,decreasing=TRUE)
dim(temp2) = c(N,1)
big_enough = sum(temp>10^-3) # Get the number of big eigen values. here, the threshold for "big" is 10^-3
Weights = array(temp2[1:big_enough,1],dim=c(big_enough,1))
# Liu's moment-matching approximation of the weighted chi-square tail.
p = liu(T0, Weights, h = rep(1, length(Weights)), delta = rep(0, length(Weights)))
return(P=p)
}
#---------------------------- Main part -------------------------------------
# Empirical type I error: simulate under the null (Risk_Model = 0) and count
# how often the interaction test rejects at alpha = 0.05.
N = 300
N.iter= 500
SNP_posi=array(c(1,2,1,2),dim=c(1,4))
p = 0
for (i in 1:N.iter){
SData = RealisticSimulation(Sample_size=N,SNP_posi,Risk_Model=0, risk_effect=0)
Y = SData$Y
X = SData$X
gene1 = SData$geno1
gene2 = SData$geno2
Simi1 = AverageIBS_genotype(gene1)
Simi2 = AverageIBS_genotype(gene2)
# NOTE(review): Simi2/Simi1 are passed in swapped order relative to the
# EpiTest(Y, X, Simi1, Simi2) signature; the interaction term S1*S2 is
# symmetric in the two genes, but confirm the swap is intentional.
pvalue = EpiTest(Y,X,Simi2,Simi1)
cat("round=",i,"\n")
if (pvalue<0.05) p=p+1
}
type.I.error = p/N.iter
cat("The type I error for Epi test is ",type.I.error,"\n") |
# Compare gene trees with treespace: pairwise tree distances, an MDS view,
# and clustering of the trees into groves.
library(ape)
library(adegraphics)
library(treespace)
# All gene-tree files under datasets/ch4/gene_trees, read as phylo objects
# and tagged as a multiPhylo collection.
treefiles <- list.files(file.path(getwd(), "datasets", "ch4", "gene_trees"),full.names = TRUE)
tree_list <- lapply(treefiles, read.tree)
class(tree_list) <- "multiPhylo"
# Pairwise tree distances plus an MDS embedding on the first 3 axes.
comparisons <- treespace(tree_list, nf = 3)
# Heat-map of the distance matrix, then the MDS scatter with labels.
adegraphics::table.image(comparisons$D, nclass=25)
plotGroves(comparisons$pco, lab.show=TRUE, lab.cex=1.5)
# Cluster the trees into 4 groves.
groves <- findGroves(comparisons, nclust = 4)
plotGroves(groves) | /example_code_files/Chapter04/recipe_3.R | no_license | jakesauter/R_Bioinformatics_Cookbook | R | false | false | 454 | r | library(ape)
# Compare gene trees with treespace: pairwise tree distances, an MDS view,
# and clustering of the trees into groves (library(ape) is loaded above).
library(adegraphics)
library(treespace)
# All gene-tree files under datasets/ch4/gene_trees, read as phylo objects
# and tagged as a multiPhylo collection.
treefiles <- list.files(file.path(getwd(), "datasets", "ch4", "gene_trees"),full.names = TRUE)
tree_list <- lapply(treefiles, read.tree)
class(tree_list) <- "multiPhylo"
# Pairwise tree distances plus an MDS embedding on the first 3 axes.
comparisons <- treespace(tree_list, nf = 3)
# Heat-map of the distance matrix, then the MDS scatter with labels.
adegraphics::table.image(comparisons$D, nclass=25)
plotGroves(comparisons$pco, lab.show=TRUE, lab.cex=1.5)
# Cluster the trees into 4 groves and replot coloured by cluster.
groves <- findGroves(comparisons, nclust = 4)
plotGroves(groves)
add2 <- function(x, y) {
  # Elementwise sum of `x` and `y`.
  total <- x + y
  total
}
above10 <- function(x){
  # Keep only the elements of `x` strictly greater than 10.
  x[x > 10]
}
above <- function(x, n = 10){
  # Keep only the elements of `x` strictly greater than the threshold `n`
  # (10 by default).
  keep <- x > n
  x[keep]
}
# Compute the mean of every column of a data frame or matrix `y`.
# removeNA is forwarded to mean(na.rm = ...), so NAs are dropped by default.
# Returns a numeric vector with one mean per column.
# NOTE(review): `for(i in 1:nc)` misbehaves on zero-column input (1:0
# iterates over c(1, 0)); seq_len(nc) would be safer, and base::colMeans
# covers the same job.
columnmean <- function(y, removeNA = TRUE){
nc <- ncol(y)
means <- numeric(nc) #numeric vector equal to length of the column
for(i in 1:nc){
means[i] <- mean(y[, i], na.rm = removeNA)
}
means
} | /functions.R | no_license | abasu1007/R-Programming | R | false | false | 340 | r | add2 <- function(x, y) {
x + y
}
above10 <- function(x){
  # Keep only the elements of `x` strictly greater than 10.
  x[x > 10]
}
above <- function(x, n = 10){
  # Keep only the elements of `x` strictly greater than the threshold `n`
  # (10 by default).
  keep <- x > n
  x[keep]
}
# Compute the mean of every column of a data frame or matrix `y`.
# removeNA is forwarded to mean(na.rm = ...), so NAs are dropped by default.
# Returns a numeric vector with one mean per column.
# NOTE(review): `for(i in 1:nc)` misbehaves on zero-column input (1:0
# iterates over c(1, 0)); seq_len(nc) would be safer, and base::colMeans
# covers the same job.
columnmean <- function(y, removeNA = TRUE){
nc <- ncol(y)
means <- numeric(nc) #numeric vector equal to length of the column
for(i in 1:nc){
means[i] <- mean(y[, i], na.rm = removeNA)
}
means
} |
# test time complexity
# Benchmark jacobi() (defined in jacobi.R) on random sparse 4-neighbour
# lattice systems of increasing size, then plot run time against the
# lattice side length on log-log axes.
require(Matrix)  # NOTE(review): library() would fail loudly if Matrix is missing
source("jacobi.R")
# Side lengths spaced evenly on a log10 scale (4 points per decade).
nn <- ceiling(exp(log(10)/4 * 1:17))
# Repeat small problems more often so each timing is measurable.
nreps <- floor(max(nn)/nn)
nn.times <- rep(1,length(nn))
for(i in seq_along(nn)){
# declare transition/generating matrix
n <- nn[i]
# Index pairs linking each cell of an n x n grid to its right/up/left/down
# neighbour (cells numbered column by column).
rightii <- 1:(n*(n-1))
rightjj <- rightii + n
upii <- 1:(n^2)
upii <- upii[-n*(1:n)]
upjj <- upii + 1
leftii <- (n+1):(n^2)
leftjj <- leftii - n
downii <- 1:(n^2)
downii <- downii[-(1+n*(0:(n-1)))]
downjj <- downii - 1
gc()
R <- sparseMatrix(i = c(rightii,upii,leftii,downii),
j = c(rightjj,upjj,leftjj,downjj),
x = rnorm(4*n*(n-1),0,1))
# Diagonal scaling: D_1 = diag(1 / rowSums(|R|)).
D_1 <- sparseMatrix(i = 1:n^2, j = 1:n^2, x = 1/rowSums(abs(R)))
# free memory
rm("rightii","rightjj","upii","upjj",
"leftii","leftjj","downii","downjj")
gc()
# pre-calculate stuff
b <- rnorm(n^2,0,1)
D_R <- (D_1)%*%(R)
D_1b <- (-1)*(D_1)%*%b
# free memory again
rm("R","D_1")
gc()
# Time nreps[i] Jacobi solves and record the average per-call user time.
t <- system.time(for(k in 1:nreps[i]) x <- jacobi(D_R,D_1b))
nn.times[i] <- t["user.self"]/nreps[i]
# once more, with feeling
# NOTE(review): mixes a quoted and an unquoted name; rm() accepts both,
# but the style is inconsistent.
rm("D_R",D_1b)
gc()
}
pdf(file="jacobi-timing.pdf")
plot(nn,nn.times,log='xy',xlab="side length of graph",ylab="seconds to jacobi()")
dev.off()
| /inference-testing/test_time_jacobi.R | no_license | petrelharp/tortoisescape | R | false | false | 1,259 | r | # test time complexity
# Benchmark jacobi() (defined in jacobi.R) on random sparse 4-neighbour
# lattice systems of increasing size, then plot run time against the
# lattice side length on log-log axes.
require(Matrix)  # NOTE(review): library() would fail loudly if Matrix is missing
source("jacobi.R")
# Side lengths spaced evenly on a log10 scale (4 points per decade).
nn <- ceiling(exp(log(10)/4 * 1:17))
# Repeat small problems more often so each timing is measurable.
nreps <- floor(max(nn)/nn)
nn.times <- rep(1,length(nn))
for(i in seq_along(nn)){
# declare transition/generating matrix
n <- nn[i]
# Index pairs linking each cell of an n x n grid to its right/up/left/down
# neighbour (cells numbered column by column).
rightii <- 1:(n*(n-1))
rightjj <- rightii + n
upii <- 1:(n^2)
upii <- upii[-n*(1:n)]
upjj <- upii + 1
leftii <- (n+1):(n^2)
leftjj <- leftii - n
downii <- 1:(n^2)
downii <- downii[-(1+n*(0:(n-1)))]
downjj <- downii - 1
gc()
R <- sparseMatrix(i = c(rightii,upii,leftii,downii),
j = c(rightjj,upjj,leftjj,downjj),
x = rnorm(4*n*(n-1),0,1))
# Diagonal scaling: D_1 = diag(1 / rowSums(|R|)).
D_1 <- sparseMatrix(i = 1:n^2, j = 1:n^2, x = 1/rowSums(abs(R)))
# free memory
rm("rightii","rightjj","upii","upjj",
"leftii","leftjj","downii","downjj")
gc()
# pre-calculate stuff
b <- rnorm(n^2,0,1)
D_R <- (D_1)%*%(R)
D_1b <- (-1)*(D_1)%*%b
# free memory again
rm("R","D_1")
gc()
# Time nreps[i] Jacobi solves and record the average per-call user time.
t <- system.time(for(k in 1:nreps[i]) x <- jacobi(D_R,D_1b))
nn.times[i] <- t["user.self"]/nreps[i]
# once more, with feeling
# NOTE(review): mixes a quoted and an unquoted name; rm() accepts both,
# but the style is inconsistent.
rm("D_R",D_1b)
gc()
}
pdf(file="jacobi-timing.pdf")
plot(nn,nn.times,log='xy',xlab="side length of graph",ylab="seconds to jacobi()")
dev.off()
|
#' @importFrom magrittr %>%
#' @title Current Probability of Failure for LV switchgear and others
#' @description This function calculates the current
#' annual probability of failure for LV switchgear and others
#' The function is a cubic curve that is based on
#' the first three terms of the Taylor series for an
#' exponential function. For more information about the
#' probability of failure function see section 6
#' on page 30 in CNAIM (2017).
#' @param lv_asset_category String.
#' A sting that refers to the specific asset category.
#' See See page 15, table 1 in CNAIM (2017).
#' @param lv_asset_category String The type of LV asset category
#' @param placement String. Specify if the asset is located outdoor or indoor.
#' @param altitude_m Numeric. Specify the altitude location for
#' the asset measured in meters from sea level.\code{altitude_m}
#' is used to derive the altitude factor. See page 107,
#' table 23 in CNAIM (2017). A setting of \code{"Default"}
#' will set the altitude factor to 1 independent of \code{asset_type}.
#' @param distance_from_coast_km Numeric. Specify the distance from the
#' coast measured in kilometers. \code{distance_from_coast_km} is used
#' to derive the distance from coast factor See page 106,
#' table 22 in CNAIM (2017). A setting of \code{"Default"} will set the
#' distance from coast factor to 1 independent of \code{asset_type}.
#' @param corrosion_category_index Integer.
#' Specify the corrosion index category, 1-5.
#' @param age Numeric. The current age in years of the conductor.
#' @param measured_condition_inputs Named list observed_conditions_input
#' @param observed_condition_inputs Named list observed_conditions_input
#' \code{conductor_samp = c("Low","Medium/Normal","High","Default")}.
#' See page 146-147, table 192 and 194 in CNAIM (2017).
#' @inheritParams current_health
#' @return Numeric. Current probability of failure
#' per annum per kilometer.
#' @source DNO Common Network Asset Indices Methodology (CNAIM),
#' Health & Criticality - Version 1.1, 2017:
#' \url{https://www.ofgem.gov.uk/system/files/docs/2017/05/dno_common_network_asset_indices_methodology_v1.1.pdf}
#' @export
#' @examples
#' # Current annual probability of failure for LV Switchgear and other
#'pof_lv_switchgear_and_other(
#'lv_asset_category = "LV Circuit Breaker",
#'placement = "Default",
#'altitude_m = "Default",
#'distance_from_coast_km = "Default",
#'corrosion_category_index = "Default",
#'age = 10,
#'observed_condition_inputs =
#'list("external_condition" =
#'list("Condition Criteria: Observed Condition" = "Default")),
#'measured_condition_inputs =
#'list("operational_adequacy" =
#'list("Condition Criteria: Operational Adequacy" = "Default")),
#'reliability_factor = "Default")
pof_lv_switchgear_and_other <-
  function(lv_asset_category = "LV Circuit Breaker",
           placement = "Default",
           altitude_m = "Default",
           distance_from_coast_km = "Default",
           corrosion_category_index = "Default",
           age,
           measured_condition_inputs,
           observed_condition_inputs,
           reliability_factor = "Default") {
  # Current annual probability of failure for LV switchgear and other
  # LV assets, per CNAIM (2017) section 6: a cubic approximation (first
  # three Taylor terms) of an exponential PoF curve in the current
  # health score.
  #
  # Bind column names used in non-standard evaluation to NULL so that
  # R CMD check does not report them as undefined globals.  (The original
  # listed `Asset Register Category` twice and also bound the
  # `Generic Term...` columns, whose values were computed but never used.)
  `Asset Register Category` = `Health Index Asset Category` =
    `Functional Failure Category` = `K-Value (%)` = `C-Value` = NULL

  # Map the asset register category onto its health index asset category.
  asset_category <- gb_ref$categorisation_of_assets %>%
    dplyr::filter(`Asset Register Category` == lv_asset_category) %>%
    dplyr::select(`Health Index Asset Category`) %>%
    dplyr::pull()

  # Normal expected life ------------------------------------------------
  normal_expected_life_cond <- gb_ref$normal_expected_life %>%
    dplyr::filter(`Asset Register Category` == lv_asset_category) %>%
    dplyr::pull()

  # Constants C and K for the PoF function --------------------------------
  # K is tabulated as a percentage, hence the division by 100.
  k <- gb_ref$pof_curve_parameters %>%
    dplyr::filter(`Functional Failure Category` %in% lv_asset_category) %>%
    dplyr::select(`K-Value (%)`) %>%
    dplyr::pull() / 100
  c <- gb_ref$pof_curve_parameters %>%
    dplyr::filter(`Functional Failure Category` %in% lv_asset_category) %>%
    dplyr::select(`C-Value`) %>%
    dplyr::pull()

  # Duty factor: constant (1) for this asset class.
  duty_factor_cond <- 1

  # Location factor --------------------------------------------------------
  location_factor_cond <- location_factor(placement,
                                          altitude_m,
                                          distance_from_coast_km,
                                          corrosion_category_index,
                                          asset_type = lv_asset_category)

  # Expected life -----------------------------------------------------------
  expected_life_years <- expected_life(normal_expected_life_cond,
                                       duty_factor_cond,
                                       location_factor_cond)

  # b1 (initial ageing rate) and initial health score ------------------------
  b1 <- beta_1(expected_life_years)
  initial_health_score <- initial_health(b1, age)

  # Measured and observed condition modifiers --------------------------------
  asset_category_mmi <- get_mmi_lv_switchgear_asset_category(lv_asset_category)
  mci_table_names <-
    get_gb_ref_measured_conditions_table_names_lv_switchgear(asset_category_mmi)
  measured_condition_modifier <-
    get_measured_conditions_modifier_lv_switchgear(asset_category_mmi,
                                                   mci_table_names,
                                                   measured_condition_inputs)
  oci_table_names <-
    get_gb_ref_observed_conditions_table_names_lv_switchgear(asset_category_mmi)
  observed_condition_modifier <-
    get_observed_conditions_modifier_lv_switchgear(asset_category_mmi,
                                                   oci_table_names,
                                                   observed_condition_inputs)

  # Health score factor, cap (min) and collar (max) ---------------------------
  health_score_factor <-
    health_score_excl_ehv_132kv_tf(observed_condition_modifier$condition_factor,
                                   measured_condition_modifier$condition_factor)
  health_score_cap <- min(observed_condition_modifier$condition_cap,
                          measured_condition_modifier$condition_cap)
  health_score_collar <- max(observed_condition_modifier$condition_collar,
                             measured_condition_modifier$condition_collar)
  health_score_modifier <- data.frame(health_score_factor,
                                      health_score_cap,
                                      health_score_collar)

  # Current health score ------------------------------------------------------
  current_health_score <-
    current_health(initial_health_score,
                   health_score_modifier$health_score_factor,
                   health_score_modifier$health_score_cap,
                   health_score_modifier$health_score_collar,
                   reliability_factor = reliability_factor)

  # Probability of failure: k * (1 + cH + (cH)^2/2! + (cH)^3/3!),
  # i.e. the first three terms of the Taylor series of exp(cH).
  probability_of_failure <- k *
    (1 + (c * current_health_score) +
       (((c * current_health_score)^2) / factorial(2)) +
       (((c * current_health_score)^3) / factorial(3)))

  return(probability_of_failure)
}
# Map a detailed LV asset register category onto the broader asset
# category name used by the MMI calibration and condition lookup tables.
# Uses fixed = TRUE (the original used the reassignable shorthand T).
# Falls through to an invisible NULL when no pattern matches, mirroring
# the original behaviour.
get_mmi_lv_switchgear_asset_category <- function(asset_category){
  if (grepl("LV Board", asset_category, fixed = TRUE))
    return("LV Board (WM)")
  if (grepl("LV Pillar", asset_category, fixed = TRUE))
    return("LV Pillars")
  if (grepl("LV Circuit Breaker", asset_category, fixed = TRUE))
    return("LV Circuit Breaker")
}
# Return the named list of gb_ref measured-condition (MCI) table names
# for the supplied MMI asset category.  Unknown categories yield an
# invisible NULL, as in the original if-chain implementation.
get_gb_ref_measured_conditions_table_names_lv_switchgear <- function(asset_category_mmi){
  switch(asset_category_mmi,
         "LV Board (WM)" = list(
           "operational_adequacy" = "mci_lv_board_wm_opsal_adequacy",
           "security" = "mci_lv_board_wm_security"),
         "LV Pillars" = list(
           "operational_adequacy" = "mci_lv_pillar_opsal_adequacy"),
         "LV Circuit Breaker" = list(
           "operational_adequacy" = "mci_lv_cb_opsal_adequacy"))
}
# Return the named list of gb_ref observed-condition (OCI) table names
# for the supplied MMI asset category.  Unknown categories yield an
# invisible NULL, as in the original if-chain implementation.
get_gb_ref_observed_conditions_table_names_lv_switchgear <- function(asset_category_mmi){
  switch(asset_category_mmi,
         "LV Board (WM)" = list(
           "switchgear_external_condition" = "oci_lv_board_swg_ext_cond",
           "compound_leak" = "oci_lv_board_wm_compound_leak",
           "switchgear_internal_condition_and_operation" = "oci_lv_board_wm_swg_int_cond"),
         "LV Pillars" = list(
           "compound_leak" = "oci_lv_pillar_compound_leak",
           "switchgear_external_condition" = "oci_lv_pillar_swg_ext_cond",
           "switchgear_internal_condition_and_operation" = "oci_lv_pillar_swg_int_cond_op",
           "insulation_condition" = "oci_lv_pillar_insulation_cond",
           "signs_heating" = "oci_lv_pillar_signs_heating",
           "phase_barrier" = "oci_lv_pillar_phase_barrier"),
         "LV Circuit Breaker" = list(
           "external_condition" = "oci_lv_circuit_breakr_ext_cond"))
}
# Combine the individual measured-condition inputs for an LV switchgear
# asset into a single modifier with a condition factor (via the MMI
# combination technique), a cap (minimum of the input caps) and a
# collar (maximum of the input collars).
get_measured_conditions_modifier_lv_switchgear <- function(asset_category_mmi, table_names, measured_condition_inputs){
  # MMI combination parameters for this asset category.
  calib <- gb_ref$measured_cond_modifier_mmi_cal
  calib <- calib[which(calib$`Asset Category` == asset_category_mmi), ]
  divider_one <- as.numeric(
    calib$`Parameters for Combination Using MMI Technique - Factor Divider 1`)
  divider_two <- as.numeric(
    calib$`Parameters for Combination Using MMI Technique - Factor Divider 2`)
  combined_max <- as.numeric(
    calib$`Parameters for Combination Using MMI Technique - Max. No. of Combined Factors`)

  # For each measured input, select the factor/cap/collar row of the
  # corresponding gb_ref table that matches the supplied criterion value.
  per_input <- list()
  for (input_name in names(table_names)) {
    lookup_table <- gb_ref[[table_names[[input_name]]]]
    criterion_col <- names(measured_condition_inputs[[input_name]])[1]
    criterion_val <- measured_condition_inputs[[input_name]][1]
    matched_row <- which(lookup_table[[criterion_col]] == criterion_val)
    per_input[[input_name]] <- lookup_table[matched_row, ] %>%
      dplyr::select(c("Condition Input Factor", "Condition Input Cap",
                      "Condition Input Collar"))
  }
  combined <- per_input %>% plyr::ldply()

  # Combine the factors with the MMI technique; cap/collar are min/max.
  combined_factor <- mmi(combined[["Condition Input Factor"]],
                         divider_one,
                         divider_two,
                         combined_max)
  data.frame(condition_factor = combined_factor,
             condition_cap = min(combined[["Condition Input Cap"]]),
             condition_collar = max(combined[["Condition Input Collar"]]))
}
# Combine the individual observed-condition inputs for an LV switchgear
# asset into a single modifier with a condition factor (via the MMI
# combination technique), a cap (minimum of the input caps) and a
# collar (maximum of the input collars).
get_observed_conditions_modifier_lv_switchgear <- function(asset_category_mmi, table_names, observed_condition_inputs){
  # MMI combination parameters for this asset category.
  calib <- gb_ref$observed_cond_modifier_mmi_cal
  calib <- calib[which(calib$`Asset Category` == asset_category_mmi), ]
  divider_one <- as.numeric(
    calib$`Parameters for Combination Using MMI Technique - Factor Divider 1`)
  divider_two <- as.numeric(
    calib$`Parameters for Combination Using MMI Technique - Factor Divider 2`)
  combined_max <- as.numeric(
    calib$`Parameters for Combination Using MMI Technique - Max. No. of Combined Factors`)

  # For each observed input, select the factor/cap/collar row of the
  # corresponding gb_ref table that matches the supplied criterion value.
  per_input <- list()
  for (input_name in names(table_names)) {
    lookup_table <- gb_ref[[table_names[[input_name]]]]
    criterion_col <- names(observed_condition_inputs[[input_name]])[1]
    criterion_val <- observed_condition_inputs[[input_name]][1]
    matched_row <- which(lookup_table[[criterion_col]] == criterion_val)
    per_input[[input_name]] <- lookup_table[matched_row, ] %>%
      dplyr::select(c("Condition Input Factor", "Condition Input Cap",
                      "Condition Input Collar"))
  }
  combined <- per_input %>% plyr::ldply()

  # Combine the factors with the MMI technique; cap/collar are min/max.
  combined_factor <- mmi(combined[["Condition Input Factor"]],
                         divider_one,
                         divider_two,
                         combined_max)
  data.frame(condition_factor = combined_factor,
             condition_cap = min(combined[["Condition Input Cap"]]),
             condition_collar = max(combined[["Condition Input Collar"]]))
}
| /R/pof_lv_switchgear_and_other.R | permissive | scoultersdcoe/CNAIM | R | false | false | 14,670 | r | #' @importFrom magrittr %>%
#' @title Current Probability of Failure for LV switchgear and others
#' @description This function calculates the current
#' annual probability of failure for LV switchgear and others
#' The function is a cubic curve that is based on
#' the first three terms of the Taylor series for an
#' exponential function. For more information about the
#' probability of failure function see section 6
#' on page 30 in CNAIM (2017).
#' @param lv_asset_category String. A string that refers to the specific
#' LV asset category. See page 15, table 1 in CNAIM (2017).
#' @param placement String. Specify if the asset is located outdoor or indoor.
#' @param altitude_m Numeric. Specify the altitude location for
#' the asset measured in meters from sea level.\code{altitude_m}
#' is used to derive the altitude factor. See page 107,
#' table 23 in CNAIM (2017). A setting of \code{"Default"}
#' will set the altitude factor to 1 independent of \code{asset_type}.
#' @param distance_from_coast_km Numeric. Specify the distance from the
#' coast measured in kilometers. \code{distance_from_coast_km} is used
#' to derive the distance from coast factor See page 106,
#' table 22 in CNAIM (2017). A setting of \code{"Default"} will set the
#' distance from coast factor to 1 independent of \code{asset_type}.
#' @param corrosion_category_index Integer.
#' Specify the corrosion index category, 1-5.
#' @param age Numeric. The current age in years of the conductor.
#' @param measured_condition_inputs Named list observed_conditions_input
#' @param observed_condition_inputs Named list observed_conditions_input
#' See page 146-147, table 192 and 194 in CNAIM (2017).
#' @inheritParams current_health
#' @return Numeric. Current probability of failure per annum.
#' @source DNO Common Network Asset Indices Methodology (CNAIM),
#' Health & Criticality - Version 1.1, 2017:
#' \url{https://www.ofgem.gov.uk/system/files/docs/2017/05/dno_common_network_asset_indices_methodology_v1.1.pdf}
#' @export
#' @examples
#' # Current annual probability of failure for LV Switchgear and other
#'pof_lv_switchgear_and_other(
#'lv_asset_category = "LV Circuit Breaker",
#'placement = "Default",
#'altitude_m = "Default",
#'distance_from_coast_km = "Default",
#'corrosion_category_index = "Default",
#'age = 10,
#'observed_condition_inputs =
#'list("external_condition" =
#'list("Condition Criteria: Observed Condition" = "Default")),
#'measured_condition_inputs =
#'list("operational_adequacy" =
#'list("Condition Criteria: Operational Adequacy" = "Default")),
#'reliability_factor = "Default")
pof_lv_switchgear_and_other <-
  function(lv_asset_category = "LV Circuit Breaker",
           placement = "Default",
           altitude_m = "Default",
           distance_from_coast_km = "Default",
           corrosion_category_index = "Default",
           age,
           measured_condition_inputs,
           observed_condition_inputs,
           reliability_factor = "Default") {
  # Current annual probability of failure for LV switchgear and other
  # LV assets, per CNAIM (2017) section 6: a cubic approximation (first
  # three Taylor terms) of an exponential PoF curve in the current
  # health score.
  #
  # Bind column names used in non-standard evaluation to NULL so that
  # R CMD check does not report them as undefined globals.  (The original
  # listed `Asset Register Category` twice and also bound the
  # `Generic Term...` columns, whose values were computed but never used.)
  `Asset Register Category` = `Health Index Asset Category` =
    `Functional Failure Category` = `K-Value (%)` = `C-Value` = NULL

  # Map the asset register category onto its health index asset category.
  asset_category <- gb_ref$categorisation_of_assets %>%
    dplyr::filter(`Asset Register Category` == lv_asset_category) %>%
    dplyr::select(`Health Index Asset Category`) %>%
    dplyr::pull()

  # Normal expected life ------------------------------------------------
  normal_expected_life_cond <- gb_ref$normal_expected_life %>%
    dplyr::filter(`Asset Register Category` == lv_asset_category) %>%
    dplyr::pull()

  # Constants C and K for the PoF function --------------------------------
  # K is tabulated as a percentage, hence the division by 100.
  k <- gb_ref$pof_curve_parameters %>%
    dplyr::filter(`Functional Failure Category` %in% lv_asset_category) %>%
    dplyr::select(`K-Value (%)`) %>%
    dplyr::pull() / 100
  c <- gb_ref$pof_curve_parameters %>%
    dplyr::filter(`Functional Failure Category` %in% lv_asset_category) %>%
    dplyr::select(`C-Value`) %>%
    dplyr::pull()

  # Duty factor: constant (1) for this asset class.
  duty_factor_cond <- 1

  # Location factor --------------------------------------------------------
  location_factor_cond <- location_factor(placement,
                                          altitude_m,
                                          distance_from_coast_km,
                                          corrosion_category_index,
                                          asset_type = lv_asset_category)

  # Expected life -----------------------------------------------------------
  expected_life_years <- expected_life(normal_expected_life_cond,
                                       duty_factor_cond,
                                       location_factor_cond)

  # b1 (initial ageing rate) and initial health score ------------------------
  b1 <- beta_1(expected_life_years)
  initial_health_score <- initial_health(b1, age)

  # Measured and observed condition modifiers --------------------------------
  asset_category_mmi <- get_mmi_lv_switchgear_asset_category(lv_asset_category)
  mci_table_names <-
    get_gb_ref_measured_conditions_table_names_lv_switchgear(asset_category_mmi)
  measured_condition_modifier <-
    get_measured_conditions_modifier_lv_switchgear(asset_category_mmi,
                                                   mci_table_names,
                                                   measured_condition_inputs)
  oci_table_names <-
    get_gb_ref_observed_conditions_table_names_lv_switchgear(asset_category_mmi)
  observed_condition_modifier <-
    get_observed_conditions_modifier_lv_switchgear(asset_category_mmi,
                                                   oci_table_names,
                                                   observed_condition_inputs)

  # Health score factor, cap (min) and collar (max) ---------------------------
  health_score_factor <-
    health_score_excl_ehv_132kv_tf(observed_condition_modifier$condition_factor,
                                   measured_condition_modifier$condition_factor)
  health_score_cap <- min(observed_condition_modifier$condition_cap,
                          measured_condition_modifier$condition_cap)
  health_score_collar <- max(observed_condition_modifier$condition_collar,
                             measured_condition_modifier$condition_collar)
  health_score_modifier <- data.frame(health_score_factor,
                                      health_score_cap,
                                      health_score_collar)

  # Current health score ------------------------------------------------------
  current_health_score <-
    current_health(initial_health_score,
                   health_score_modifier$health_score_factor,
                   health_score_modifier$health_score_cap,
                   health_score_modifier$health_score_collar,
                   reliability_factor = reliability_factor)

  # Probability of failure: k * (1 + cH + (cH)^2/2! + (cH)^3/3!),
  # i.e. the first three terms of the Taylor series of exp(cH).
  probability_of_failure <- k *
    (1 + (c * current_health_score) +
       (((c * current_health_score)^2) / factorial(2)) +
       (((c * current_health_score)^3) / factorial(3)))

  return(probability_of_failure)
}
# Map a detailed LV asset register category onto the broader asset
# category name used by the MMI calibration and condition lookup tables.
# Uses fixed = TRUE (the original used the reassignable shorthand T).
# Falls through to an invisible NULL when no pattern matches, mirroring
# the original behaviour.
get_mmi_lv_switchgear_asset_category <- function(asset_category){
  if (grepl("LV Board", asset_category, fixed = TRUE))
    return("LV Board (WM)")
  if (grepl("LV Pillar", asset_category, fixed = TRUE))
    return("LV Pillars")
  if (grepl("LV Circuit Breaker", asset_category, fixed = TRUE))
    return("LV Circuit Breaker")
}
# Return the named list of gb_ref measured-condition (MCI) table names
# for the supplied MMI asset category.  Unknown categories yield an
# invisible NULL, as in the original if-chain implementation.
get_gb_ref_measured_conditions_table_names_lv_switchgear <- function(asset_category_mmi){
  switch(asset_category_mmi,
         "LV Board (WM)" = list(
           "operational_adequacy" = "mci_lv_board_wm_opsal_adequacy",
           "security" = "mci_lv_board_wm_security"),
         "LV Pillars" = list(
           "operational_adequacy" = "mci_lv_pillar_opsal_adequacy"),
         "LV Circuit Breaker" = list(
           "operational_adequacy" = "mci_lv_cb_opsal_adequacy"))
}
# Return the named list of gb_ref observed-condition (OCI) table names
# for the supplied MMI asset category.  Unknown categories yield an
# invisible NULL, as in the original if-chain implementation.
get_gb_ref_observed_conditions_table_names_lv_switchgear <- function(asset_category_mmi){
  switch(asset_category_mmi,
         "LV Board (WM)" = list(
           "switchgear_external_condition" = "oci_lv_board_swg_ext_cond",
           "compound_leak" = "oci_lv_board_wm_compound_leak",
           "switchgear_internal_condition_and_operation" = "oci_lv_board_wm_swg_int_cond"),
         "LV Pillars" = list(
           "compound_leak" = "oci_lv_pillar_compound_leak",
           "switchgear_external_condition" = "oci_lv_pillar_swg_ext_cond",
           "switchgear_internal_condition_and_operation" = "oci_lv_pillar_swg_int_cond_op",
           "insulation_condition" = "oci_lv_pillar_insulation_cond",
           "signs_heating" = "oci_lv_pillar_signs_heating",
           "phase_barrier" = "oci_lv_pillar_phase_barrier"),
         "LV Circuit Breaker" = list(
           "external_condition" = "oci_lv_circuit_breakr_ext_cond"))
}
# Combine the individual measured-condition inputs for an LV switchgear
# asset into a single modifier with a condition factor (via the MMI
# combination technique), a cap (minimum of the input caps) and a
# collar (maximum of the input collars).
get_measured_conditions_modifier_lv_switchgear <- function(asset_category_mmi, table_names, measured_condition_inputs){
  # MMI combination parameters for this asset category.
  calib <- gb_ref$measured_cond_modifier_mmi_cal
  calib <- calib[which(calib$`Asset Category` == asset_category_mmi), ]
  divider_one <- as.numeric(
    calib$`Parameters for Combination Using MMI Technique - Factor Divider 1`)
  divider_two <- as.numeric(
    calib$`Parameters for Combination Using MMI Technique - Factor Divider 2`)
  combined_max <- as.numeric(
    calib$`Parameters for Combination Using MMI Technique - Max. No. of Combined Factors`)

  # For each measured input, select the factor/cap/collar row of the
  # corresponding gb_ref table that matches the supplied criterion value.
  per_input <- list()
  for (input_name in names(table_names)) {
    lookup_table <- gb_ref[[table_names[[input_name]]]]
    criterion_col <- names(measured_condition_inputs[[input_name]])[1]
    criterion_val <- measured_condition_inputs[[input_name]][1]
    matched_row <- which(lookup_table[[criterion_col]] == criterion_val)
    per_input[[input_name]] <- lookup_table[matched_row, ] %>%
      dplyr::select(c("Condition Input Factor", "Condition Input Cap",
                      "Condition Input Collar"))
  }
  combined <- per_input %>% plyr::ldply()

  # Combine the factors with the MMI technique; cap/collar are min/max.
  combined_factor <- mmi(combined[["Condition Input Factor"]],
                         divider_one,
                         divider_two,
                         combined_max)
  data.frame(condition_factor = combined_factor,
             condition_cap = min(combined[["Condition Input Cap"]]),
             condition_collar = max(combined[["Condition Input Collar"]]))
}
# Combine the individual observed-condition inputs for an LV switchgear
# asset into a single modifier with a condition factor (via the MMI
# combination technique), a cap (minimum of the input caps) and a
# collar (maximum of the input collars).
get_observed_conditions_modifier_lv_switchgear <- function(asset_category_mmi, table_names, observed_condition_inputs){
  # MMI combination parameters for this asset category.
  calib <- gb_ref$observed_cond_modifier_mmi_cal
  calib <- calib[which(calib$`Asset Category` == asset_category_mmi), ]
  divider_one <- as.numeric(
    calib$`Parameters for Combination Using MMI Technique - Factor Divider 1`)
  divider_two <- as.numeric(
    calib$`Parameters for Combination Using MMI Technique - Factor Divider 2`)
  combined_max <- as.numeric(
    calib$`Parameters for Combination Using MMI Technique - Max. No. of Combined Factors`)

  # For each observed input, select the factor/cap/collar row of the
  # corresponding gb_ref table that matches the supplied criterion value.
  per_input <- list()
  for (input_name in names(table_names)) {
    lookup_table <- gb_ref[[table_names[[input_name]]]]
    criterion_col <- names(observed_condition_inputs[[input_name]])[1]
    criterion_val <- observed_condition_inputs[[input_name]][1]
    matched_row <- which(lookup_table[[criterion_col]] == criterion_val)
    per_input[[input_name]] <- lookup_table[matched_row, ] %>%
      dplyr::select(c("Condition Input Factor", "Condition Input Cap",
                      "Condition Input Collar"))
  }
  combined <- per_input %>% plyr::ldply()

  # Combine the factors with the MMI technique; cap/collar are min/max.
  combined_factor <- mmi(combined[["Condition Input Factor"]],
                         divider_one,
                         divider_two,
                         combined_max)
  data.frame(condition_factor = combined_factor,
             condition_cap = min(combined[["Condition Input Cap"]]),
             condition_collar = max(combined[["Condition Input Collar"]]))
}
|
# LatticeKrig
# Copyright 2004-2011, Institute for Mathematics Applied Geosciences
# University Corporation for Atmospheric Research
# Licensed under the GPL -- www.gpl.org/licenses/gpl.html
library( LatticeKrig)
options( echo=FALSE)
##########################################
# Test section 1: single-level LKrig vs. hand-computed generalized
# least squares / standard Kriging on the ozone2 data.
test.for.zero.flag<- 1
data( ozone2)
x<-ozone2$lon.lat
# observations for one day of the ozone2 record (row 16) -- NOTE(review):
# later sections index the same matrix as y[,16]; confirm which is intended.
y<- ozone2$y[16,]
good <- !is.na( y)
x<- x[good,]
y<- y[good]
N<- length( y)
a.wght<- 5
lambda <- 1.5
obj<- LKrig( x,y,NC=16, lambda=lambda, a.wght=a.wght, alpha=1, nlevel=1, NtrA=20,iseed=122)
LKinfo<- obj$LKinfo
# build the observation covariance matrix M = K + lambda*I by hand
K<- LKrig.cov( x,x,LKinfo)
tempM<- K
diag(tempM) <- (lambda) + diag(tempM)
# Mi is proportional to the inverse of the covariance matrix for the observations
Mi<- solve( tempM)
# fixed-part design matrix: intercept plus linear terms in the coordinates
T.matrix<- cbind( rep(1,N),x)
# this is estimating the fixed part using generalized least squares
d.coef0 <- solve( t(T.matrix)%*%Mi%*%T.matrix, t(T.matrix)%*%Mi%*%y)
test.for.zero( obj$d.coef, d.coef0, tag="d from LKrig and by hand")
#### this is "c" coefficients for standard Kriging equations as done in mKrig
temp2<- chol( tempM)
c.coef0 <- forwardsolve(temp2, transpose = TRUE,
(y- T.matrix%*%d.coef0), upper.tri = TRUE)
c.coef0 <- backsolve(temp2, c.coef0)
### find these using mKrig (still standard Kriging) but using the LatticeKrig covariance function:
obj0<- mKrig( x,y, lambda=lambda, m=2, cov.function="LKrig.cov",
cov.args=list(LKinfo=LKinfo),
NtrA=20, iseed=122)
test.for.zero( obj0$c, c.coef0, tag="c from mKrig and by hand" )
# we also know that for standard Kriging
# residuals = lambda* c.coef0
# use this to check the initial LatticeKrig result
test.for.zero( obj0$fitted.values, obj$fitted.values)
# here is a nontrivial test: comparing the same Kriging estimate
# using mKrig (via covariance function) and LatticeKrig (via precision and S-M-W identities)
test.for.zero( lambda*obj0$c, (y-obj$fitted.values),
tag="c from mKrig and from residuals of LatticeKrig (this is big!)" )
# compare Monte Carlo estimates of trace
test.for.zero( obj$trA.info, obj0$trA.info, tag="Monte Carlo traces")
#
# Test section 2: a three-level covariance model; LKrig and mKrig
# (with the LatticeKrig covariance) should agree on fits and d coefficients.
#
alpha<- c(1,.5,.2)
nlevel<-3
a.wght<- c(5,5,10)
lambda<- .1
obj<- LKrig( x,y,NC=5, lambda=lambda,
nlevel=nlevel, alpha=alpha,a.wght=a.wght, NtrA=20,iseed=122)
LKinfo<- obj$LKinfo
obj0<- mKrig( x,y, lambda=lambda, m=2, cov.function="LKrig.cov",
cov.args=list(LKinfo=LKinfo),
NtrA=20, iseed=122)
test.for.zero( obj0$fitted.values, obj$fitted.values)
test.for.zero( obj$d.coef, obj0$d, tag= "d from Lattice Krig and mKrig")
#### tests with spatially varying alpha's
# first a sanity check that the marginalization and alpha option is working
# even when alpha's constant (obviously this is useful for debugging!)
# nu weighted alpha that constant by level to get the basis function counts.
LKinfo<- LKrigSetup( x, NC=5, nlevel=3, nu=1, a.wght=5)
# number of basis functions per level (lattice dimensions multiplied out)
N<- LKinfo$latticeInfo$mx[,1] * LKinfo$latticeInfo$mx[,2]
glist<- fields.x.to.grid( x,10, 10)
xg<- make.surface.grid(glist)
# constant-by-level alpha, one entry per basis function
alpha.list<- list( rep( 1,N[1]), rep(.5,N[2]), rep( .2,N[3]))
LKinfo2<- LKrigSetup( x, NC=5, nlevel=3, alpha= alpha.list, a.wght=5)
PHI1<- LKrig.basis(x, LKinfo2)
PHI2<- LKrig.basis(xg, LKinfo2)
Q<- LKrig.precision( LKinfo2)
# find covariance matrix "by hand" for this case.
Ktest1<- PHI1%*%solve(Q)%*%t(PHI2)
test.for.zero( Ktest1, LKrig.cov( x,xg, LKinfo=LKinfo2), tag="spatial alpha cov")
# check marginal variance
Ktest2<- PHI2%*%solve(Q)%*%t(PHI2)
test.for.zero( diag(Ktest2), LKrig.cov( xg, LKinfo=LKinfo2, marginal =TRUE), tag="spatial alpha mar var")
# now varying alphas using same kind of tests
set.seed( 678)
# here alpha weights are all different and random.
alpha.list<- list( runif(N[1]), runif(N[2]), runif(N[3]) )
LKinfo2<- LKrigSetup( x, NC=5, nlevel=3, alpha= alpha.list, a.wght=5)
PHI1<- LKrig.basis(x, LKinfo2)
PHI2<- LKrig.basis(xg, LKinfo2)
Q<- LKrig.precision( LKinfo2)
Ktest1<- PHI1%*%solve(Q)%*%t(PHI2)
test.for.zero( Ktest1, LKrig.cov( x,xg, LKinfo=LKinfo2), tag="spatial alpha cov w/ tricksy alpha")
Ktest2<- PHI2%*%solve(Q)%*%t(PHI2)
test.for.zero( diag(Ktest2), LKrig.cov( xg, LKinfo=LKinfo2, marginal =TRUE), tag="spatial alpha mar var tricksy alpha")
lambda<- .1
obj<- LKrig( x,y,NC=5, lambda=lambda,
nlevel=nlevel, alpha=alpha,a.wght=a.wght, NtrA=20,iseed=122)
LKinfo<- obj$LKinfo
obj0<- mKrig( x,y, lambda=lambda, m=2, cov.function="LKrig.cov",
cov.args=list(LKinfo=LKinfo),
NtrA=20, iseed=122)
# check predicted values
glist<- fields.x.to.grid( x,10, 10)
xg<- make.surface.grid(glist)
grid.info<- obj$LKinfo$grid.info
LKinfo<- obj$LKinfo
# first "by hand"
Tmatrix<- cbind( rep(1,nrow(xg)), xg)
# NOTE(review): yhat0 is computed but only yhat1 is asserted below.
yhat0<- Tmatrix%*%obj0$d +
LKrig.cov( xg,x, LKinfo)%*%obj0$c
PHIg<- LKrig.basis( xg, LKinfo)
yhat1<- Tmatrix%*%obj$d.coef + PHIg%*%obj$c.coef
test.for.zero( yhat1, predict(obj,xg), tag="predicted values from LatticeKrig and by hand, spatial alpha" )
test.for.zero( predict(obj,xg), predict(obj0,xg),
tag="predicted values LatticeKrig and mKrig, spatial alpha")
########## done with spatially varying alpha
# Test section 3: log determinant and quadratic-form identities used in
# the LatticeKrig log likelihood.
# tests for computing the determinant and quad form from log likelihood
# see LatticeKrig tech report to sort these opaque computations!
rm( obj, obj0) # remove previous objects
test.for.zero.flag<- 1
alpha<- c(1,.5,.5)
nlevel<-3
a.wght<- c(5,5,10)
# log determinant of a symmetric matrix via its eigenvalues
lnDet<- function(A){
sum( log( eigen( A, symmetric=TRUE)$values))}
data( ozone2)
x<-ozone2$lon.lat
# NOTE(review): column 16 here vs. row 16 in section 1 -- confirm intended.
y<- ozone2$y[,16]
good <- !is.na( y)
x<- x[good,]
y<- y[good]
# keep only six observations so the matrices stay tiny
x<- x[1:6,]
y<- y[1:6]
#x<- transformx(x, "range")
N<- length( y)
lambda <- .8
# a micro sized lattice so determinant is not too big or small
obj<- LKrig( x,y,NC=3, NC.buffer=1, lambda=lambda,nlevel=nlevel,alpha=alpha,a.wght=a.wght,
NtrA=5,iseed=122)
LKinfo<- obj$LKinfo
grid.info<- LKinfo$grid.info
PHI<- LKrig.basis( x,LKinfo)
Q <- LKrig.precision(LKinfo)
# coerce to full matrix
Q<- as.matrix(Q)
Mtest<- PHI%*% (solve( Q)) %*% t( PHI) + diag(lambda, N)
temp<- t(PHI)%*%PHI + lambda*Q
A<- Q*lambda
B1<- PHI%*% (solve( A)) %*% t( PHI) + diag(1, N)
B2<- t(PHI)%*%PHI + A
# the bullet proof application of identity
test.for.zero(lnDet( B1),lnDet( B2)- lnDet(A))
test.for.zero(
lnDet( PHI%*% (solve( Q*lambda)) %*% t( PHI) + diag(1, N)),
lnDet( t(PHI)%*%PHI + Q*lambda) - lnDet(Q*lambda) )
# now adjusting for lambda factor
test.for.zero( lambda*B1, Mtest)
test.for.zero(lnDet( Mtest), lnDet(B2) - lnDet(lambda*Q) + N*log(lambda) )
test.for.zero(lnDet( Mtest), lnDet(B2) - lnDet(Q) + (-LKinfo$latticeInfo$m + N)*log(lambda) )
# find log determinant of temp using cholesky factors
# applying det identity
temp<- t(PHI)%*%PHI + lambda*Q
chol( temp)-> Mc
lnDetReg <- 2 * sum(log(diag(Mc)))
lnDetQ<- 2* sum( log( diag( chol(Q))))
lnDetCov<- lnDetReg - lnDetQ + (-LKinfo$latticeInfo$m + N)*log(lambda)
test.for.zero( lnDetCov, lnDet( Mtest))
test.for.zero( obj$lnDetCov, lnDet( Mtest), tag="LatticeKrig and direct test of lnDetCov")
#
###### check of formula with weights
# Test section 4: the same determinant identities with a diagonal
# weight matrix W on the observations.
set.seed(123)
weights<- runif(N)
W<- diag(weights)
lambda<- .5
PHI<- as.matrix(LKrig.basis( x,LKinfo))
Q <- as.matrix(LKrig.precision(LKinfo))
# observation covariance with weighted nugget: M1 = PHI Q^{-1} PHI' + lambda W^{-1}
M1<- PHI%*%solve( Q)%*%t(PHI) + lambda*solve( W)
# three algebraically equivalent forms of the regularized normal matrix
B1<- (t(PHI)%*%(W/lambda)%*%PHI + Q)
B2<- (1/lambda) * ( t(PHI)%*%(W)%*%PHI + lambda*Q)
B3<- t(PHI)%*%(W)%*%PHI + lambda*Q
N2<- nrow(Q)
hold<- lnDet( M1)
test.for.zero( lnDet( B1) - lnDet(Q) - lnDet( W/lambda), hold, tag="Det with weights1")
test.for.zero( lnDet( B2) - lnDet(Q) - lnDet( W/lambda), hold, tag="Det with weights1=2")
test.for.zero( lnDet( B3) - lnDet(Q) - sum( log( weights)) + (N-N2)*log(lambda), hold, tag="Det with weights3")
# now check these formulas as implemented in LatticeKrig
# Test section 5: lnDetCov, quadratic form and profile likelihood should
# agree between LKrig and mKrig on a small unweighted problem.
rm( obj) # remove previous objects
data( ozone2)
x<-ozone2$lon.lat[1:10,]
y<- ozone2$y[16,1:10]
good <- !is.na( y)
x<- x[good,]
y<- y[good]
#x<- transformx(x, "range")
N<- length( y)
lambda <- .8
# a micro sized lattice so determinant is not too big or small
obj<- LKrig( x,y,NC=5, lambda=lambda,nlevel=nlevel,alpha=alpha,a.wght=a.wght,
NtrA=5,iseed=122)
obj0<- mKrig( x,y, lambda=lambda, m=2, cov.function="LKrig.cov",
cov.args=list(LKinfo=obj$LKinfo),
NtrA=20, iseed=122)
test.for.zero( obj$lnDetCov,obj0$lnDetCov, tag= "lnDetCov for mKrig and LatticeKrig")
test.for.zero( obj$quad.form, obj0$quad.form, tag= "quadratic forms for rho hat")
test.for.zero( obj0$lnProfileLike, obj$lnProfileLike,
tag="Profile Likelihood concentrated on lambda" )
# repeat tests for weighted measurement errors.
# recopy data to make reading easier
# Test section 6: fits with observation weights should agree across
# Krig, mKrig and LKrig.
rm( obj, obj0) # remove previous objects
data( ozone2)
x<-ozone2$lon.lat[1:10,]
y<- ozone2$y[16,1:10]
good <- !is.na( y)
x<- x[good,]
y<- y[good]
#x<- transformx(x, "range")
N<- length( y)
alpha<- c(1,.5,.25)
nlevel<-3
a.wght<- c(5, 5, 4.5)
lambda <- .5
N<- length(y)
set.seed(243)
weights<- runif(N)*10 + 30
# weights<- rep( 1, N)
test.for.zero.flag<- 1
obj<- LKrig( x,y,weights,NC=5,
lambda=lambda,alpha=alpha,nlevel=nlevel, a.wght=a.wght, NtrA=5,iseed=122)
# compare mKrig and Krig with weights and LatticeKrig
obj0<- mKrig( x,y,weights, lambda=lambda, m=2, cov.function="LKrig.cov",
cov.args=list(LKinfo=obj$LKinfo),
NtrA=20, iseed=122)
obj1<- Krig( x,y,weights=weights, lambda=lambda,GCV=TRUE, m=2,
cov.function="LKrig.cov", cov.args=list(LKinfo=obj$LKinfo))
test.for.zero( obj0$fitted.values, obj1$fitted.values)
test.for.zero( predict(obj0), predict(obj1), tag="predicted values mKrig/Krig w/weights")
test.for.zero( obj0$rhohat, obj1$rhohat,tag="compare rhohat for mKrig and Krig with weights")
############ now tests for LatticeKrig
test.for.zero( obj$fitted.values, obj0$fitted.values)
test.for.zero( obj$rho.MLE, obj0$rho.MLE)
test.for.zero( obj$lnDetCov, obj0$lnDetCov)
############# tests using reuse Mc options
# Test section 7: reusing a previously returned Cholesky factor
# (use.cholesky=) must give the same coefficients as a fresh fit.
data( ozone2)
x<-ozone2$lon.lat[1:20,]
y<- ozone2$y[16,1:20]
good <- !is.na( y)
x<- x[good,]
y<- y[good]
N<- length(y)
set.seed(243)
weights<- runif(N)*10
#x<- transformx(x, "range")
N<- length( y)
alpha<- c(1,.5,.5)
nlevel<-3
a.wght<- c(5,5,10)
lambda <- .8
obj<- LKrig( x,y,weights=weights,NC=15, lambda=lambda,alpha=alpha,
nlevel=nlevel,a.wght=a.wght, return.cholesky=TRUE)
# refit at a different lambda, reusing the symbolic factorization from obj
obj2<- LKrig( x,y,weights=weights,NC=15, lambda=2*lambda,alpha=alpha,
nlevel=nlevel,a.wght=a.wght, use.cholesky=obj$Mc)
obj3<- LKrig( x,y,weights=weights,NC=15, lambda=2*lambda,alpha=alpha,
nlevel=nlevel,a.wght=a.wght, return.cholesky=TRUE)
test.for.zero( obj3$c.coef, obj2$c.coef, tag="test of LatticeKrig.coef c")
test.for.zero( obj3$d.coef, obj2$d.coef, tag="test of LatticeKrig.coef d")
Q<- LKrig.precision(obj3$LKinfo)
look2<-LKrig.lnPlike(obj3$Mc,Q,sqrt(weights)*y, obj3$residuals, weights,obj3$LKinfo)
test.for.zero( look2$lnProfileLike, obj3$lnProfileLike)
# all done!
cat("Done testing LatticeKrig",fill=TRUE)
# NOTE(review): echo was already FALSE at the top of the script; this
# looks like it was meant to restore echo=TRUE -- confirm.
options( echo=FALSE)
# SUPPLEMENT: commented out sanity checks for weighted/unweighted versions of mKrig and Krig
#hold0<-Krig ( x,y,weights=weights,method="user",GCV=TRUE,lambda=1e-3,
# cov.function="Exp.simple.cov", cov.args=list( theta=300) )
#hold1<-mKrig(x,y,weights, lambda=1e-3,cov.function="Exp.simple.cov", cov.args=list( theta=300))
#test.for.zero( predict(hold0), predict(hold1))
| /LatticeKrig/tests/LKrig.test.R | no_license | ingted/R-Examples | R | false | false | 12,204 | r | # LatticeKrig
# Copyright 2004-2011, Institute for Mathematics Applied Geosciences
# University Corporation for Atmospheric Research
# Licensed under the GPL -- www.gpl.org/licenses/gpl.html
library( LatticeKrig)
options( echo=FALSE)
##########################################
test.for.zero.flag<- 1
data( ozone2)
x<-ozone2$lon.lat
y<- ozone2$y[16,]
good <- !is.na( y)
x<- x[good,]
y<- y[good]
N<- length( y)
a.wght<- 5
lambda <- 1.5
obj<- LKrig( x,y,NC=16, lambda=lambda, a.wght=a.wght, alpha=1, nlevel=1, NtrA=20,iseed=122)
LKinfo<- obj$LKinfo
K<- LKrig.cov( x,x,LKinfo)
tempM<- K
diag(tempM) <- (lambda) + diag(tempM)
# Mi is proportional to the inverse of the covariance matrix for the observations
Mi<- solve( tempM)
T.matrix<- cbind( rep(1,N),x)
# this is estimating the fixed part using generalized least squares
d.coef0 <- solve( t(T.matrix)%*%Mi%*%T.matrix, t(T.matrix)%*%Mi%*%y)
test.for.zero( obj$d.coef, d.coef0, tag="d from LKrig and by hand")
#### this is "c" coefficients for standard Kriging equations as done in mKrig
temp2<- chol( tempM)
c.coef0 <- forwardsolve(temp2, transpose = TRUE,
(y- T.matrix%*%d.coef0), upper.tri = TRUE)
c.coef0 <- backsolve(temp2, c.coef0)
### find these using mKrig (still standard Kriging) but using the the LatticeKrig covariance function:
obj0<- mKrig( x,y, lambda=lambda, m=2, cov.function="LKrig.cov",
cov.args=list(LKinfo=LKinfo),
NtrA=20, iseed=122)
test.for.zero( obj0$c, c.coef0, tag="c from mKrig and by hand" )
# we also know that for standard Kriging
# residuals = lambda* c.coef0
# use this to check the initial LatticeKrig result
test.for.zero( obj0$fitted.values, obj$fitted.values)
# here is a nontrivial test: comparing the same Kriging estimate
# using mKrig (via covariance function) and LatticeKrig (via precision and S-M-W identities)
test.for.zero( lambda*obj0$c, (y-obj$fitted.values),
tag="c from mKrig and from residuals of LatticeKrig (this is big!)" )
# compare Monte Carlo estimates of trace
test.for.zero( obj$trA.info, obj0$trA.info, tag="Monte Carlo traces")
#
# test more complex covariance model:
#
alpha<- c(1,.5,.2)
nlevel<-3
a.wght<- c(5,5,10)
lambda<- .1
obj<- LKrig( x,y,NC=5, lambda=lambda,
nlevel=nlevel, alpha=alpha,a.wght=a.wght, NtrA=20,iseed=122)
LKinfo<- obj$LKinfo
obj0<- mKrig( x,y, lambda=lambda, m=2, cov.function="LKrig.cov",
cov.args=list(LKinfo=LKinfo),
NtrA=20, iseed=122)
test.for.zero( obj0$fitted.values, obj$fitted.values)
test.for.zero( obj$d.coef, obj0$d, tag= "d from Lattice Krig and mKrig")
#### tests with spatially varying alpha's
# first a sanity check that the marginalization and alpha option is working
# even when alpha's constant (obviously this is useful for debugging!)
# nu weighted alpha that constant by level to get the basis function counts.
LKinfo<- LKrigSetup( x, NC=5, nlevel=3, nu=1, a.wght=5)
N<- LKinfo$latticeInfo$mx[,1] * LKinfo$latticeInfo$mx[,2]
glist<- fields.x.to.grid( x,10, 10)
xg<- make.surface.grid(glist)
alpha.list<- list( rep( 1,N[1]), rep(.5,N[2]), rep( .2,N[3]))
LKinfo2<- LKrigSetup( x, NC=5, nlevel=3, alpha= alpha.list, a.wght=5)
PHI1<- LKrig.basis(x, LKinfo2)
PHI2<- LKrig.basis(xg, LKinfo2)
Q<- LKrig.precision( LKinfo2)
# find covariance matrix "by hand" for this case.
Ktest1<- PHI1%*%solve(Q)%*%t(PHI2)
test.for.zero( Ktest1, LKrig.cov( x,xg, LKinfo=LKinfo2), tag="spatial alpha cov")
# check marginal variance
Ktest2<- PHI2%*%solve(Q)%*%t(PHI2)
test.for.zero( diag(Ktest2), LKrig.cov( xg, LKinfo=LKinfo2, marginal =TRUE), tag="spatial alpha mar var")
# now varying alphas using same kind of tests
set.seed( 678)
# here alpha weights are all different and random.
alpha.list<- list( runif(N[1]), runif(N[2]), runif(N[3]) )
LKinfo2<- LKrigSetup( x, NC=5, nlevel=3, alpha= alpha.list, a.wght=5)
PHI1<- LKrig.basis(x, LKinfo2)
PHI2<- LKrig.basis(xg, LKinfo2)
Q<- LKrig.precision( LKinfo2)
Ktest1<- PHI1%*%solve(Q)%*%t(PHI2)
test.for.zero( Ktest1, LKrig.cov( x,xg, LKinfo=LKinfo2), tag="spatial alpha cov w/ tricksy alpha")
Ktest2<- PHI2%*%solve(Q)%*%t(PHI2)
test.for.zero( diag(Ktest2), LKrig.cov( xg, LKinfo=LKinfo2, marginal =TRUE), tag="spatial alpha mar var tricksy alpha")
lambda<- .1
obj<- LKrig( x,y,NC=5, lambda=lambda,
nlevel=nlevel, alpha=alpha,a.wght=a.wght, NtrA=20,iseed=122)
LKinfo<- obj$LKinfo
obj0<- mKrig( x,y, lambda=lambda, m=2, cov.function="LKrig.cov",
cov.args=list(LKinfo=LKinfo),
NtrA=20, iseed=122)
# check predicted values
glist<- fields.x.to.grid( x,10, 10)
xg<- make.surface.grid(glist)
grid.info<- obj$LKinfo$grid.info
LKinfo<- obj$LKinfo
# first "by hand"
Tmatrix<- cbind( rep(1,nrow(xg)), xg)
yhat0<- Tmatrix%*%obj0$d +
LKrig.cov( xg,x, LKinfo)%*%obj0$c
PHIg<- LKrig.basis( xg, LKinfo)
yhat1<- Tmatrix%*%obj$d.coef + PHIg%*%obj$c.coef
test.for.zero( yhat1, predict(obj,xg), tag="predicted values from LatticeKrig and by hand, spatial alpha" )
test.for.zero( predict(obj,xg), predict(obj0,xg),
tag="predicted values LatticeKrig and mKrig, spatial alpha")
########## done with spatially varying alpha
# tests for computing the determinant and quad form from log likelihood
# see LatticeKrig tech report to sort these opaque computations!
rm( obj, obj0) # remove previous objects
test.for.zero.flag<- 1
alpha<- c(1,.5,.5)
nlevel<-3
a.wght<- c(5,5,10)
lnDet<- function(A){
sum( log( eigen( A, symmetric=TRUE)$values))}
data( ozone2)
x<-ozone2$lon.lat
y<- ozone2$y[,16]
good <- !is.na( y)
x<- x[good,]
y<- y[good]
x<- x[1:6,]
y<- y[1:6]
#x<- transformx(x, "range")
N<- length( y)
lambda <- .8
# a micro sized lattice so determinant is not too big or small
obj<- LKrig( x,y,NC=3, NC.buffer=1, lambda=lambda,nlevel=nlevel,alpha=alpha,a.wght=a.wght,
NtrA=5,iseed=122)
LKinfo<- obj$LKinfo
grid.info<- LKinfo$grid.info
PHI<- LKrig.basis( x,LKinfo)
Q <- LKrig.precision(LKinfo)
# coerce to full matrix
Q<- as.matrix(Q)
Mtest<- PHI%*% (solve( Q)) %*% t( PHI) + diag(lambda, N)
temp<- t(PHI)%*%PHI + lambda*Q
A<- Q*lambda
B1<- PHI%*% (solve( A)) %*% t( PHI) + diag(1, N)
B2<- t(PHI)%*%PHI + A
# the bullet proof application of identity
test.for.zero(lnDet( B1),lnDet( B2)- lnDet(A))
test.for.zero(
lnDet( PHI%*% (solve( Q*lambda)) %*% t( PHI) + diag(1, N)),
lnDet( t(PHI)%*%PHI + Q*lambda) - lnDet(Q*lambda) )
# now adjusting for lambda factor
test.for.zero( lambda*B1, Mtest)
test.for.zero(lnDet( Mtest), lnDet(B2) - lnDet(lambda*Q) + N*log(lambda) )
test.for.zero(lnDet( Mtest), lnDet(B2) - lnDet(Q) + (-LKinfo$latticeInfo$m + N)*log(lambda) )
# find log determinant of temp using cholesky factors
# applying det identity
temp<- t(PHI)%*%PHI + lambda*Q
chol( temp)-> Mc
lnDetReg <- 2 * sum(log(diag(Mc)))
lnDetQ<- 2* sum( log( diag( chol(Q))))
lnDetCov<- lnDetReg - lnDetQ + (-LKinfo$latticeInfo$m + N)*log(lambda)
test.for.zero( lnDetCov, lnDet( Mtest))
test.for.zero( obj$lnDetCov, lnDet( Mtest), tag="LatticeKrig and direct test of lnDetCov")
#
###### check of formula with weights
set.seed(123)
weights<- runif(N)
W<- diag(weights)
lambda<- .5
PHI<- as.matrix(LKrig.basis( x,LKinfo))
Q <- as.matrix(LKrig.precision(LKinfo))
M1<- PHI%*%solve( Q)%*%t(PHI) + lambda*solve( W)
B1<- (t(PHI)%*%(W/lambda)%*%PHI + Q)
B2<- (1/lambda) * ( t(PHI)%*%(W)%*%PHI + lambda*Q)
B3<- t(PHI)%*%(W)%*%PHI + lambda*Q
N2<- nrow(Q)
hold<- lnDet( M1)
test.for.zero( lnDet( B1) - lnDet(Q) - lnDet( W/lambda), hold, tag="Det with weights1")
test.for.zero( lnDet( B2) - lnDet(Q) - lnDet( W/lambda), hold, tag="Det with weights1=2")
test.for.zero( lnDet( B3) - lnDet(Q) - sum( log( weights)) + (N-N2)*log(lambda), hold, tag="Det with weights3")
# now check these formulas as implemented in LatticeKrig
rm( obj) # remove previous objects
data( ozone2)
x<-ozone2$lon.lat[1:10,]
y<- ozone2$y[16,1:10]
good <- !is.na( y)
x<- x[good,]
y<- y[good]
#x<- transformx(x, "range")
N<- length( y)
lambda <- .8
# a micro sized lattice so determinant is not too big or small
obj<- LKrig( x,y,NC=5, lambda=lambda,nlevel=nlevel,alpha=alpha,a.wght=a.wght,
NtrA=5,iseed=122)
obj0<- mKrig( x,y, lambda=lambda, m=2, cov.function="LKrig.cov",
cov.args=list(LKinfo=obj$LKinfo),
NtrA=20, iseed=122)
test.for.zero( obj$lnDetCov,obj0$lnDetCov, tag= "lnDetCov for mKrig and LatticeKrig")
test.for.zero( obj$quad.form, obj0$quad.form, tag= "quadratic forms for rho hat")
test.for.zero( obj0$lnProfileLike, obj$lnProfileLike,
tag="Profile Likelihood concentrated on lambda" )
# repeat tests for weighted measurement errors.
# recopy data to make reading easier
rm( obj, obj0) # remove previous objects
data( ozone2)
x<-ozone2$lon.lat[1:10,]
y<- ozone2$y[16,1:10]
good <- !is.na( y)
x<- x[good,]
y<- y[good]
#x<- transformx(x, "range")
N<- length( y)
alpha<- c(1,.5,.25)
nlevel<-3
a.wght<- c(5, 5, 4.5)
lambda <- .5
N<- length(y)
set.seed(243)
weights<- runif(N)*10 + 30
# weights<- rep( 1, N)
test.for.zero.flag<- 1
obj<- LKrig( x,y,weights,NC=5,
lambda=lambda,alpha=alpha,nlevel=nlevel, a.wght=a.wght, NtrA=5,iseed=122)
# compare mKrig and Krig with weights and LatticeKrig
obj0<- mKrig( x,y,weights, lambda=lambda, m=2, cov.function="LKrig.cov",
cov.args=list(LKinfo=obj$LKinfo),
NtrA=20, iseed=122)
obj1<- Krig( x,y,weights=weights, lambda=lambda,GCV=TRUE, m=2,
cov.function="LKrig.cov", cov.args=list(LKinfo=obj$LKinfo))
test.for.zero( obj0$fitted.values, obj1$fitted.values)
test.for.zero( predict(obj0), predict(obj1), tag="predicted values mKrig/Krig w/weights")
test.for.zero( obj0$rhohat, obj1$rhohat,tag="compare rhohat for mKrig and Krig with weights")
############ now tests for LatticeKrig
test.for.zero( obj$fitted.values, obj0$fitted.values)
test.for.zero( obj$rho.MLE, obj0$rho.MLE)
test.for.zero( obj$lnDetCov, obj0$lnDetCov)
############# tests using reuse Mc options
data( ozone2)
x<-ozone2$lon.lat[1:20,]
y<- ozone2$y[16,1:20]
good <- !is.na( y)
x<- x[good,]
y<- y[good]
N<- length(y)
set.seed(243)
weights<- runif(N)*10
#x<- transformx(x, "range")
N<- length( y)
alpha<- c(1,.5,.5)
nlevel<-3
a.wght<- c(5,5,10)
lambda <- .8
obj<- LKrig( x,y,weights=weights,NC=15, lambda=lambda,alpha=alpha,
nlevel=nlevel,a.wght=a.wght, return.cholesky=TRUE)
obj2<- LKrig( x,y,weights=weights,NC=15, lambda=2*lambda,alpha=alpha,
nlevel=nlevel,a.wght=a.wght, use.cholesky=obj$Mc)
obj3<- LKrig( x,y,weights=weights,NC=15, lambda=2*lambda,alpha=alpha,
nlevel=nlevel,a.wght=a.wght, return.cholesky=TRUE)
test.for.zero( obj3$c.coef, obj2$c.coef, tag="test of LatticeKrig.coef c")
test.for.zero( obj3$d.coef, obj2$d.coef, tag="test of LatticeKrig.coef d")
Q<- LKrig.precision(obj3$LKinfo)
look2<-LKrig.lnPlike(obj3$Mc,Q,sqrt(weights)*y, obj3$residuals, weights,obj3$LKinfo)
test.for.zero( look2$lnProfileLike, obj3$lnProfileLike)
# all done!
cat("Done testing LatticeKrig",fill=TRUE)
options( echo=FALSE)
# SUPPLEMENT: commented out sanity checks for weighted/unweighted versions of mKrig and Krig
#hold0<-Krig ( x,y,weights=weights,method="user",GCV=TRUE,lambda=1e-3,
# cov.function="Exp.simple.cov", cov.args=list( theta=300) )
#hold1<-mKrig(x,y,weights, lambda=1e-3,cov.function="Exp.simple.cov", cov.args=list( theta=300))
#test.for.zero( predict(hold0), predict(hold1))
|
## --------------------------------------------------------------------------------------------------------------------
##
## Function: rankall
## Input: Quality of care data from over 4,000 Medicare-certified hospitals in the United States.
## Source of data is the Hospital Compare website run by the U.S. Department of Health and Human Services.
## Output: Hospitals with the specified ranking for a given condition (either "heart attack", "heart failure" or
## "pneumonia") across all states.
##
## --------------------------------------------------------------------------------------------------------------------
## Return, for every state, the hospital holding rank `num` on 30-day
## mortality for the requested condition.
##
## Args:
##   outcome: one of "heart attack", "heart failure", "pneumonia".
##   num:     "best", "worst", or an integer rank.
## Returns:
##   data.frame with columns `hospital` and `state`, one row per state
##   (hospital is "<NA>" when the state has no hospital at that rank),
##   ordered alphabetically by state.
## Raises:
##   error "invalid outcome" for an unrecognised condition.
rankall <- function(outcome, num = "best") {
  ## read outcome data (everything as character; rates parsed below)
  ds <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  if (!outcome %in% c("heart attack", "heart failure", "pneumonia")) stop("invalid outcome")
  ## column holding the 30-day mortality rate for the chosen outcome
  col_idx <- if (outcome == "heart attack") { 11 } else if (outcome == "heart failure") { 17 } else { 23 }
  sub <- ds[, c(2, 7, col_idx)]
  ## "Not Available" -> NA; the coercion warning is expected
  suppressWarnings(sub[, 3] <- as.numeric(sub[, 3]))
  ## accumulator: one row per state
  outdf <- data.frame(Hospital.Name = character(0), State = character(0), stringsAsFactors = FALSE)
  ## for each state, rank hospitals by mortality rate (ties broken by name)
  for (i in unique(sub$State)) {
    sub_state <- sub[sub$State == i, ]
    ## na.last = NA drops hospitals with a missing rate from the ranking
    ranks <- sub_state[order(sub_state[, 3], sub_state[, 1], na.last = NA), ]
    ## seq_len() (not 1:nrow) stays correct when every rate in a state is NA
    ranks["hosp_rank"] <- seq_len(nrow(ranks))
    ## translate "best"/"worst" into a concrete rank for this state
    new_num <- if (num == "best") { 1 } else if (num == "worst") { nrow(ranks) } else { num }
    ranks_avail <- ranks$hosp_rank
    if (new_num %in% ranks_avail) {
      outdf <- rbind(outdf, ranks[ranks$hosp_rank == new_num, c(1, 2)])
    } else {
      ## state has no hospital at the requested rank
      outdf <- rbind(outdf, data.frame(Hospital.Name = "<NA>", State = i))
    }
  }
  ## order rows alphabetically by state; outdf[[2]] extracts the state
  ## vector -- order() cannot take the one-column data.frame outdf[2]
  ## (current R errors with "cannot xtfrm data frames")
  outdf_final <- outdf[order(outdf[[2]]), ]
  names(outdf_final)[1] <- "hospital"
  names(outdf_final)[2] <- "state"
  row.names(outdf_final) <- outdf_final$state
  outdf_final
}
| /rankall.R | no_license | Mshraddha26/Project-3 | R | false | false | 2,245 | r | ## --------------------------------------------------------------------------------------------------------------------
##
## Function: rankall
## Input: Quality of care data from over 4,000 Medicare-certified hospitals in the United States.
## Source of data is the Hospital Compare website run by the U.S. Department of Health and Human Services.
## Output: Hospitals with the specified ranking for a given condition (either "heart attack", "heart failure" or
## "pneumonia") across all states.
##
## --------------------------------------------------------------------------------------------------------------------
## Rank hospitals within every state by 30-day mortality for `outcome`
## ("heart attack", "heart failure" or "pneumonia") and return the hospital
## holding rank `num` ("best", "worst" or an integer) in each state.
## Returns a data.frame with columns `hospital` and `state`, one row per
## state ("<NA>" when no hospital holds that rank), ordered by state.
rankall <- function(outcome, num="best") {
## read outcome data and check for validity of input
ds <- read.csv("outcome-of-care-measures.csv", colClasses="character")
if (!outcome %in% c("heart attack","heart failure","pneumonia")) stop("invalid outcome")
## subset and format data: col 2 = hospital name, col 7 = state,
## col_idx = mortality-rate column for the requested outcome
col_idx <- if(outcome == 'heart attack') { 11 } else if (outcome == 'heart failure') { 17 } else { 23 }
sub <- ds[,c(2,7,col_idx)]
## "Not Available" -> NA; the coercion warning is expected
suppressWarnings(sub[,3] <- as.numeric(sub[,3]))
## create empty dataframe that will hold output data
outdf <- data.frame(Hospital.Name=character(0),State=character(0),stringsAsFactors=FALSE)
## for each state, rank hospitals by mortality rate (ties broken by
## hospital name; na.last=NA drops hospitals with a missing rate)
for (i in unique(sub$State)) {
sub_state <- sub[sub$State==i,]
ranks <- sub_state[order(sub_state[,3],sub_state[,1],na.last=NA),]
## NOTE(review): 1:nrow(ranks) yields c(1, 0) when nrow(ranks) == 0 --
## confirm every state has at least one non-NA rate in the input file
ranks["hosp_rank"] <- c(1:nrow(ranks))
## append to output dataframe based on value of new_num
new_num <- if (num=="best") { 1 } else if (num=="worst") { nrow(ranks) } else { num }
ranks_avail <- ranks$hosp_rank
if(new_num %in% ranks_avail) { outdf <- rbind(outdf,ranks[ranks$hosp_rank==new_num,c(1,2)]) }
else { outdf <- rbind(outdf,data.frame(Hospital.Name="<NA>",State=i)) }
}
## format output dataframe: order rows alphabetically by state
## NOTE(review): order() on the one-column data.frame outdf[2] errors in
## current R ("cannot xtfrm data frames"); outdf[[2]] is the safe form
outdf_final <- outdf[order(outdf[2]),]
names(outdf_final)[1] <- "hospital"; names(outdf_final)[2] <- "state"
row.names(outdf_final) <- outdf_final$state
outdf_final
}
|
## Plot landscape curves (precomputed in lands.csv) and save one
## figure per landscape level under report/fig/.
library(tidyverse)
library(TDA)      # NOTE(review): not used in the visible code -- confirm it is needed
library(TSclust)  # NOTE(review): likewise unused here
## columns used below: tseq (radius grid), land_1/land_2 (landscape values),
## number (curve id), name and type (facet keys)
lands <- read_csv("data/mazda/processed/lands.csv")
## level-1 landscape, faceted by series name (rows) and type (columns)
lands %>%
ggplot(aes(tseq, land_1, group=number))+
geom_line(size=.3)+
facet_grid(name ~ type, scales="free_y")+
labs(x = "radius", y="landscape")+
theme_bw()+
theme(text = element_text(size=5))
## ggsave() writes the most recently displayed plot
ggsave("report/fig/land_1.png",
height = 12,
width = 8,
units = "cm")
## level-2 landscape, same layout
lands %>%
ggplot(aes(tseq, land_2, group=number))+
geom_line(size=.3)+
facet_grid(name ~ type, scales="free_y")+
labs(x = "radius", y="landscape")+
theme_bw()+
theme(text = element_text(size=5))
ggsave("report/fig/land_2.png",
height = 12,
width = 8,
units = "cm")
| /report/landscape.R | no_license | kur0cky/TDA | R | false | false | 703 | r | library(tidyverse)
library(TDA)      # NOTE(review): not used in the visible code -- confirm it is needed
library(TSclust)  # NOTE(review): likewise unused here
## lands.csv columns used below: tseq (radius grid), land_1/land_2
## (landscape values), number (curve id), name and type (facet keys)
lands <- read_csv("data/mazda/processed/lands.csv")
## level-1 landscape, faceted by series name (rows) and type (columns)
lands %>%
ggplot(aes(tseq, land_1, group=number))+
geom_line(size=.3)+
facet_grid(name ~ type, scales="free_y")+
labs(x = "radius", y="landscape")+
theme_bw()+
theme(text = element_text(size=5))
## ggsave() writes the most recently displayed plot
ggsave("report/fig/land_1.png",
height = 12,
width = 8,
units = "cm")
## level-2 landscape, same layout
lands %>%
ggplot(aes(tseq, land_2, group=number))+
geom_line(size=.3)+
facet_grid(name ~ type, scales="free_y")+
labs(x = "radius", y="landscape")+
theme_bw()+
theme(text = element_text(size=5))
ggsave("report/fig/land_2.png",
height = 12,
width = 8,
units = "cm")
|
## Request historical demand data from the ISO New England report endpoint.
library(httr)
## FIX: the original passed `encod='json'`. Arguments that follow `...` in
## POST() are matched exactly (no partial matching), so the misspelled name
## never reached `encode` and the body was sent with the default encoding.
x <- POST(
  'https://www.iso-ne.com/isoexpress/web/reports/load-and-demand?p_p_id=operdataviewdetails_WAR_isoneoperdataviewportlet&p_p_lifecycle=2&p_p_state=normal&p_p_mode=view&p_p_cacheability=cacheLevelPage&p_p_col_id=column-2&p_p_col_count=1',
  encode = 'json',
  body = list(
    '_operdataviewdetails_WAR_isoneoperdataviewportlet_treenode' = 'dmnd',
    '_operdataviewdetails_WAR_isoneoperdataviewportlet_fileName' = 'dmnd',
    ## NOTE(review): 'p_p_resource_id' appears twice in this list (here and
    ## 'validateCaptcha' below); both fields are sent -- confirm which one
    ## the server actually expects
    'p_p_resource_id' = 'downloadHistZips',
    '_operdataviewdetails_WAR_isoneoperdataviewportlet_reportId' = '014',
    '_operdataviewdetails_WAR_isoneoperdataviewportlet_from' = '06/20/2011',
    '_operdataviewdetails_WAR_isoneoperdataviewportlet_to' = '06/20/2017',
    '_operdataviewdetails_WAR_isoneoperdataviewportlet_captchaText' = '2633',
    'p_p_resource_id' = 'validateCaptcha'
  )
)
## Demo: scatter of N uniform points, with roughly 10% highlighted in red.
N <- 100
x <- runif(N)
y <- runif(N)
cols <- c("red", "black")
## use TRUE, not the reassignable shorthand T
color <- sample(cols, N, replace=TRUE, prob=c(.1, .9))
plot(y~x, col=color, pch=19)
## map x in [0, 1) onto pseudo-years 2000..2009 and recolor by a cutoff year
year <- 2000+floor(x*10)
plot(y~year, col=ifelse(year>2007, cols[1], cols[2]), pch=19)
| /data-science-scripts/zach/ts_charts.R | no_license | mcohenmcohen/DataRobot | R | false | false | 1,052 | r | library(httr)
## FIX: the original passed `encod='json'`. Arguments that follow `...` in
## POST() are matched exactly (no partial matching), so the misspelled name
## never reached `encode` and the body was sent with the default encoding.
x <- POST(
  'https://www.iso-ne.com/isoexpress/web/reports/load-and-demand?p_p_id=operdataviewdetails_WAR_isoneoperdataviewportlet&p_p_lifecycle=2&p_p_state=normal&p_p_mode=view&p_p_cacheability=cacheLevelPage&p_p_col_id=column-2&p_p_col_count=1',
  encode = 'json',
  body = list(
    '_operdataviewdetails_WAR_isoneoperdataviewportlet_treenode' = 'dmnd',
    '_operdataviewdetails_WAR_isoneoperdataviewportlet_fileName' = 'dmnd',
    ## NOTE(review): 'p_p_resource_id' appears twice in this list (here and
    ## 'validateCaptcha' below); both fields are sent -- confirm which one
    ## the server actually expects
    'p_p_resource_id' = 'downloadHistZips',
    '_operdataviewdetails_WAR_isoneoperdataviewportlet_reportId' = '014',
    '_operdataviewdetails_WAR_isoneoperdataviewportlet_from' = '06/20/2011',
    '_operdataviewdetails_WAR_isoneoperdataviewportlet_to' = '06/20/2017',
    '_operdataviewdetails_WAR_isoneoperdataviewportlet_captchaText' = '2633',
    'p_p_resource_id' = 'validateCaptcha'
  )
)
## Demo: scatter of N uniform points, with roughly 10% highlighted in red.
N <- 100
x <- runif(N)
y <- runif(N)
cols <- c("red", "black")
## NOTE(review): replace=T uses the reassignable shorthand; TRUE is safer
color <- sample(cols, N, replace=T, prob=c(.1, .9))
plot(y~x, col=color, pch=19)
## map x in [0, 1) onto pseudo-years 2000..2009 and recolor by a cutoff year
year <- 2000+floor(x*10)
plot(y~year, col=ifelse(year>2007, cols[1], cols[2]), pch=19)
|
##################################################################
## MODEL FREE ESTIMATES FOR SURVIVAL ANALYSIS ##
##################################################################
# 01/04/2021
# This rscript is a supplement to the paper:
# Model-free estimates that complement information obtained from the Hazard Ratio.
# Salil V Deo, Vaishali Deo, Varun Sundaram
#
# Louis Stokes Cleveland VA Medical Center,Cleveland Ohio
# svd14 at case.edu
# This is an rscript that is a supplement file to the paper on model-free estimates #
# for reporting results of survival analysis. This file will provide
# readers an introduction to the package and functions that can be used
# to obtain model free estimates for their data.
# Packages used:
# The packages that are beneficial for these calculations are:
# survRM2, surv2sampleComp, and ComparisonSurv.
# All these packages can be installed from CRAN.
# Install the three survival-comparison packages from CRAN.
# NOTE(review): `dependencies = T` uses the reassignable shorthand T;
# TRUE is the safer spelling.
install.packages("survRM2", dependencies = T)
install.packages("surv2sampleComp", dependencies = T)
install.packages("ComparisonSurv", dependencies = T)
# apart from these, we will load some useful packages
# (pacman::p_load installs any missing package and then attaches it)
library(pacman)
p_load(tidyverse, survival, survminer, survRM2,
surv2sampleComp, ComparisonSurv, rms, Hmisc)
# we will use the lung package in the survival library to
# work on the functions provided in these packages.
# Use the lung cancer data shipped with the survival package.
df <- lung
# see the data.
glimpse(df)
# for these functions, we need to make some changes:
# status - needs to be coded as 0/1 (in `lung` it is coded 1/2)
df$died = df$status - 1 # this converts it to 0/1 with 1 = died.
df$female = df$sex - 1 # here now , 0 = males, 1 = females.
# surv2sample function in the surv2sampleComp can be used to
# obtain ratio/difference in survival percentile estimates,
# ratio/difference in median survival time, and
# it can also be used to obtain ratio/difference in t-year survival rates.
# using the function requires input of the following values:
# time - time till event or censoring.
# status - 0/1 censored/observed event.
# arm - two arms to be compared, they have to fit as 0/1.
# tau - this is the maximum time at which we would like to calculate the RMST.
# However, for the RMST, we will use the survRM2 package and not this function.
# quantprob - if we want to obtain survival probabilities at specific quantile values.
#
# There are some other options like 'tau_start'. But, we will not discuss
# them here. They are not needed for routine right censored data.
# The rest of the parameters can be kept at their default values.
# The conf.int parameter can be changed to provide results at different
# confidence intervals. However, default = 0.95.
# We would recommend that the result for surv2sample be saved as an object.
# As we have also provided a supplemental file to provide examples in STATA, we will
# replicate the same analysis that is done there.
# thus after converting the variables female and status to 0/1 format, we will convert
# time to years.
# convert follow-up time from days to years
# (the +1 day presumably avoids zero survival times -- TODO confirm)
df$surv_years <- (1 + df$time)/365.24
# two-sample comparison (females, arm = 1, vs males, arm = 0):
# t-year survival rates, median survival, and RMST truncated at tau = 1 year
res <- surv2sample(time = df$surv_years,
status = df$died,
arm = df$female,
tau = 1,
quanprobs = 0.5, # to calculate the median survival time
SEED = 1974,     # bootstrap-based intervals; fix the seed for reproducibility
timepoints = c(1))
# as this depends upon bootstrapped resampling, please provide a seed.
# please note that if the status and arm are not coded as 0/1
# the function will throw an error. The error is not helpful at
# understanding the mistake. So, we would recommend to take note of this
# important step prior to fitting the function.
# After the function is fit,
# the output object is very large and contains many parts.
# please see all the parts at once.
str(res)
# This function provides values for each parameter according to group and then we can obtain the contrast
# between groups. Hence, unlike STATA output, we need only run this function to obtain
# all results.
# to see the results for group 0 - males.
res$group0
# Est. Lower 95% Upper 95% SE
# RMST 0.6632342 0.6078744 0.7162000 0.02796118
# Loss time 0.3367658 0.2838000 0.3921256 0.02796118
# Prob at 1 0.3360878 0.2575620 0.4206296 0.04272376
# Quantile at 50 % 0.7419779 0.5777023 0.8405432 0.07662344
# Ave of t-year event rates 0.3360878 0.2575620 0.4206296 0.04272376
# Ave percentiles 0.7419779 0.5777023 0.8405432 0.07662344
# Here -- the results we are interested in are (1) Prob at 1 - this is the
# survival probability at 1 year, hence in males 0.33 % survive at 1 year.
# (2) quantile at 50% - 0.74 , hence 50% are surviving at 0.74 years from the start.
res$group1 # this will similarly give the same values for the group = 1 (female)
# as can be seen from results, they are almost exactly similar to those
# provided by STATA.
# This function also provides the RMST at the time point specified by
# input to tau. If we compare the RMST reported here, again, it is almost
# exactly the same as that reported by stmrst command in STATA. The small
# variation in the final decimal places is due to computation and resampling.
res$contrast.diff10
#
# Est. Lower 95% Upper 95% p-val
# RMST Group1-Group0 0.1528465 0.07255657 0.23313634 0.0001906082
# Loss time Group1-Group0 -0.1528465 -0.23313634 -0.07255657 0.0001906082
# Prob at 1 Group1-Group0 0.1903752 0.04464380 0.33610659 0.0104556172
# Quantile at 50 % Group1-Group0 0.4271164 0.08983419 0.76439864 0.0130649109
# Ave of t-year event rates Group1-Group0 0.1903752 0.04464380 0.33610659 0.0104556172
# Ave percentiles Group1-Group0 0.4271164 0.08983419 0.76439864 0.0130649109
# The result for difference between female and males with confidence intervals and p-value.
# This package surv2sampleComp provides all the parameters that we have discussed in the paper.
# The survRM2 is useful for computing the rmst and performing covariate adjusted rmst calculations.
# References:
# 1. https://cran.r-project.org/web/packages/survRM2/vignettes/survRM2-vignette3-2.html -
# An excellent vignette prepared by developers of survRM2 to explain their main function rmst2.
| /supplement_rscript.R | permissive | svd09/RMST-part4 | R | false | false | 6,380 | r | ##################################################################
## MODEL FREE ESTIMATES FOR SURVIVAL ANALYSIS ##
##################################################################
# 01/04/2021
# This rscript is a supplement to the paper:
# Model-free estimates that complement information obtained from the Hazard Ratio.
# Salil V Deo, Vaishali Deo, Varun Sundaram
#
# Louis Stokes Cleveland VA Medical Center,Cleveland Ohio
# svd14 at case.edu
# This is an rscript that is a supplement file to the paper on model-free estimates #
# for reporting results of survival analysis. This file will provide
# readers an introduction to the package and functions that can be used
# to obtain model free estimates for their data.
# Packages used:
# The packages that are beneficial for these calculations are:
# survRM2, surv2sampleComp, and ComparisonSurv.
# All these packages can be installed from CRAN.
install.packages("survRM2", dependencies = T)
install.packages("surv2sampleComp", dependencies = T)
install.packages("ComparisonSurv", dependencies = T)
# apart from these, we will load some useful packages
library(pacman)
p_load(tidyverse, survival, survminer, survRM2,
surv2sampleComp, ComparisonSurv, rms, Hmisc)
# we will use the lung package in the survival library to
# work on the functions provided in these packages.
df <- lung
# see the data.
glimpse(df)
# for these functions, we need to make some changes
# status - needs to be coded as 0/1
df$died = df$status - 1 # this converts it to 0/1 with 1 = died.
df$female = df$sex - 1 # here now , 0 = males, 1 = females.
# surv2sample function in the surv2sampleComp can be used to
# obtain ratio/difference in survival percentile estimates,
# ratio/difference in median survival time, and
# it can also be used to obtain ratio/difference in t-year survival rates.
# using the function requires input of the following values:
# time - time till event or censoring.
# status - 0/1 censored/observed event.
# arm - two arms to be compared, they have to fit as 0/1.
# tau - this is the maximum time at which we would like to calculate the RMST.
# However, for the RMST, we will use the survRM2 package and not this function.
# quantprob - if we want to obtain survival probabilities at specific quantile values.
#
# There are some other options like 'tau_start'. But, we will not discuss
# them here. They are not needed for routine right censored data.
# The rest of the parameters can be kept at their default values.
# The conf.int parameter can be changed to provide results at different
# confidence intervals. However, default = 0.95.
# We would recommend that the result for surv2sample be saved as an object.
# As we have also provided a supplemental file to provide examples in STATA, we will
# replicate the same analysis that is done there.
# thus after converting the variables female and status to 0/1 format, we will convert
# time to years.
df$surv_years <- (1 + df$time)/365.24
res <- surv2sample(time = df$surv_years,
status = df$died,
arm = df$female,
tau = 1,
quanprobs = 0.5, # to calculate the median survival time
SEED = 1974,
timepoints = c(1))
# as this depends upon bootstrapped resampling, please provide a seed.
# please note that if the status and arm are not coded as 0/1
# the function will throw an error. The error is not helpful at
# understanding the mistake. So, we would recommend to take note of this
# important step prior to fitting the function.
# After the function is fit,
# the output object is very large and contains many parts.
# please see all the parts at once.
str(res)
# This function provides values for each parameter according to group and then we can obtain the contrast
# between groups. Hence, unlike STATA output, we need only run this function to obtain
# all results.
# to see the results for group 0 - males.
res$group0
# Est. Lower 95% Upper 95% SE
# RMST 0.6632342 0.6078744 0.7162000 0.02796118
# Loss time 0.3367658 0.2838000 0.3921256 0.02796118
# Prob at 1 0.3360878 0.2575620 0.4206296 0.04272376
# Quantile at 50 % 0.7419779 0.5777023 0.8405432 0.07662344
# Ave of t-year event rates 0.3360878 0.2575620 0.4206296 0.04272376
# Ave percentiles 0.7419779 0.5777023 0.8405432 0.07662344
# Here -- the results we are interested in are (1) Prob at 1 - this is the
# survival probability at 1 year, hence in males 0.33 % survive at 1 year.
# (2) quantile at 50% - 0.74 , hence 50% are surviving at 0.74 years from the start.
res$group1 # this will similarly give the same values for the group = 1 (female)
# as can be seen from results, they are almost exactly similar to those
# provided by STATA.
# This function also provides the RMST at the time point specified by
# input to tau. If we compare the RMST reported here, again, it is almost
# exactly the same as that reported by stmrst command in STATA. The small
# variation in the final decimal places is due to computation and resampling.
res$contrast.diff10
#
# Est. Lower 95% Upper 95% p-val
# RMST Group1-Group0 0.1528465 0.07255657 0.23313634 0.0001906082
# Loss time Group1-Group0 -0.1528465 -0.23313634 -0.07255657 0.0001906082
# Prob at 1 Group1-Group0 0.1903752 0.04464380 0.33610659 0.0104556172
# Quantile at 50 % Group1-Group0 0.4271164 0.08983419 0.76439864 0.0130649109
# Ave of t-year event rates Group1-Group0 0.1903752 0.04464380 0.33610659 0.0104556172
# Ave percentiles Group1-Group0 0.4271164 0.08983419 0.76439864 0.0130649109
# The result for difference between female and males with confidence intervals and p-value.
# This package surv2sampleComp provides all the parameters that we have discussed in the paper.
# The survRM2 is useful for computing the rmst and performing covariate adjusted rmst calculations.
# References:
# 1. https://cran.r-project.org/web/packages/survRM2/vignettes/survRM2-vignette3-2.html -
# An excellent vignette prepared by developers of survRM2 to explain their main function rmst2.
|
## trains the emulator on the given round-robin folder.
## further functions can be used to generate the implausibility slices etc
## this is designed to be called by something that has defined mwString and
## is of course in the right place for the paths to work :)
## NOTE(review): this script also reads lumOutputFile, metOutputFile,
## designFile and desNames from the calling environment -- none are defined
## here (the commented-out paste() lines show their intended form).
source("~/local/include/libRbind/EmuRbind.R") # load the emu bindings
initEmu()
source("~/local/include/emu-analysis/fnAnalysis.R")
## load the model data
##
## first the luminosity data
#lumOutputFile <- paste("./",mwString, "/output/lum_fun_outps_", mwString.Up, "_5par.dat", sep="")
modelDataLum <- as.matrix(read.table(lumOutputFile))
nbinsLum <- dim(modelDataLum)[2] # columns = output bins
nruns <- dim(modelDataLum)[1] # rows = model runs
##
## now the metallicity data (kept as magnitudes via abs())
#metOutputFile <- paste("./", mwString, "/output/metallicity_MV_outputs_", mwString.Up, "_5par.dat", sep="")
modelDataMet <- abs(as.matrix(read.table(metOutputFile)))
nbinsMet <- dim(modelDataMet)[2]
## both output files must describe the same set of runs
if(nruns != dim(modelDataMet)[1]){
stop("nruns modelDataMet doesn't match modelDataLum")
}
nbins <- nbinsMet + nbinsLum
## concatenate the two output blocks column-wise: one combined row per run
modelData.big <- cbind(modelDataLum, modelDataMet)
modelData <- modelData.big
## load the design
#designFile <- paste("./", mwString, "/design/design_", mwString.Up, "_5par_sorted.dat", sep="")
## this is a 5 param system, so we have more names than before..
#desNames <- c("Zr", "Fescp", "Fbary", "sfe", "yfe2")
nparams <- length(desNames)
designData.big <- as.matrix(read.table(designFile, col.names=desNames))
designData <- designData.big
## load the experimental data, this is not what we actually want to do
## since we want to do a broad round-robin comparison we need to load the
## exp data for each of the runs we're going to generate comparisons against
## the exp-data is not used in the estimation process so we can start by setting this
## as a blank list
##
expData <- list()
## rebuild the functional sample unless a cached copy already exists on disk
rebuild <- 1
buffer <- paste("functional-data-", nruns, "-test-", mwString, ".dat", sep="")
if(rebuild == 1 || file.exists(buffer) == FALSE){
##
## generate a functional sample from the vars in global scope
## regression with a constant term seems to work better in general than
## a first order term
fnData <- fn.sample.gen(cov.fn=1, reg.order=0)
## now do the pca decomp
fnData <- fn.pca.gen(fnData, cutOff=0.98)
## estimate the thetas
fnData <- fn.estimate(fnData)
save(fnData, file=buffer)
} else {
load(buffer)
}
| /msu-chemtreeN-analysis/exp-analysis/round-robin-5param/combAna.R | no_license | scottedwardpratt/stat | R | false | false | 2,372 | r | ## trains the emulator on the given round-robin folder.
## further functions can be used to generate the implausibility slices etc
## this is designed to be called by something that has defined mwString and
## is of course in the right place for the paths to work :)
source("~/local/include/libRbind/EmuRbind.R") # load the emu bindings
initEmu()
source("~/local/include/emu-analysis/fnAnalysis.R")
## load the model data
##
## first the luminosity data
#lumOutputFile <- paste("./",mwString, "/output/lum_fun_outps_", mwString.Up, "_5par.dat", sep="")
modelDataLum <- as.matrix(read.table(lumOutputFile))
nbinsLum <- dim(modelDataLum)[2]
nruns <- dim(modelDataLum)[1]
##
## now the metallicity data
#metOutputFile <- paste("./", mwString, "/output/metallicity_MV_outputs_", mwString.Up, "_5par.dat", sep="")
modelDataMet <- abs(as.matrix(read.table(metOutputFile)))
nbinsMet <- dim(modelDataMet)[2]
if(nruns != dim(modelDataMet)[1]){
stop("nruns modelDataMet doesn't match modelDataLum")
}
nbins <- nbinsMet + nbinsLum
modelData.big <- cbind(modelDataLum, modelDataMet)
modelData <- modelData.big
## load the design
#designFile <- paste("./", mwString, "/design/design_", mwString.Up, "_5par_sorted.dat", sep="")
## this is a 5 param system, so we have more names than before..
#desNames <- c("Zr", "Fescp", "Fbary", "sfe", "yfe2")
nparams <- length(desNames)
designData.big <- as.matrix(read.table(designFile, col.names=desNames))
designData <- designData.big
## load the experimental data, this is not what we actually want to do
## since we want to do a broad round-robin comparison we need to load the
## exp data for each of the runs we're going to generate comparisons against
## the exp-data is not used in the estimation process so we can start by setting this
## as a blank list
##
expData <- list()
rebuild <- 1
buffer <- paste("functional-data-", nruns, "-test-", mwString, ".dat", sep="")
if(rebuild == 1 || file.exists(buffer) == FALSE){
##
## generate a functional sample from the vars in global sope
## regression with a constant term seems to work better in general than
## a first order term
fnData <- fn.sample.gen(cov.fn=1, reg.order=0)
## now do the pca decomp
fnData <- fn.pca.gen(fnData, cutOff=0.98)
## estimate the thetas
fnData <- fn.estimate(fnData)
save(fnData, file=buffer)
} else {
load(buffer)
}
|
###### R code to run MCMC for estimating the Discrete Normal distribution parameters ######
## see the definition of the discrete normal distribution in Kemp,
## "Characterizations of a discrete normal distribution",
## Journal of Statistical Planning and Inference 63 (1997) p223
###########################################
# define variables
# NOTE(review): the second read.table() immediately overwrites the first,
# so only the 60hr dataset is analysed below -- confirm this is intended.
div_dat<-read.table("division_destiny_withBCL-3and_cpg40hr.csv", header=T, sep=",")
div_dat<-read.table("division_destiny_withBCL-3and_cpg60hr.csv", header=T, sep=",")
# traces of the chain: one entry per iteration for each parameter and
# for the log-likelihood
lamdaArr<-c();
sigmaArr<-c();
qArr<-c();
logllArr<-c();
# current state of the chain (last accepted values); the log-likelihood
# starts effectively at -Inf so the first proposal is always accepted
lastLogll<- -1E+100;
lastLamda<-0;
lastQ<-0;
lastSigma<-0;
# x: generation numbers (rows 1..8), y: observed proportion per generation
x<-div_dat[c(1:8),1];
y<-div_dat[c(1:8),2];
# Independence-sampler MCMC: at each iteration a fresh candidate
# (lamda, q, sigma) is drawn from fixed proposal distributions and
# accepted or rejected with a Metropolis test on the log-likelihood.
for( k in c(1:100000))
{
print(paste("loop:",k,sep=""));
# draw the next candidate parameters for the discrete normal
lamda<-runif(1, 0.00,1000)
q<-rbeta(1,1,1);
sigma<-runif(1,0, 1)
# likelihood of the candidate: denorm is the normalising constant of the
# discrete normal over the observed support x, so norm[i] is the model
# proportion expected in generation x[i]
denorm<-sum((lamda^x)*q^(x*(x-1)/2))
norm<-rep(0,length(x));
i<-1;
for(i in c(1:length( x)))
{
norm[i]<-lamda^x[i]*q^(x[i]*(x[i]-1)/2);
norm[i]<-norm[i]/denorm;
}
logll<-0;
## log-likelihood under a Gaussian observation model: y[j] ~ N(norm[j], sigma)
for(j in c(1:length(norm)))
{
logll<-logll+log(dnorm(y[j],norm[j],sigma));
}
flag<-FALSE;
#### Metropolis acceptance test on the log-likelihood
if(logll>=lastLogll) {
# candidate is at least as good: always accept
flag<-TRUE;
} else{
u<-runif(1,0,1);
# otherwise accept with probability exp(logll - lastLogll)
if(log(u)< (logll-lastLogll)){
flag<-TRUE;
}
}
### record either the accepted candidate or repeat the previous state
if(flag){
lamdaArr<-c(lamdaArr, lamda);
qArr<-c(qArr,q);
logllArr<-c(logllArr,logll);
sigmaArr<-c(sigmaArr,sigma);
lastLogll<-logll;
lastLamda<-lamda;
lastQ<-q;
lastSigma<-sigma;
} else {
# rejected: repeat the last accepted state in the traces
lamdaArr<-c(lamdaArr, lastLamda);
qArr<-c(qArr,lastQ);
logllArr<-c(logllArr,lastLogll);
sigmaArr<-c(sigmaArr,lastSigma);
#lastLogll<-logll;
}
}
###run some statistics
op<-par(mfrow = c(2, 2), # 2 x 2 pictures on one plot
pty = "s") # square plotting region,
# independent of device size
plot(c(1:length(logllArr)), logllArr, col=2, main="LogLL", type="l")
#mtext(paste("totle number of cells:", length(xt[,2]), ""));
plot(c(1:length(logllArr)), sigmaArr, col=2, main="sigma", type="l")
#mtext(paste("totle number of cells:", length(xt[,2]), ""));
plot(c(1:length(logllArr)), lamdaArr, col=2, main="lamda", type="l")
#mtext(paste("totle number of cells:", length(xt[,2]), ""));
plot(c(1:length(logllArr)), qArr, col=2, main="q", type="l")
#mtext(paste("totle number of cells:", length(xt[,2]), ""));
par(op);
low<-50000;#length(lamdaArr)/2;
high<-100000;
lamdaBa<-median(lamdaArr[c(low:high)])
qBa<-median(qArr[c(low:high)])
sigmaBa<-mean(sigmaArr[c(low:high)])
logllBa<-mean(logllArr[c(low:high)])
####verify
lamdaBa<-4.448718;
qBa<- 0.688582;
x<-c(1:8);
#x<-c(-2:2);
denormPredicted<-sum((lamdaBa^x)*qBa^(x*(x-1)/2))
normPredicted<-rep(0,length(x));
i<-1;
for(i in c(1:length( x)))
{
normPredicted[i]<-lamdaBa^x[i]*qBa^(x[i]*(x[i]-1)/2);
normPredicted[i]<-normPredicted[i]/denormPredicted;
}
# Plot the fitted discrete-normal proportions against the experimental data
# and save the figure as a JPEG in the working directory.
# (Relies on x, y and normPredicted computed above.)
jpeg("divisionDestiny_Figure9d.jpg");
# typo fix in the user-facing title: "Dvision" -> "Division"
plot(c(0,9), c(0,0.4), col=1, type="n", main="Division Destiny (Discrete Normal Distribution)", xlab="generation", ylab="proportion")
points(x, normPredicted, col=2, pch=2)
points(x, y, col=3, pch=16)
lines(x, normPredicted, col=2, lwd=2, lty=2);
legend(6,0.3, c("fitted", "exp data"),text.col=c(2,3), lty=c(1,0), col= c(2,3),cex=0.9, pch=c(2,16));
dev.off();
################best output so far for *figure9b* ##############
> sigmaBa
[1] 0.00937503
> logllBa
[1] 17.88932
> lamdaBa
[1] 1.556716
> qBa
[1] 0.688582
> normPredicted
[1] 0.2901771668 0.3110487205 0.2295880537 0.1166879037 0.0408373990
[6] 0.0098411532 0.0016330128 0.0001865902
> y
[1] 0.262020854 0.308018868 0.261554121 0.120754717 0.025223436 0.003291956
[7] 0.001648461 0.001891758
###############best output so far for figure9d############
sigmaBa
[1] 0.0626555
> lamdaBa
[1] 4.448718
> y
[1] 0.08250000 0.09750000 0.15333333 0.24583333 0.20000000 0.11250000 0.05083333
[8] 0.04416667
> qBa
[1] 0.6584583
> normPredicted
[1] 0.03630444 0.10634642 0.20512298 0.26051578 0.21786227 0.11996603 0.04349735
[8] 0.01038474
>
| /CellDivCmd/bin/Debug/mcmc_division_denstiny.r | no_license | ffeng23/CellDiv | R | false | false | 4,278 | r | ######r code for run MCMC for estimating the Discrete Normal distribution parameters########
## see definition for normal distribution in Kemp, Characterizations of a discrete normal distribution
## Journal of statistical planning and inference 63(1997) p223
###########################################
#define variables
div_dat<-read.table("division_destiny_withBCL-3and_cpg40hr.csv", header=T, sep=",")
div_dat<-read.table("division_destiny_withBCL-3and_cpg60hr.csv", header=T, sep=",")
lamdaArr<-c();
sigmaArr<-c();
qArr<-c();
logllArr<-c();
lastLogll<- -1E+100;
lastLamda<-0;
lastQ<-0;
lastSigma<-0;
x<-div_dat[c(1:8),1];
y<-div_dat[c(1:8),2];
for( k in c(1:100000))
{
print(paste("loop:",k,sep=""));
#parameters for discrete normal, draw next one
lamda<-runif(1, 0.00,1000)
q<-rbeta(1,1,1);
sigma<-runif(1,0, 1)
#calculate the likelihood of MCMC chain
denorm<-sum((lamda^x)*q^(x*(x-1)/2))
norm<-rep(0,length(x));
i<-1;
for(i in c(1:length( x)))
{
norm[i]<-lamda^x[i]*q^(x[i]*(x[i]-1)/2);
norm[i]<-norm[i]/denorm;
}
logll<-0;
##calculate the loglikelihood
for(j in c(1:length(norm)))
{
logll<-logll+log(dnorm(y[j],norm[j],sigma));
}
flag<-FALSE;
####compare the logll
if(logll>=lastLogll) {
#update everything
flag<-TRUE;
} else{
u<-runif(1,0,1);
if(log(u)< (logll-lastLogll)){
flag<-TRUE;
}
}
###check to upate
if(flag){
lamdaArr<-c(lamdaArr, lamda);
qArr<-c(qArr,q);
logllArr<-c(logllArr,logll);
sigmaArr<-c(sigmaArr,sigma);
lastLogll<-logll;
lastLamda<-lamda;
lastQ<-q;
lastSigma<-sigma;
} else {
lamdaArr<-c(lamdaArr, lastLamda);
qArr<-c(qArr,lastQ);
logllArr<-c(logllArr,lastLogll);
sigmaArr<-c(sigmaArr,lastSigma);
#lastLogll<-logll;
}
}
###run some statistics
op<-par(mfrow = c(2, 2), # 2 x 2 pictures on one plot
pty = "s") # square plotting region,
# independent of device size
plot(c(1:length(logllArr)), logllArr, col=2, main="LogLL", type="l")
#mtext(paste("totle number of cells:", length(xt[,2]), ""));
plot(c(1:length(logllArr)), sigmaArr, col=2, main="sigma", type="l")
#mtext(paste("totle number of cells:", length(xt[,2]), ""));
plot(c(1:length(logllArr)), lamdaArr, col=2, main="lamda", type="l")
#mtext(paste("totle number of cells:", length(xt[,2]), ""));
plot(c(1:length(logllArr)), qArr, col=2, main="q", type="l")
#mtext(paste("totle number of cells:", length(xt[,2]), ""));
par(op);
low<-50000;#length(lamdaArr)/2;
high<-100000;
lamdaBa<-median(lamdaArr[c(low:high)])
qBa<-median(qArr[c(low:high)])
sigmaBa<-mean(sigmaArr[c(low:high)])
logllBa<-mean(logllArr[c(low:high)])
####verify
lamdaBa<-4.448718;
qBa<- 0.688582;
x<-c(1:8);
#x<-c(-2:2);
denormPredicted<-sum((lamdaBa^x)*qBa^(x*(x-1)/2))
normPredicted<-rep(0,length(x));
i<-1;
for(i in c(1:length( x)))
{
normPredicted[i]<-lamdaBa^x[i]*qBa^(x[i]*(x[i]-1)/2);
normPredicted[i]<-normPredicted[i]/denormPredicted;
}
jpeg("divisionDestiny_Figure9d.jpg");
plot(c(0,9), c(0,0.4), col=1, type="n", main="Dvision Destiny (Discrete Normal Distribution)", xlab="generation", ylab="proportion")
points(x, normPredicted, col=2, pch=2)
points(x, y, col=3, pch=16)
lines(x, normPredicted, col=2, lwd=2, lty=2);
legend(6,0.3, c("fitted", "exp data"),text.col=c(2,3), lty=c(1,0), col= c(2,3),cex=0.9, pch=c(2,16));
dev.off();
################best output so far for *figure9b* ##############
> sigmaBa
[1] 0.00937503
> logllBa
[1] 17.88932
> lamdaBa
[1] 1.556716
> qBa
[1] 0.688582
> normPredicted
[1] 0.2901771668 0.3110487205 0.2295880537 0.1166879037 0.0408373990
[6] 0.0098411532 0.0016330128 0.0001865902
> y
[1] 0.262020854 0.308018868 0.261554121 0.120754717 0.025223436 0.003291956
[7] 0.001648461 0.001891758
###############best output so far for figure9d############
sigmaBa
[1] 0.0626555
> lamdaBa
[1] 4.448718
> y
[1] 0.08250000 0.09750000 0.15333333 0.24583333 0.20000000 0.11250000 0.05083333
[8] 0.04416667
> qBa
[1] 0.6584583
> normPredicted
[1] 0.03630444 0.10634642 0.20512298 0.26051578 0.21786227 0.11996603 0.04349735
[8] 0.01038474
>
|
## Caching the Inverse of a Matrix:
## Matrix inversion is usually a costly computation and there may be some
## benefit to caching the inverse of a matrix rather than compute it repeatedly.
## Below are a pair of functions that are used to create a special object that
## stores a matrix and caches its inverse.
## Creating a special matrix object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
  ## Wrap a matrix together with a cache for its inverse.
  ## Returns a list of accessor functions sharing this environment:
  ##   set(y)          -- replace the matrix and invalidate the cache
  ##   get()           -- return the stored matrix
  ##   setInverse(inv) -- store a computed inverse
  ##   getInverse()    -- return the cached inverse (NULL if never set)
  cached <- NULL
  list(
    set = function(y) {
      x <<- y
      cached <<- NULL  # a new matrix invalidates any cached inverse
    },
    get = function() x,
    setInverse = function(inverse) cached <<- inverse,
    getInverse = function() cached
  )
}
## This function computes the inverse of the special "matrix" created by
## makeCacheMatrix above. If the inverse has already been calculated (and the
## matrix has not changed), then it should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of the special "matrix" 'x'
  ## (see makeCacheMatrix): compute it with solve() on the first call
  ## and reuse the cached value on subsequent calls.
  cached <- x$getInverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
> ## Testing the function with different scenarios.
> setwd("D:/RProjects") ## to set the working directory, it is my R working directory.
> source("cachematrix.R") ## store the previous functions with the name of cachematrix.R and put is in your working directory.
> my_matrix <- makeCacheMatrix(matrix(seq(2, 8, 2), 2, 2)) # to assign values to the original matrix.
> my_matrix$get() ## to show the original matrix.
[,1] [,2]
[1,] 2 6
[2,] 4 8
> my_matrix$getInverse() ## the inverse matrix has not been set yet.
NULL
> cacheSolve(my_matrix) ## getting cached data of the inversed matrix.
[,1] [,2]
[1,] -1.0 0.75
[2,] 0.5 -0.25
> my_matrix$getInverse() ## now the inverse matrix has values of inverting the original matrix
[,1] [,2]
[1,] -1.0 0.75
[2,] 0.5 -0.25
> my_matrix <- makeCacheMatrix(matrix(seq(1, 7, 2), 2, 2)) # to assign new values to the original matrix.
> my_matrix$get() ## to show the new matrix.
[,1] [,2]
[1,] 1 5
[2,] 3 7
> my_matrix$getInverse() ## the cached values of the new inverse has been cleared.
NULL
| /cachematrix.R | no_license | MohannadNafee/ProgrammingAssignment2 | R | false | false | 2,426 | r | ## Caching the Inverse of a Matrix:
## Matrix inversion is usually a costly computation and there may be some
## benefit to caching the inverse of a matrix rather than compute it repeatedly.
## Below are a pair of functions that are used to create a special object that
## stores a matrix and caches its inverse.
## Creating a special matrix object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setInverse <- function(inverse) inv <<- inverse
getInverse <- function() inv
list(set = set,
get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## This function computes the inverse of the special "matrix" created by
## makeCacheMatrix above. If the inverse has already been calculated (and the
## matrix has not changed), then it should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getInverse()
if (!is.null(inv)) {
message("getting cached data")
return(inv)
}
mat <- x$get()
inv <- solve(mat, ...)
x$setInverse(inv)
inv
}
> ## Testing the function with different scenarios.
> setwd("D:/RProjects") ## to set the working directory, it is my R working directory.
> source("cachematrix.R") ## store the previous functions with the name of cachematrix.R and put is in your working directory.
> my_matrix <- makeCacheMatrix(matrix(seq(2, 8, 2), 2, 2)) # to assign values to the original matrix.
> my_matrix$get() ## to show the original matrix.
[,1] [,2]
[1,] 2 6
[2,] 4 8
> my_matrix$getInverse() ## the inverse matrix has not been set yet.
NULL
> cacheSolve(my_matrix) ## getting cached data of the inversed matrix.
[,1] [,2]
[1,] -1.0 0.75
[2,] 0.5 -0.25
> my_matrix$getInverse() ## now the inverse matrix has values of inverting the original matrix
[,1] [,2]
[1,] -1.0 0.75
[2,] 0.5 -0.25
> my_matrix <- makeCacheMatrix(matrix(seq(1, 7, 2), 2, 2)) # to assign new values to the original matrix.
> my_matrix$get() ## to show the new matrix.
[,1] [,2]
[1,] 1 5
[2,] 3 7
> my_matrix$getInverse() ## the cached values of the new inverse has been cleared.
NULL
|
## Jonah Simon
## Import Arc Outputs and DHS data
##
## Loads the 2010 and 2015 Tanzania DHS survey files (children / individual /
## household recodes) plus the ArcGIS forest-loss and tree-cover summaries
## that were exported to Excel.
# Load libraries
library(tidyverse)
library(haven)
library(readxl)
# Import DHS data files (Stata .DTA recodes, read via haven::read_dta)
children2010 <- read_dta("~/Google Drive/Middlebury/Econ Research/symposium/DHSData/DHS2010/TZKR63DT/TZKR63FL.DTA")
individual2010 <- read_dta("~/Google Drive/Middlebury/Econ Research/symposium/DHSData/DHS2010/TZIR63DT/TZIR63FL.DTA")
household2010 <- read_dta("~/Google Drive/Middlebury/Econ Research/symposium/DHSData/DHS2010/TZHR63DT/TZHR63FL.DTA")
children2015 <- read_dta("~/Google Drive/Middlebury/Econ Research/symposium/DHSData/DHS2015/TZKR7HDT/TZKR7HFL.DTA")
individual2015 <- read_dta("~/Google Drive/Middlebury/Econ Research/symposium/DHSData/DHS2015/TZIR7HDT/TZIR7HFL.DTA")
household2015 <- read_dta("~/Google Drive/Middlebury/Econ Research/symposium/DHSData/DHS2015/TZHR7HDT/TZHR7HFL.DTA")
# Import ArcGIS LossYear output tables from Excel.
# For each year this creates DHS2010_LossYear<yy> / DHS2015_LossYear<yy>
# in the global environment via assign()/get(), then keeps only the
# DHSID and COUNT columns.
# NOTE(review): year 11 is absent from the sequence -- confirm that is
# intentional (e.g. no 2011 loss table was exported).
for (year in c(7,8,9,10,12,13,14,15,16)){
assign(sprintf("DHS2010_LossYear%s", year),read_excel(sprintf("~/Google Drive/Middlebury/Econ Research/symposium/arcToExcelOutput/LossYear/DHS2010_LossYear%s.xls",year)))
assign(sprintf("DHS2015_LossYear%s", year),read_excel(sprintf("~/Google Drive/Middlebury/Econ Research/symposium/arcToExcelOutput/LossYear/DHS2015_LossYear%s.xls",year)))
assign(sprintf("DHS2010_LossYear%s", year), select(get(sprintf("DHS2010_LossYear%s", year)),DHSID,COUNT))
assign(sprintf("DHS2015_LossYear%s", year), select(get(sprintf("DHS2015_LossYear%s", year)),DHSID,COUNT))
}
# Import ArcGIS Tree Cover output tables from Excel
treeCover2010 <- read_excel("~/Google Drive/Middlebury/Econ Research/symposium/arcToExcelOutput/TreeCover/DHS2010_TreeCover.xls")
treeCover2015 <- read_excel("~/Google Drive/Middlebury/Econ Research/symposium/arcToExcelOutput/TreeCover/DHS2015_TreeCover.xls")
| /importingData.R | no_license | jonahsimon36/tanzania | R | false | false | 1,817 | r | ## Jonah Simon
## Import Arc Outputs and DHS data
# Load libraries
library(tidyverse)
library(haven)
library(readxl)
# Import DHS data files
children2010 <- read_dta("~/Google Drive/Middlebury/Econ Research/symposium/DHSData/DHS2010/TZKR63DT/TZKR63FL.DTA")
individual2010 <- read_dta("~/Google Drive/Middlebury/Econ Research/symposium/DHSData/DHS2010/TZIR63DT/TZIR63FL.DTA")
household2010 <- read_dta("~/Google Drive/Middlebury/Econ Research/symposium/DHSData/DHS2010/TZHR63DT/TZHR63FL.DTA")
children2015 <- read_dta("~/Google Drive/Middlebury/Econ Research/symposium/DHSData/DHS2015/TZKR7HDT/TZKR7HFL.DTA")
individual2015 <- read_dta("~/Google Drive/Middlebury/Econ Research/symposium/DHSData/DHS2015/TZIR7HDT/TZIR7HFL.DTA")
household2015 <- read_dta("~/Google Drive/Middlebury/Econ Research/symposium/DHSData/DHS2015/TZHR7HDT/TZHR7HFL.DTA")
# Import ArcGIS LossYear output tables from Excel
for (year in c(7,8,9,10,12,13,14,15,16)){
assign(sprintf("DHS2010_LossYear%s", year),read_excel(sprintf("~/Google Drive/Middlebury/Econ Research/symposium/arcToExcelOutput/LossYear/DHS2010_LossYear%s.xls",year)))
assign(sprintf("DHS2015_LossYear%s", year),read_excel(sprintf("~/Google Drive/Middlebury/Econ Research/symposium/arcToExcelOutput/LossYear/DHS2015_LossYear%s.xls",year)))
assign(sprintf("DHS2010_LossYear%s", year), select(get(sprintf("DHS2010_LossYear%s", year)),DHSID,COUNT))
assign(sprintf("DHS2015_LossYear%s", year), select(get(sprintf("DHS2015_LossYear%s", year)),DHSID,COUNT))
}
# Import ArcGIS Tree Cover output tables from Excel
treeCover2010 <- read_excel("~/Google Drive/Middlebury/Econ Research/symposium/arcToExcelOutput/TreeCover/DHS2010_TreeCover.xls")
treeCover2015 <- read_excel("~/Google Drive/Middlebury/Econ Research/symposium/arcToExcelOutput/TreeCover/DHS2015_TreeCover.xls")
|
library(stringr)
# Directory holding one "<FirstInitial><LastName>.txt" file per queried author.
data.lib="/Users/xuehan/Desktop/Spr2017-proj4-team-9/data/nameset"
# NOTE(review): the pattern argument of list.files() is a regular expression,
# so "*.txt" is interpreted loosely; "\\.txt$" would be the strict form.
data.files=list.files(path=data.lib, "*.txt")
data.files
## remove "*.txt": strip the 4-character extension from each file name
query.list=substring(data.files,
1, nchar(data.files)-4)
query.list
## add a space between the first initial and the last name,
## e.g. "JSmith" -> "J Smith", to match the coauthor format used below
query.list=paste(substring(query.list, 1, 1),
" ",
substring(query.list,
2, nchar(query.list)),
sep=""
)
query.list
# Parse one line of a nameset file into its record fields.
#
# Line format: "<authorid>_<paperid> <coauthor;list><>title<>journal".
# Returns a list of: author_id (numeric), paper_id (numeric),
# coauthor_list (character vector, normalised to "<initial> <last name>"
# with the queried author removed), paper_title, journal_name.
f.line.proc <- function(lin, nam.query = ".") {
  # Remove unwanted characters. base sub() replaces the first match,
  # mirroring the original stringr::str_replace call (no new dependency).
  char_notallowed <- "\\@#$%^&?"
  lin.str <- sub(char_notallowed, "", lin)
  # author id: the token before the first "_"
  lin.str <- strsplit(lin.str, "_")[[1]]
  author_id <- as.numeric(lin.str[1])
  # paper id: first space-delimited token of the remainder
  lin.str <- lin.str[2]
  paper_id <- strsplit(lin.str, " ")[[1]][1]
  lin.str <- substring(lin.str, nchar(paper_id) + 1, nchar(lin.str))
  paper_id <- as.numeric(paper_id)
  # fields are separated by "<>": coauthors, paper title, journal name
  lin.str <- strsplit(lin.str, "<>")[[1]]
  coauthor_list <- strsplit(lin.str[1], ";")[[1]]
  # normalise each coauthor to "<first initial> <last name>"
  # (seq_along also handles an empty coauthor list, unlike 1:length)
  for (j in seq_along(coauthor_list)) {
    if (nchar(coauthor_list[j]) > 0) {
      nam <- strsplit(coauthor_list[j], " ")[[1]]
      if (nchar(nam[1]) > 0) {
        first.ini <- substring(nam[1], 1, 1)
      } else {
        first.ini <- substring(nam[2], 1, 1)
      }
      # BUG FIX: in the original these two lines ran outside the
      # nchar() guard, so empty entries reused a stale `nam`/`first.ini`
      # from the previous iteration (or errored on the first one).
      last.name <- nam[length(nam)]
      coauthor_list[j] <- paste(first.ini, last.name)
    }
  }
  # drop the queried author from their own coauthor list
  match_ind <- charmatch(nam.query, coauthor_list, nomatch = -1)
  if (match_ind > 0) {
    coauthor_list <- coauthor_list[-match_ind]
  }
  paper_title <- lin.str[2]
  journal_name <- lin.str[3]
  list(author_id,
       paper_id,
       coauthor_list,
       paper_title,
       journal_name)
}
# NOTE(review): list(1:length(data.files)) creates a length-1 list; this
# works only because data_list[[i]] below grows the list as needed --
# vector("list", length(data.files)) would be the conventional preallocation.
data_list=list(1:length(data.files))
for(i in 1:length(data.files)){
## Step 0 scan in one line at a time.
dat=as.list(readLines(paste(data.lib, data.files[i], sep="/")))
## parse every record of this author's file, removing the queried author
## from each coauthor list
data_list[[i]]=lapply(dat, f.line.proc, nam.query=query.list[i])
}
# For each queried author, flatten the parsed records into one data frame
# (collapsing coauthor vectors into comma-separated strings) and write it
# out as "<author name>.csv" next to the source text files.
for (i in 1:length(query.list))
{
mat=data_list[[i]]
## Turn nested list into one data frame:
textFileDfList <- lapply(mat, function(listLevel3){
## Paste multiple entries (e.g. vector of co-authors)
## together to create a single character entry:
simplifiedList <- lapply(listLevel3,
function(entries) paste(entries, collapse = ", "))
## Create data.frame:
outDf <- as.data.frame(simplifiedList,
stringsAsFactors = FALSE,
col.names = c("author ID", "paper ID", "coauthor names",
"paper title", "journal title")
)
## Combine data frames of the single entries to one data frame,
## containing all entries of the text file:
textFileDf <- do.call('cbind', outDf)
})
## Combine data frames of the text files to one big data frame:
bigDataFrame <- as.data.frame(do.call('rbind', textFileDfList))
bigDataFrame$author.names = query.list[i]
write.csv(bigDataFrame, file = paste("/Users/xuehan/Desktop/Spr2017-proj4-team-9/data/nameset/", query.list[i], ".csv", sep=""))
}
| /lib/data cleaner.R | no_license | TZstatsADS/Spr2017-proj4-team-9 | R | false | false | 3,308 | r |
library(stringr)
data.lib="/Users/xuehan/Desktop/Spr2017-proj4-team-9/data/nameset"
data.files=list.files(path=data.lib, "*.txt")
data.files
## remove "*.txt"
query.list=substring(data.files,
1, nchar(data.files)-4)
query.list
## add a space
query.list=paste(substring(query.list, 1, 1),
" ",
substring(query.list,
2, nchar(query.list)),
sep=""
)
query.list
f.line.proc=function(lin, nam.query="."){
# remove unwanted characters
char_notallowed <- "\\@#$%^&?" # characters to be removed
lin.str=str_replace(lin, char_notallowed, "")
# get author id
lin.str=strsplit(lin.str, "_")[[1]]
author_id=as.numeric(lin.str[1])
# get paper id
lin.str=lin.str[2]
paper_id=strsplit(lin.str, " ")[[1]][1]
lin.str=substring(lin.str, nchar(paper_id)+1, nchar(lin.str))
paper_id=as.numeric(paper_id)
# get coauthor list
lin.str=strsplit(lin.str, "<>")[[1]]
coauthor_list=strsplit(lin.str[1], ";")[[1]]
#print(lin.str)
for(j in 1:length(coauthor_list)){
if(nchar(coauthor_list[j])>0){
nam = strsplit(coauthor_list[j], " ")[[1]]
if(nchar(nam[1])>0){
first.ini=substring(nam[1], 1, 1)
}else{
first.ini=substring(nam[2], 1, 1)
}
}
last.name=nam[length(nam)]
nam.str = paste(first.ini, last.name)
coauthor_list[j]=nam.str
}
match_ind = charmatch(nam.query, coauthor_list, nomatch=-1)
#print(nam.query)
#print(coauthor_list)
#print(match_ind)
if(match_ind>0){
coauthor_list=coauthor_list[-match_ind]
}
paper_title=lin.str[2]
journal_name=lin.str[3]
list(author_id,
paper_id,
coauthor_list,
paper_title,
journal_name)
}
data_list=list(1:length(data.files))
for(i in 1:length(data.files)){
## Step 0 scan in one line at a time.
dat=as.list(readLines(paste(data.lib, data.files[i], sep="/")))
#ASDFASDF
data_list[[i]]=lapply(dat, f.line.proc, nam.query=query.list[i])
}
for (i in 1:length(query.list))
{
mat=data_list[[i]]
## Turn nested list into one data frame:
textFileDfList <- lapply(mat, function(listLevel3){
## Paste multiple entries (e.g. vector of co-authors)
## together to create a single character entry:
simplifiedList <- lapply(listLevel3,
function(entries) paste(entries, collapse = ", "))
## Create data.frame:
outDf <- as.data.frame(simplifiedList,
stringsAsFactors = FALSE,
col.names = c("author ID", "paper ID", "coauthor names",
"paper title", "journal title")
)
## Combine data frames of the single entries to one data frame,
## containing all entries of the text file:
textFileDf <- do.call('cbind', outDf)
})
## Combine data frames of the text files to one big data frame:
bigDataFrame <- as.data.frame(do.call('rbind', textFileDfList))
bigDataFrame$author.names = query.list[i]
write.csv(bigDataFrame, file = paste("/Users/xuehan/Desktop/Spr2017-proj4-team-9/data/nameset/", query.list[i], ".csv", sep=""))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mda.R
\name{getmdr}
\alias{getmdr}
\title{Get the raw Mass Defect}
\usage{
getmdr(mz)
}
\arguments{
\item{mz}{numeric vector for exact mass}
}
\value{
raw Mass Defect
}
\description{
Get the raw Mass Defect
}
\examples{
getmdr(getmass('C2H4'))
}
| /man/getmdr.Rd | no_license | yufree/enviGCMS | R | false | true | 324 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mda.R
\name{getmdr}
\alias{getmdr}
\title{Get the raw Mass Defect}
\usage{
getmdr(mz)
}
\arguments{
\item{mz}{numeric vector for exact mass}
}
\value{
raw Mass Defect
}
\description{
Get the raw Mass Defect
}
\examples{
getmdr(getmass('C2H4'))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model.matrix.R
\docType{methods}
\name{model.matrix,BFBayesFactor-method}
\alias{model.matrix,BFBayesFactor}
\alias{model.matrix,BFBayesFactor-method}
\alias{model.matrix,BFBayesFactorTop-method}
\title{Design matrices for Bayes factor linear models analyses.}
\usage{
\S4method{model.matrix}{BFBayesFactor}(object, ...)
\S4method{model.matrix}{BFBayesFactorTop}(object, ...)
}
\arguments{
\item{object}{a BayesFactor object with a single numerator}
\item{...}{arguments passed to and from related methods}
}
\value{
Returns the design matrix for the corresponding model. The 'gMap' attribute of the returned
matrix contains the mapping from columns of the design matrix to g parameters
}
\description{
This function returns the design matrix used for computation of the Bayes factor
for the numerator of a \code{BFBayesFactor} object. There must not be more
than one numerator in the \code{BFBayesFactor} object.
}
\examples{
## Gets the design matrix for a simple analysis
data(sleep)
bf = anovaBF(extra ~ group + ID, data = sleep, whichRandom="ID", progress=FALSE)
X = model.matrix(bf)
## Show dimensions of X (should be 20 by 12)
dim(X)
}
\references{
Rouder, J. N., Morey, R. D., Speckman, P. L., Province, J. M., (2012)
Default Bayes Factors for ANOVA Designs. Journal of Mathematical
Psychology. 56. p. 356-374.
}
| /pkg/BayesFactor/man/model.matrix-methods.Rd | no_license | jonathon-love/BayesFactor | R | false | true | 1,410 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model.matrix.R
\docType{methods}
\name{model.matrix,BFBayesFactor-method}
\alias{model.matrix,BFBayesFactor}
\alias{model.matrix,BFBayesFactor-method}
\alias{model.matrix,BFBayesFactorTop-method}
\title{Design matrices for Bayes factor linear models analyses.}
\usage{
\S4method{model.matrix}{BFBayesFactor}(object, ...)
\S4method{model.matrix}{BFBayesFactorTop}(object, ...)
}
\arguments{
\item{object}{a BayesFactor object with a single numerator}
\item{...}{arguments passed to and from related methods}
}
\value{
Returns the design matrix for the corresponding model. The 'gMap' attribute of the returned
matrix contains the mapping from columns of the design matrix to g parameters
}
\description{
This function returns the design matrix used for computation of the Bayes factor
for the numerator of a \code{BFBayesFactor} object. There must not be more
than one numerator in the \code{BFBayesFactor} object.
}
\examples{
## Gets the design matrix for a simple analysis
data(sleep)
bf = anovaBF(extra ~ group + ID, data = sleep, whichRandom="ID", progress=FALSE)
X = model.matrix(bf)
## Show dimensions of X (should be 20 by 12)
dim(X)
}
\references{
Rouder, J. N., Morey, R. D., Speckman, P. L., Province, J. M., (2012)
Default Bayes Factors for ANOVA Designs. Journal of Mathematical
Psychology. 56. p. 356-374.
}
|
# One-time environment setup: install the CRAN dependencies for this project.
install.packages("tidyverse")
install.packages("data.table")
install.packages("remotes")
install.packages("rlang")
install.packages("SuperLearner")
# Development versions installed straight from GitHub (master branch).
remotes::install_github("osofr/condensier@master")
remotes::install_github("jeremyrcoyle/origami@master")
remotes::install_github("jeremyrcoyle/sl3@master")
| /install.R | permissive | chenweichen/lab_05 | R | false | false | 306 | r | install.packages("tidyverse")
install.packages("data.table")
install.packages("remotes")
install.packages("rlang")
install.packages("SuperLearner")
remotes::install_github("osofr/condensier@master")
remotes::install_github("jeremyrcoyle/origami@master")
remotes::install_github("jeremyrcoyle/sl3@master")
|
#' Add a fourth "ID" column to bed-style files
#'
#' There are three required BED fields: chr, chromStart
#' and chromEnd. The fourth (optional) column is reserved
#' for name which this function generates as "chr-chromStart".
#'
#' @export
#'
#' @param bed_file either a dataframe or filename to be read
#'
#' @examples
#' bed <- data.frame(chr=c("chr1", "chr2"),
#' start=c(100, 450), end=c(200, 300))
#'
#' add_bed_id(bed)
#'
add_bed_id <- function(bed_file){
  # Accept either a path to a BED file or an already-loaded data.frame;
  # in both cases keep only the three required columns (chr, start, end).
  if (is.character(bed_file)) {
    bed <- read.table(bed_file, header = FALSE, sep = "\t")[, 1:3]
  } else {
    bed <- bed_file[, 1:3]
  }
  # Build the fourth "name" column as "chr-chromStart"; gsub strips any
  # padding spaces introduced when mixed-type columns are formatted
  # to characters by apply().
  ids <- apply(bed[, 1:2], 1, paste0, collapse = "-")
  bed[, 4] <- gsub(" ", "", ids)
  bed
}
#' Alias to source remote bioconductor script
#'
#' Remembering/typing this URL is a pain.
#'
#' @export
#'
#' @examples
#' bioconductor()
#' biocLite()
#'
bioconductor <- function() {
  # Convenience wrapper: fetch and source the remote Bioconductor
  # installation script, which defines biocLite().
  source("http://bioconductor.org/biocLite.R")
}
#' Circular permutation of genomic features in a BED file
#'
#' Generate a permuted version of an input BED file (object),
#' probably for use as a null model for comparison with some
#' statistical result.
#'
#' @export
#'
#' @param chr chromosome reference, matching your BED file (i.e.
#' this may be "chr1" or "1")
#' @param bed a BED format \code{data.frame}, such as that
#' produced after using \code{read.table} on a BED file
#' @param chr.sizes a \code{data.frame} of chromsoome sizes,
#' i.e. \code{mm9.chrom.sizes.txt}
#' @param step.size if your features are discretised (e.g. can only
#' occur at set intervals, like those introduced by binning) they
#' can be permuted by setting \code{step.size} to the size of your
#' bins in basepairs. \code{step.size} of 1 (the default) means no
#' binning.
#'
#' @examples
#'
#' sizes <- read.table("http://igv.googlecode.com/svn-history/r2493/trunk/src/org/broad/igv/hic/tools/mm9.chrom.sizes")
#'
#' bed_file <- data.frame(chr=c("chr1", "chr2"),
#' start=c(100, 450), end=c(200, 300))
#'
#' set.seed(42)
#'
#' (permute_chr("chr1", bed_file, sizes))
#' # chr start end
#' # 1 chr1 180395673 180395773
#'
#' # run for all chromosomes
#' chrs <- as.character(unique(bed$chr))
#' perm <- sapply(chrs, permute_chr, bed=bed_file,
#' chr.sizes=sizes, simplify=F)
#' (do.call(rbind, perm))
#' # chr start end
#' # chr1 chr1 184787091 184787191
#' # chr2 chr2 52005764 52005614
#'
# Circularly permute the features of one chromosome.
#
# chr:       chromosome label matching column 1 of both `bed` and `chr.sizes`
# bed:       BED-style data.frame (chr, start, end, ...)
# chr.sizes: data.frame of chromosome name / length pairs
# step.size: bin width in bp for discretised features (1 = no binning)
# Returns the chr-subset of `bed` with start/end shifted by one random
# circular offset, wrapped at the chromosome end.
permute_chr <- function(chr, bed, chr.sizes, step.size=1){
  # rows of the BED table belonging to the requested chromosome
  outbed <- bed[bed[,1] == chr,]
  # chromosome length in bp (e.g. from mm9.chrom.sizes.txt)
  chr.len <- chr.sizes[chr.sizes[,1] == chr, 2]
  # number of discrete positions a feature may be shifted by
  avail_steps <- floor(chr.len / step.size)
  # one circular shift shared by every feature on this chromosome
  shift <- sample(1:avail_steps, 1)
  # apply the shift to start and end, wrapping around the chromosome end.
  # Vectorised over rows; this also fixes the original per-row loop, which
  # ran for i = 1, 0 on chromosomes with zero features (1:nrow(outbed))
  # and thereby corrupted the empty result.
  outbed[,2] <- (outbed[,2] + shift*step.size) %% chr.len
  outbed[,3] <- (outbed[,3] + shift*step.size) %% chr.len
  outbed
}
#' Write data.frame in BED format
#'
#' Thin wrapper around \code{write.table} that applies the options
#' needed for BED output: tab-separated fields, no quoting, and no
#' row or column names. Called for its side effect of writing
#' \code{fname}.
#'
#' @export
#' @param df data.frame (typically 4+ column)
#' @param fname filename to save BED file
#'
#' @examples
#' write_bed(data.frame(chr=c("chr1", "chr2"),
#'  start=c(100, 450), end=c(200, 300), id=c("p1", "p2")),
#'  "outfile.bed")
#'
write_bed <- function(df, fname)
write.table(df, fname, quote=F, row.names=F,
col.names=F, sep="\t") | /R/bioinfo_fns.R | permissive | blmoore/blmR | R | false | false | 3,473 | r | #' Add a fourth "ID" column to bed-style files
#'
#' There are three required BED fields: chr, chromStart
#' and chromEnd. The fourth (optional) column is reserved
#' for name, which this function generates as "chr-chromStart".
#'
#' @export
#'
#' @param bed_file either a BED-layout \code{data.frame} or the name
#' of a tab-separated BED file to be read from disk
#'
#' @return a 4-column \code{data.frame}: the first three BED columns
#' plus the generated "chr-chromStart" identifier column
#'
#' @examples
#' bed <- data.frame(chr=c("chr1", "chr2"),
#'  start=c(100, 450), end=c(200, 300))
#'
#' add_bed_id(bed)
#'
add_bed_id <- function(bed_file){
  # work with either filenames or data.frames
  bed <- if(is.character(bed_file))
    read.table(bed_file, header=FALSE, sep="\t")[,1:3]
  else
    bed_file[,1:3]
  # build the "chr-chromStart" identifier directly with paste0();
  # the previous apply()/as.matrix() round-trip format()-padded the
  # numeric column and then needed a gsub() to strip the padding
  # spaces again (which would also have eaten spaces inside values)
  bed[,4] <- paste0(bed[,1], "-", bed[,2])
  bed
}
#' Alias to source remote bioconductor script
#'
#' Saves remembering and typing the installer URL by hand.
#'
#' @export
#'
#' @examples
#' bioconductor()
#' biocLite()
#'
bioconductor <- function() {
  installer_url <- "http://bioconductor.org/biocLite.R"
  source(installer_url)
}
#' Circular permutation of genomic features in a BED file
#'
#' Generate a permuted version of an input BED file (object),
#' probably for use as a null model for comparison with some
#' statistical result. Every feature on the chromosome is moved by
#' the same random offset, wrapping around the chromosome end, so
#' relative spacing between features is preserved.
#'
#' @export
#'
#' @param chr chromosome reference, matching your BED file (i.e.
#' this may be "chr1" or "1")
#' @param bed a BED format \code{data.frame}, such as that
#' produced after using \code{read.table} on a BED file
#' @param chr.sizes a \code{data.frame} of chromosome sizes,
#' i.e. \code{mm9.chrom.sizes.txt}
#' @param step.size if your features are discretised (e.g. can only
#' occur at set intervals, like those introduced by binning) they
#' can be permuted by setting \code{step.size} to the size of your
#' bins in basepairs. \code{step.size} of 1 (the default) means no
#' binning.
#'
#' @return a \code{data.frame} holding the shifted features for
#' \code{chr} (possibly zero rows if none are on that chromosome)
#'
#' @examples
#'
#' sizes <- read.table("http://igv.googlecode.com/svn-history/r2493/trunk/src/org/broad/igv/hic/tools/mm9.chrom.sizes")
#'
#' bed_file <- data.frame(chr=c("chr1", "chr2"),
#'  start=c(100, 450), end=c(200, 300))
#'
#' set.seed(42)
#'
#' (permute_chr("chr1", bed_file, sizes))
#' # chr start end
#' # 1 chr1 180395673 180395773
#'
#' # run for all chromosomes
#' chrs <- as.character(unique(bed_file$chr))
#' perm <- sapply(chrs, permute_chr, bed=bed_file,
#'  chr.sizes=sizes, simplify=F)
#' (do.call(rbind, perm))
#' # chr start end
#' # chr1 chr1 184787091 184787191
#' # chr2 chr2 52005764 52005614
#'
permute_chr <- function(chr, bed, chr.sizes, step.size=1){
  # features on the requested chromosome only
  outbed <- bed[bed[,1] == chr,]
  # chromosome length (e.g. from mm9.chrom.sizes.txt); avoid calling
  # this `max`, which shadowed base::max in the previous version
  chr.len <- chr.sizes[chr.sizes[,1] == chr, 2]
  avail_steps <- floor(chr.len / step.size)
  # one random shift shared by all features (circular permutation)
  shift <- sample(seq_len(avail_steps), 1)
  # vectorised update of start/end; unlike the old `for (i in
  # 1:nrow(outbed))` loop this is also correct when the chromosome
  # carries no features (1:0 used to run the body twice)
  outbed[,2] <- (outbed[,2] + shift*step.size) %% chr.len
  outbed[,3] <- (outbed[,3] + shift*step.size) %% chr.len
  outbed
}
#' Write data.frame in BED format
#'
#' Thin wrapper around \code{write.table} that applies the options
#' needed for BED output: tab-separated fields, no quoting, and no
#' row or column names. Called for its side effect of writing
#' \code{fname}.
#'
#' @export
#' @param df data.frame (typically 4+ column)
#' @param fname filename to save BED file
#'
#' @return Invisibly returns \code{NULL}, as \code{write.table} does.
#'
#' @examples
#' write_bed(data.frame(chr=c("chr1", "chr2"),
#'  start=c(100, 450), end=c(200, 300), id=c("p1", "p2")),
#'  "outfile.bed")
#'
write_bed <- function(df, fname)
  # spell out TRUE/FALSE; the T/F shortcuts are reassignable in R
  write.table(df, fname, quote = FALSE, row.names = FALSE,
              col.names = FALSE, sep = "\t")
require(plot3D)
# Parameters of the waveplate sketch.
# Angle controlling the relative x/y field amplitudes via
# round(sin(epsilon.rad), 2) and round(cos(epsilon.rad), 2) in Ex/Ey.
epsilon.rad <- 3*pi/4
# Constant phase offset (in periods) added to the y-component (Ey).
phase.periods <- 0.1
# Snapshot time at which the fields are drawn (default `t` of Ex/Ey).
time <- 0
# The waveplate occupies [x.waveplateStart, x.waveplateEnd] along the
# propagation axis.
x.waveplateStart <- 0
x.waveplateEnd <- 2
# Total phase shift (in periods) ramped in linearly across the plate
# (see Ex below).
waveplate.shiftPeriods <- 0.5
# Sampling grid along the propagation axis for the plotted curves.
x <- seq(from=-1,to=2,by=0.01)
# x-component of the field at position(s) x and time t. The phase
# shift ramps linearly from 0 at x.waveplateStart to
# waveplate.shiftPeriods at x.waveplateEnd, and is clamped to those
# endpoint values outside the plate. Relies on the globals
# epsilon.rad, time, x.waveplateStart, x.waveplateEnd and
# waveplate.shiftPeriods defined above.
#
# x may be a vector; the former element-wise sapply() is replaced by
# an equivalent (and vectorised) pmin/pmax clamp.
Ex <- function(x=0, t=time) {
  frac <- (x - x.waveplateStart) / (x.waveplateEnd - x.waveplateStart)
  shift <- pmin(pmax(frac, 0), 1) * waveplate.shiftPeriods
  round(sin(epsilon.rad), digits=2)*cos(2*pi*(x-t-shift))
}
Ey <- function(x=0, t=time) round(cos(epsilon.rad), digits=2)*cos(2*pi*(x-t+phase.periods))
# Create plot. The first arrows3D call opens the device and fixes the
# view box (ylim/zlim, theta/phi); everything after it uses add=T to
# draw into the same scene.
# Z-Axis: propagation axis, labelled "z".
arrows3D(x0=x[1], x1=tail(x,1)+0.5,
y0=0, y1=0,
z0=0, z1=0,
col="black", lwd = 2,
bty="n",
zlim=c(-1,1), ylim=c(-1,1),
theta=45, phi=20)
text3D(x=tail(x, 1)+0.55, y=0, z=0, labels="z", add=T)
# Wave along x axis (Ex drawn on the z coordinate, red)
lines3D(x=x, y=rep(0, length(x)), z=Ex(x), col="red", lwd = 2, add=T)
# Wave along y axis (Ey drawn on the y coordinate, green)
lines3D(x=x, y=Ey(x), z=rep(0,length(x)), col="green", lwd = 2, add=T)
# Add axes: unlabelled y/z cross-hairs at the waveplate entrance,
# labelled y/z arrows at the waveplate exit.
segments3D(x0=x.waveplateStart, x1=x.waveplateStart,
y0=-0.8, y1=0.8,
z0=0, z1=0,
col="black", lwd = 2,
add=T)
segments3D(x0=x.waveplateStart, x1=x.waveplateStart,
y0=0, y1=0,
z0=-0.8, z1=0.8,
col="black", lwd = 2,
add=T)
arrows3D(x0=x.waveplateEnd, x1=x.waveplateEnd,
y0=-1, y1=1,
z0=0, z1=0,
col="black", lwd = 2,
add=T)
text3D(x=x.waveplateEnd, y = 1.05, z = 0, labels = "y", add=T)
arrows3D(x0=x.waveplateEnd, x1=x.waveplateEnd,
y0=0, y1=0,
z0=-1, z1=1,
col="black", lwd = 2,
add=T)
text3D(x=x.waveplateEnd, y = 0, z = 1.05, labels = "x", add=T)
# Polarisation state at the plate entrance
# START
# Add polarisation plane (translucent blue rectangle at x = start)
rect3D(x0 = x.waveplateStart, x1 = NULL,
y0 = -0.8, y1 = 0.8,
z0 = -0.8, z1 = 0.8,
add = T, col="blue", alpha=0.1)
# Add polarisation ellipse: trace (Ey, Ex) over one time sweep (t = x)
lines3D(x = rep(x.waveplateStart, length(x)),
y = Ey(x=x.waveplateStart, t=x),
z = Ex(x=x.waveplateStart, t=x),
add=T, col="blue", lwd = 2)
# Add polarisation vector: instantaneous field at the default time
arrows3D( x0 = x.waveplateStart, x1 = x.waveplateStart,
y0 = 0, y1 = Ey(x.waveplateStart),
z0 = 0, z1 = Ex(x.waveplateStart),
add=T, col="blue", lwd = 2)
# Polarisation state at the plate exit
# END
# Add polarisation plane
rect3D(x0 = x.waveplateEnd, x1 = NULL,
y0 = -0.8, y1 = 0.8,
z0 = -0.8, z1 = 0.8,
add = T, col="blue", alpha=0.1)
# Add polarisation ellipse
lines3D(x = rep(x.waveplateEnd, length(x)),
y = Ey(x=x.waveplateEnd, t=x),
z = Ex(x=x.waveplateEnd, t=x),
add=T, col="blue", lwd = 2)
# Add polarisation vector
arrows3D( x0 = x.waveplateEnd, x1 = x.waveplateEnd,
y0 = 0, y1 = Ey(x.waveplateEnd),
z0 = 0, z1 = Ex(x.waveplateEnd),
add=T, col="blue", lwd = 2) | /sketches/betterWaveplate.R | no_license | AlreadyTakenJonas/bachelorThesisSummary | R | false | false | 3,000 | r | require(plot3D)
# Parameters of the waveplate sketch.
# Angle controlling the relative x/y field amplitudes via
# round(sin(epsilon.rad), 2) and round(cos(epsilon.rad), 2) in Ex/Ey.
epsilon.rad <- 3*pi/4
# Constant phase offset (in periods) added to the y-component (Ey).
phase.periods <- 0.1
# Snapshot time at which the fields are drawn (default `t` of Ex/Ey).
time <- 0
# The waveplate occupies [x.waveplateStart, x.waveplateEnd] along the
# propagation axis.
x.waveplateStart <- 0
x.waveplateEnd <- 2
# Total phase shift (in periods) ramped in linearly across the plate
# (see Ex below).
waveplate.shiftPeriods <- 0.5
# Sampling grid along the propagation axis for the plotted curves.
x <- seq(from=-1,to=2,by=0.01)
# x-component of the field at position(s) x and time t. Inside the
# plate the phase shift grows linearly with depth; before the plate it
# is 0 and after it is held at waveplate.shiftPeriods. Uses the
# globals epsilon.rad, time, x.waveplateStart, x.waveplateEnd and
# waveplate.shiftPeriods.
Ex <- function(x=0, t=time) {
  shift_at <- function(pos) {
    if (pos < x.waveplateStart) {
      0
    } else if (pos > x.waveplateEnd) {
      waveplate.shiftPeriods
    } else {
      (pos - x.waveplateStart) / (x.waveplateEnd - x.waveplateStart) * waveplate.shiftPeriods
    }
  }
  shift <- sapply(x, shift_at)
  round(sin(epsilon.rad), digits=2)*cos(2*pi*(x-t-shift))
}
Ey <- function(x=0, t=time) round(cos(epsilon.rad), digits=2)*cos(2*pi*(x-t+phase.periods))
# Create plot. The first arrows3D call opens the device and fixes the
# view box (ylim/zlim, theta/phi); everything after it uses add=T to
# draw into the same scene.
# Z-Axis: propagation axis, labelled "z".
arrows3D(x0=x[1], x1=tail(x,1)+0.5,
y0=0, y1=0,
z0=0, z1=0,
col="black", lwd = 2,
bty="n",
zlim=c(-1,1), ylim=c(-1,1),
theta=45, phi=20)
text3D(x=tail(x, 1)+0.55, y=0, z=0, labels="z", add=T)
# Wave along x axis (Ex drawn on the z coordinate, red)
lines3D(x=x, y=rep(0, length(x)), z=Ex(x), col="red", lwd = 2, add=T)
# Wave along y axis (Ey drawn on the y coordinate, green)
lines3D(x=x, y=Ey(x), z=rep(0,length(x)), col="green", lwd = 2, add=T)
# Add axes: unlabelled y/z cross-hairs at the waveplate entrance,
# labelled y/z arrows at the waveplate exit.
segments3D(x0=x.waveplateStart, x1=x.waveplateStart,
y0=-0.8, y1=0.8,
z0=0, z1=0,
col="black", lwd = 2,
add=T)
segments3D(x0=x.waveplateStart, x1=x.waveplateStart,
y0=0, y1=0,
z0=-0.8, z1=0.8,
col="black", lwd = 2,
add=T)
arrows3D(x0=x.waveplateEnd, x1=x.waveplateEnd,
y0=-1, y1=1,
z0=0, z1=0,
col="black", lwd = 2,
add=T)
text3D(x=x.waveplateEnd, y = 1.05, z = 0, labels = "y", add=T)
arrows3D(x0=x.waveplateEnd, x1=x.waveplateEnd,
y0=0, y1=0,
z0=-1, z1=1,
col="black", lwd = 2,
add=T)
text3D(x=x.waveplateEnd, y = 0, z = 1.05, labels = "x", add=T)
# Polarisation state at the plate entrance
# START
# Add polarisation plane (translucent blue rectangle at x = start)
rect3D(x0 = x.waveplateStart, x1 = NULL,
y0 = -0.8, y1 = 0.8,
z0 = -0.8, z1 = 0.8,
add = T, col="blue", alpha=0.1)
# Add polarisation ellipse: trace (Ey, Ex) over one time sweep (t = x)
lines3D(x = rep(x.waveplateStart, length(x)),
y = Ey(x=x.waveplateStart, t=x),
z = Ex(x=x.waveplateStart, t=x),
add=T, col="blue", lwd = 2)
# Add polarisation vector: instantaneous field at the default time
arrows3D( x0 = x.waveplateStart, x1 = x.waveplateStart,
y0 = 0, y1 = Ey(x.waveplateStart),
z0 = 0, z1 = Ex(x.waveplateStart),
add=T, col="blue", lwd = 2)
# Polarisation state at the plate exit
# END
# Add polarisation plane
rect3D(x0 = x.waveplateEnd, x1 = NULL,
y0 = -0.8, y1 = 0.8,
z0 = -0.8, z1 = 0.8,
add = T, col="blue", alpha=0.1)
# Add polarisation ellipse
lines3D(x = rep(x.waveplateEnd, length(x)),
y = Ey(x=x.waveplateEnd, t=x),
z = Ex(x=x.waveplateEnd, t=x),
add=T, col="blue", lwd = 2)
# Add polarisation vector
arrows3D( x0 = x.waveplateEnd, x1 = x.waveplateEnd,
y0 = 0, y1 = Ey(x.waveplateEnd),
z0 = 0, z1 = Ex(x.waveplateEnd),
add=T, col="blue", lwd = 2)
require(Matrix)
# Power-iteration estimate of the two leading eigenpairs of an
# implicitly standardised version of a square numeric matrix x.
# Zero-variance (constant) columns are dropped before iterating and
# come back as NA entries in the returned vector v.
#
# x:       square numeric matrix.
# a1, a2:  optional starting vectors for the two iterates; random
#          normal vectors are drawn when NULL.
# tol:     relative residual threshold for the convergence test.
# maxiter: iteration cap; work proceeds in chunks of 10 steps.
#
# Returns list(v, lam1, er, lam2, niter, a1, a2): the dominant
# eigenvector (NA for dropped columns), the two leading eigenvalue
# magnitudes, the final residual, iterations used, and both iterates.
bestEigen <- function(x=x,a1=NULL,a2=NULL,tol=1.0e-6,maxiter=100) {
  # set.seed(12345)
  n <- nrow(x)
  # per-column mean and mean-square -> per-column variance z
  s1 <- colSums(x)/n
  s2 <- colSums(x^2)/n
  z <- s2-s1*s1
  # constant columns cannot be standardised; drop them
  bad <- which(z==0)
  good <- setdiff(seq_len(n),bad)
  # BUG FIX: x[-bad,-bad] with an empty `bad` negative-indexes with
  # integer(0), which selects NOTHING and left a 0x0 matrix whenever
  # every column had variance; only subset when needed.
  if (length(bad) > 0) x <- x[-bad,-bad]
  for (i in 1:3) junk <- gc()   # aggressive gc retained from original
  # re-derive scaling on the reduced matrix
  k <- nrow(x)
  s1 <- colSums(x)/k
  s2 <- colSums(x^2)/k
  col_sd <- sqrt(s2-s1*s1)      # renamed: `sd` shadowed stats::sd
  # column-scale x by 1/sd; Diagonal() is from the Matrix package
  # loaded at the top of this file
  x <- x %*% Diagonal(x=1/col_sd)
  C <- s1/col_sd
  if (is.null(a1)) a1 <- rnorm(nrow(x))
  if (is.null(a2)) a2 <- rnorm(nrow(x))
  xx <- t(x)
  for (i in 1:3) junk <- gc()
  niter <- 0
  while (niter < maxiter) {
    # ten power steps on the first iterate; the sum(C*.) terms appear
    # to centre the implicit cross-product without forming it --
    # confirm against the derivation if modifying
    for (i in 1:10) {
      a1 <- x %*% a1 - sum(C*a1)
      a1 <- xx %*% a1 - sum(a1)*C
      a1 <- a1/sqrt(sum(a1*a1))
    }
    ev1 <- a1
    # one extra step to estimate eigenvalue magnitude and residual
    a1 <- x %*% a1 - sum(C*a1)
    a1 <- xx %*% a1 - sum(a1)*C
    lam1 <- sqrt(sum(a1*a1))
    er1 <- sqrt(sum((a1 - lam1*ev1)^2))
    a1 <- a1/sqrt(sum(a1*a1))
    # second iterate, kept orthogonal to the first (deflation)
    a2 <- a2 - a1*sum(a2*a1)
    for (i in 1:10) {
      a2 <- x %*% a2 - sum(C*a2)
      a2 <- xx %*% a2 - sum(a2)*C
      a2 <- a2 - a1*sum(a2*a1)
      a2 <- a2/sqrt(sum(a2*a2))
    }
    ev2 <- a2
    a2 <- x %*% a2 - sum(C*a2)
    a2 <- xx %*% a2 - sum(a2)*C
    lam2 <- sqrt(sum(a2*a2))
    a2 <- a2/sqrt(sum(a2*a2))
    # keep the dominant pair in (a1, lam1)
    if (lam1 < lam2) {
      temp <- a1
      a1 <- a2
      a2 <- temp
      temp <- lam1
      lam1 <- lam2
      lam2 <- temp
      for (junk in 1:3) gc()
    }
    niter <- niter + 10
    # residual scaled by the spectral gap; NOTE(review): divides by
    # (lam1 - lam2), which can be ~0 for a degenerate spectrum
    if (er1/(lam1-lam2) < tol) break
  }
  # scatter the eigenvector back to full length; dropped columns = NA
  v <- rep(NA,n)
  v[good] <- ev1
  return(list(v=v,lam1=lam1,er=er1,lam2=lam2,niter=niter,a1=a1,a2=a2))
}
| /bestEigen3.R | no_license | zozo123/EigenVector | R | false | false | 1,432 | r | require(Matrix)
# Power-iteration estimate of the two leading eigenpairs of an
# implicitly standardised version of a square numeric matrix x,
# returning the dominant eigenvector with NA entries for dropped
# (zero-variance) columns.
#
# a1/a2:   optional starting vectors (random normals when NULL)
# tol:     relative residual threshold for the convergence test
# maxiter: iteration cap; work proceeds in chunks of 10 steps
#
# NOTE(review): when no column has zero variance, `bad` is empty and
# x[-bad,-bad] negative-indexes with integer(0), selecting a 0x0
# matrix -- i.e. the whole input is discarded. Confirm that inputs
# are always expected to contain at least one constant column.
bestEigen <- function(x=x,a1=NULL,a2=NULL,tol=1.0e-6,maxiter=100) {
# set.seed(12345)
n <- nrow(x)
# per-column mean and mean-square -> per-column variance z
s1 <- colSums(x)/n
s2 <- colSums(x^2)/n
z <- s2-s1*s1
#
# constant (zero-variance) columns cannot be standardised; drop them
bad <- which(z==0)
good <- setdiff(1:n,bad)
x <- x[-bad,-bad]
for (i in 1:3) junk <- gc()
# re-derive the scaling on the reduced matrix
k <- nrow(x)
s1 <- colSums(x)/k
s2 <- colSums(x^2)/k
sd <- sqrt(s2-s1*s1)
# column-scale x by 1/sd; Diagonal() comes from the Matrix package
# loaded at the top of this file
x <- x %*% Diagonal(x=1/sd)
C <- s1/sd
if (is.null(a1)) a1 <- rnorm(nrow(x))
if (is.null(a2)) a2 <- rnorm(nrow(x))
xx <- t(x)
for (i in 1:3) junk <- gc()
niter <- 0
while (niter < maxiter) {
# ten power steps on the first iterate; the sum(C*.) corrections
# appear to centre the implicit cross-product -- confirm before edits
for (i in 1:10) {
a1 <- x %*% a1 - sum(C*a1)
a1 <- xx %*% a1 - sum(a1)*C
a1 <- a1/sqrt(sum(a1*a1))
}
ev1 <- a1
# one extra step to estimate the eigenvalue magnitude and residual
a1 <- x %*% a1 - sum(C*a1)
a1 <- xx %*% a1 - sum(a1)*C
lam1 <- sqrt(sum(a1*a1))
er1 <- sqrt(sum((a1 - lam1*ev1)^2))
a1 <- a1/sqrt(sum(a1*a1))
# second iterate, kept orthogonal to the first (deflation)
a2 <- a2 - a1*sum(a2*a1)
for (i in 1:10) {
a2 <- x %*% a2 - sum(C*a2)
a2 <- xx %*% a2 - sum(a2)*C
a2 <- a2 - a1*sum(a2*a1)
a2 <- a2/sqrt(sum(a2*a2))
}
ev2 <- a2
a2 <- x %*% a2 - sum(C*a2)
a2 <- xx %*% a2 - sum(a2)*C
lam2 <- sqrt(sum(a2*a2))
a2 <- a2/sqrt(sum(a2*a2))
# keep the dominant pair in (a1, lam1)
if (lam1 < lam2) {
temp <- a1
a1 <- a2
a2 <- temp
temp <- lam1
lam1 <- lam2
lam2 <- temp
for (junk in 1:3) gc()
}
niter <- niter + 10
# residual scaled by the spectral gap; divides by (lam1 - lam2),
# which can be near zero for a degenerate spectrum
if (er1/(lam1-lam2) < tol) break
}
# scatter the eigenvector back to full length; dropped columns = NA
v <- rep(NA,n)
v[good] <- ev1
return(list(v=v,lam1=lam1,er=er1,lam2=lam2,niter=niter,a1=a1,a2=a2))
}
|
library(RCurl)
library(XML)
library(xml2)
library(rvest)
require(plyr)
# One BoardGameGeek username per line is expected on the clipboard.
form <- read.table("clipboard", sep="\t")
names(form) <- "BoardGameGeek Username"
# Accumulator data frames, seeded with a single all-zero row; the
# zero row in fulldata is stripped again at the bottom of the script
# ("remove first row of zero data").
fulldata <- data.frame(matrix(nrow = 1, ncol=16, data = 0))
user_temp_data <- data.frame(matrix(nrow = 1, ncol=16, data = 0))
# Column schema shared by both accumulators and by each per-game row
# built inside the scraping loop below.
names(fulldata) <- c(
"user",
"gamenames",
"yearpublished",
"minplayers",
"maxplayers",
"playing_time_minutes",
"user_rating",
"bgg_rating",
"num_plays",
"own",
"wanttoplay",
"wanttobuy",
"wanttotrade",
"wantintrade",
"wishlist",
"comment")
names(user_temp_data) <- c(
"user",
"gamenames",
"yearpublished",
"minplayers",
"maxplayers",
"playing_time_minutes",
"user_rating",
"bgg_rating",
"num_plays",
"own",
"wanttoplay",
"wanttobuy",
"wanttotrade",
"wantintrade",
"wishlist",
"comment")
# For every username, fetch the user's collection from the BGG XML
# API (expansions excluded) and append one row per game to fulldata.
for(i in form$`BoardGameGeek Username`){
print(i)
url <- sprintf("https://boardgamegeek.com/xmlapi2/collection?username=%s&stats=1&excludesubtype=boardgameexpansion", i)
# BUG FIX: user_temp_data was never reset between users, so every
# earlier user's rows (plus the zero seed row) were re-appended to
# fulldata on each later iteration -- the duplication the "manual
# dedupe" at the bottom of the script works around. Start each user
# with an empty accumulator instead (rbind(data.frame(), x) == x).
user_temp_data <- data.frame()
#forcing api pings: the collection endpoint can answer with a
# near-empty "queued" document at first; retry until content arrives
pg <- 0
xD <- 0
xL <- 0
pg <- read_xml(url)
xD <- xmlParse(pg)
xL <- xmlToList(xD)
while(
#length(xml_nodes(pg, xpath="//name") %>% xml_text()) < 1
length(xL) <= 1
){
Sys.sleep(30)
pg <- read_xml(url)
xD <- xmlParse(pg)
xL <- xmlToList(xD)
print(i)
}
# Build one row per item; the final element of xL holds top-level
# attributes rather than an item, hence length(xL) - 1.
for(j in 1:(length(xL)-1)){
test <- data.frame(
i
,xL[[j]]$name$text
,if(is.null(xL[[j]]$yearpublished)){"na"} else{xL[[j]]$yearpublished}
,xL[[j]]$stats$.attrs[1]
,xL[[j]]$stats$.attrs[2]
,xL[[j]]$stats$.attrs[5]
,xL[[j]]$stats$rating$.attrs
,xL[[j]]$stats$rating$bayesaverage
,xL[[j]]$numplays
# BUG FIX: the status flags below were previously listed in the order
# 1,5,7,6,3,4 while the names vector assigns own, wanttoplay,
# wanttobuy, wanttotrade, wantintrade, wishlist -- per the inline flag
# comments, four columns were mislabelled. Reordered to match.
,xL[[j]]$status[1] #own game flag
,xL[[j]]$status[5] #want to play flag
,xL[[j]]$status[6] #want to buy flag
,xL[[j]]$status[3] #available for trade flag
,xL[[j]]$status[4] #want in trade flag
,xL[[j]]$status[7] #wishlisted flag
,if(is.null(xL[[j]]$comment)){"na"} else{xL[[j]]$comment}
)
names(test) <- c("user", "gamenames", "yearpublished", "minplayers", "maxplayers", "playing_time_minutes", "user_rating", "bgg_rating", "num_plays", "own", "wanttoplay","wanttobuy", "wanttotrade", "wantintrade", "wishlist", "comment")
user_temp_data <- rbind(user_temp_data, test)
}
fulldata <- rbind(fulldata, user_temp_data)
}
#cleanup
# Drop exact duplicate rows: the scraping loop can append the same
# rows more than once, so only the first occurrence is kept.
fulldata = fulldata[!duplicated(fulldata), ] #manual dedupe until i can figure out whats wrong with the loops
# The scraped values arrive as text; coerce the numeric columns.
fulldata$yearpublished <- as.numeric(fulldata$yearpublished)
fulldata$minplayers <- as.numeric(fulldata$minplayers)
fulldata$maxplayers <- as.numeric(fulldata$maxplayers)
fulldata$playing_time_minutes <- as.numeric(fulldata$playing_time_minutes)
fulldata$user_rating <- as.numeric(fulldata$user_rating)
fulldata$bgg_rating <- as.numeric(fulldata$bgg_rating)
fulldata$num_plays <- as.numeric(fulldata$num_plays)
# Collapse embedded CR/LF inside free-text comments to single spaces.
fulldata$comment <- gsub("[\r\n]", " ", fulldata$comment)
#remove first row of zero data
fulldata <- fulldata[-1,]
| /bgg_group_data.R | no_license | ScottBurger/bgg_group_data | R | false | false | 3,079 | r |
library(RCurl)
library(XML)
library(xml2)
library(rvest)
require(plyr)
# One BoardGameGeek username per line is expected on the clipboard.
form <- read.table("clipboard", sep="\t")
names(form) <- "BoardGameGeek Username"
# Accumulator data frames, seeded with a single all-zero row; the
# zero row in fulldata is stripped again at the bottom of the script
# ("remove first row of zero data").
fulldata <- data.frame(matrix(nrow = 1, ncol=16, data = 0))
user_temp_data <- data.frame(matrix(nrow = 1, ncol=16, data = 0))
# Column schema shared by both accumulators and by each per-game row
# built inside the scraping loop below.
names(fulldata) <- c(
"user",
"gamenames",
"yearpublished",
"minplayers",
"maxplayers",
"playing_time_minutes",
"user_rating",
"bgg_rating",
"num_plays",
"own",
"wanttoplay",
"wanttobuy",
"wanttotrade",
"wantintrade",
"wishlist",
"comment")
names(user_temp_data) <- c(
"user",
"gamenames",
"yearpublished",
"minplayers",
"maxplayers",
"playing_time_minutes",
"user_rating",
"bgg_rating",
"num_plays",
"own",
"wanttoplay",
"wanttobuy",
"wanttotrade",
"wantintrade",
"wishlist",
"comment")
# For every username, fetch the user's collection from the BGG XML
# API (expansions excluded) and append one row per game to fulldata.
for(i in form$`BoardGameGeek Username`){
print(i)
url <- sprintf("https://boardgamegeek.com/xmlapi2/collection?username=%s&stats=1&excludesubtype=boardgameexpansion", i)
# BUG FIX: user_temp_data was never reset between users, so every
# earlier user's rows (plus the zero seed row) were re-appended to
# fulldata on each later iteration -- the duplication the "manual
# dedupe" at the bottom of the script works around. Start each user
# with an empty accumulator instead (rbind(data.frame(), x) == x).
user_temp_data <- data.frame()
#forcing api pings: the collection endpoint can answer with a
# near-empty "queued" document at first; retry until content arrives
pg <- 0
xD <- 0
xL <- 0
pg <- read_xml(url)
xD <- xmlParse(pg)
xL <- xmlToList(xD)
while(
#length(xml_nodes(pg, xpath="//name") %>% xml_text()) < 1
length(xL) <= 1
){
Sys.sleep(30)
pg <- read_xml(url)
xD <- xmlParse(pg)
xL <- xmlToList(xD)
print(i)
}
# Build one row per item; the final element of xL holds top-level
# attributes rather than an item, hence length(xL) - 1.
for(j in 1:(length(xL)-1)){
test <- data.frame(
i
,xL[[j]]$name$text
,if(is.null(xL[[j]]$yearpublished)){"na"} else{xL[[j]]$yearpublished}
,xL[[j]]$stats$.attrs[1]
,xL[[j]]$stats$.attrs[2]
,xL[[j]]$stats$.attrs[5]
,xL[[j]]$stats$rating$.attrs
,xL[[j]]$stats$rating$bayesaverage
,xL[[j]]$numplays
# BUG FIX: the status flags below were previously listed in the order
# 1,5,7,6,3,4 while the names vector assigns own, wanttoplay,
# wanttobuy, wanttotrade, wantintrade, wishlist -- per the inline flag
# comments, four columns were mislabelled. Reordered to match.
,xL[[j]]$status[1] #own game flag
,xL[[j]]$status[5] #want to play flag
,xL[[j]]$status[6] #want to buy flag
,xL[[j]]$status[3] #available for trade flag
,xL[[j]]$status[4] #want in trade flag
,xL[[j]]$status[7] #wishlisted flag
,if(is.null(xL[[j]]$comment)){"na"} else{xL[[j]]$comment}
)
names(test) <- c("user", "gamenames", "yearpublished", "minplayers", "maxplayers", "playing_time_minutes", "user_rating", "bgg_rating", "num_plays", "own", "wanttoplay","wanttobuy", "wanttotrade", "wantintrade", "wishlist", "comment")
user_temp_data <- rbind(user_temp_data, test)
}
fulldata <- rbind(fulldata, user_temp_data)
}
#cleanup
# Drop exact duplicate rows: the scraping loop can append the same
# rows more than once, so only the first occurrence is kept.
fulldata = fulldata[!duplicated(fulldata), ] #manual dedupe until i can figure out whats wrong with the loops
# The scraped values arrive as text; coerce the numeric columns.
fulldata$yearpublished <- as.numeric(fulldata$yearpublished)
fulldata$minplayers <- as.numeric(fulldata$minplayers)
fulldata$maxplayers <- as.numeric(fulldata$maxplayers)
fulldata$playing_time_minutes <- as.numeric(fulldata$playing_time_minutes)
fulldata$user_rating <- as.numeric(fulldata$user_rating)
fulldata$bgg_rating <- as.numeric(fulldata$bgg_rating)
fulldata$num_plays <- as.numeric(fulldata$num_plays)
# Collapse embedded CR/LF inside free-text comments to single spaces.
fulldata$comment <- gsub("[\r\n]", " ", fulldata$comment)
#remove first row of zero data
fulldata <- fulldata[-1,]
|
# Automatically generated by openapi-generator (https://openapi-generator.tech)
# Please update as you see appropriate
context("Test DriverDailyLogResponse")
# Model fixture shared by the per-property tests in this file.
model.instance <- DriverDailyLogResponse$new()
test_that("days", {
# tests for the property `days` (DriverDailyLogResponseDays)
# uncomment below to test the property
#expect_equal(model.instance$`days`, "EXPECTED_RESULT")
})
| /openapi-generator/r/tests/testthat/test_driver_daily_log_response.R | no_license | silverspace/samsara-sdks | R | false | false | 396 | r | # Automatically generated by openapi-generator (https://openapi-generator.tech)
# Please update as you see appropriate
context("Test DriverDailyLogResponse")
# Model fixture shared by the per-property tests in this file.
model.instance <- DriverDailyLogResponse$new()
test_that("days", {
# tests for the property `days` (DriverDailyLogResponseDays)
# uncomment below to test the property
#expect_equal(model.instance$`days`, "EXPECTED_RESULT")
})
|
##librairies needed
install.packages("dplyr")
install.packages("tidyverse")
library(dplyr)
library(tidyverse)
library(ggplot2)
library(ggpubr)
###
cenus<- read.csv("DEC_10_SF1_GCTPH1.ST05_with_ann.csv",header=T)
head(cenus)
colnames(cenus)<- c("ID","ID2","Geog","Geoid1","Geoid2","Geographicarea",
"county","Population","Housing units","totalarea","waterarea",
"leandarea","popdens","housedens")
##
ggplot(cenus)+
geom_col(aes(county,Population, fill=county))+
ylim(0,1000000)+
ggpubr::theme_pubr()+
xlab("Counties")+
ylab("Population")+
theme(axis.text.x = element_text(size=16,hjust=45))+
theme(axis.title.x = element_text(size=16))+
theme(axis.text.y = element_text(size=16))+
theme(axis.title.y = element_text(size=16))+
labs(title = "Population of california counties")
######
############
dis<- read.csv("izb_odp_final_03262019.csv")
head(dis)
dis2<- dis %>%
spread(year,count)
head(dis2)
ggplot(dis)+
geom_line(aes(year,count, col=county))+
facet_wrap(~disease,scales = "free")+
ggpubr::theme_pubr()+
xlab("Disease")+
ylab("Number of cases")+
theme(axis.text.x = element_text(size=16))+
theme(axis.title.x = element_text(size=16))+
theme(axis.text.y = element_text(size=16))+
theme(axis.title.y = element_text(size=16))+
labs(title = "Number of cases of infectious diseases in california counties")
###########
###################
#split the data for county from census
head(cenus)
census<- cenus %>%
na.omit() %>%
separate("county",into=c("county","just "))
head(census)
tail(census)
tail(dis)
##merge by county
dem.dis<- full_join(census,dis,by="county")
head(dem.dis)
tail(dem.dis)
??tidyr::separate
colnames(cenus)
de.dis.2010<-dem.dis %>%
filter(year==2010) %>%
select(county,year,Population,disease,count,popdens) %>%
group_by(disease) %>%
summarise(n=sum(count))
#spread(disease,count) %>%
tail(de.dis.2010)
head(de.dis.2010)
###does the cases depend on the population
dem.dis2<-dem.dis %>% filter(year==2010) %>% group_by(disease) %>%
select(county,disease,Population,count,popdens)
###pertusis
de.dis.2010.per<-dem.dis %>%
filter(year==2010) %>%
select(county,year,Population,disease,count,popdens) %>%
filter(disease== "Pertussis")
dem.dis.2010.per<- as.data.frame(de.dis.2010.per)
m1<-glm(count~county,data=dem.dis.2010.per)
m2<-glm(count~Population,data=dem.dis.2010.per)
m3<-glm(count~county+Population,data=dem.dis.2010.per)
m4<-glm(count~popdens,data=dem.dis.2010.per)
### relation between population and cases of pertusis
ggplot(de.dis.2010.per, aes(Population,count)) +
geom_smooth(method=lm, forumla =count~Population,se=T,col="red",fill="orchid")+
xlim(0,1000000)+
ggpubr::theme_pubr()+
xlab("Population")+
ylab("Number of cases")+
theme(axis.text.x = element_text(size=16))+
theme(axis.title.x = element_text(size=16))+
theme(axis.text.y = element_text(size=16))+
theme(axis.title.y = element_text(size=16))+
labs(title="Relation between population and number of pertussis cases ")
### relation between population and cases of all diseases
ggplot(dem.dis2, aes(Population,count)) +
geom_smooth(method=lm, forumla =count~Population,se=T,col="orange",fill="yellow")+
xlim(0,1000000)+
ggpubr::theme_pubr()+
xlab("Population")+
ylab("Number of cases")+
theme(axis.text.x = element_text(size=16))+
theme(axis.title.x = element_text(size=16))+
theme(axis.text.y = element_text(size=16))+
theme(axis.title.y = element_text(size=16))+
labs(title="Relation between population and number of all disease cases ")
##############3
###################densiyt
### relation between population and cases of pertusis
ggplot(de.dis.2010.per, aes(popdens,count)) +
geom_smooth(method=lm, forumla =count~popdens,se=T,col="purple",fill="light green")+
#xlim(0,1000000)+
ggpubr::theme_pubr()+
xlab("Population density")+
ylab("Number of cases")+
theme(axis.text.x = element_text(size=16))+
theme(axis.title.x = element_text(size=16))+
theme(axis.text.y = element_text(size=16))+
theme(axis.title.y = element_text(size=16))+
labs(title="Relation between population density and number of pertussis cases ")
### relation between population and cases of all diseases
ggplot(dem.dis2, aes(popdens,count)) +
geom_smooth(method=lm, forumla =count~podens,se=T,col="navy blue",fill="ligh tblue")+
#xlim(0,1000000)+
ggpubr::theme_pubr()+
xlab("Population density")+
ylab("Number of cases")+
theme(axis.text.x = element_text(size=16))+
theme(axis.title.x = element_text(size=16))+
theme(axis.text.y = element_text(size=16))+
theme(axis.title.y = element_text(size=16))+
labs(title="Relation between population and number of all disease cases ")
| /infectious-disease copy.R | no_license | vratchaudhary/Infectious_Diseases_California | R | false | false | 4,744 | r | ##librairies needed
install.packages("dplyr")
install.packages("tidyverse")
library(dplyr)
library(tidyverse)
library(ggplot2)
library(ggpubr)
###
cenus<- read.csv("DEC_10_SF1_GCTPH1.ST05_with_ann.csv",header=T)
head(cenus)
colnames(cenus)<- c("ID","ID2","Geog","Geoid1","Geoid2","Geographicarea",
"county","Population","Housing units","totalarea","waterarea",
"leandarea","popdens","housedens")
##
ggplot(cenus)+
geom_col(aes(county,Population, fill=county))+
ylim(0,1000000)+
ggpubr::theme_pubr()+
xlab("Counties")+
ylab("Population")+
theme(axis.text.x = element_text(size=16,hjust=45))+
theme(axis.title.x = element_text(size=16))+
theme(axis.text.y = element_text(size=16))+
theme(axis.title.y = element_text(size=16))+
labs(title = "Population of california counties")
######
############
dis<- read.csv("izb_odp_final_03262019.csv")
head(dis)
dis2<- dis %>%
spread(year,count)
head(dis2)
ggplot(dis)+
geom_line(aes(year,count, col=county))+
facet_wrap(~disease,scales = "free")+
ggpubr::theme_pubr()+
xlab("Disease")+
ylab("Number of cases")+
theme(axis.text.x = element_text(size=16))+
theme(axis.title.x = element_text(size=16))+
theme(axis.text.y = element_text(size=16))+
theme(axis.title.y = element_text(size=16))+
labs(title = "Number of cases of infectious diseases in california counties")
###########
###################
#split the data for county from census
head(cenus)
census<- cenus %>%
na.omit() %>%
separate("county",into=c("county","just "))
head(census)
tail(census)
tail(dis)
##merge by county
dem.dis<- full_join(census,dis,by="county")
head(dem.dis)
tail(dem.dis)
??tidyr::separate
colnames(cenus)
de.dis.2010<-dem.dis %>%
filter(year==2010) %>%
select(county,year,Population,disease,count,popdens) %>%
group_by(disease) %>%
summarise(n=sum(count))
#spread(disease,count) %>%
tail(de.dis.2010)
head(de.dis.2010)
###does the cases depend on the population
dem.dis2<-dem.dis %>% filter(year==2010) %>% group_by(disease) %>%
select(county,disease,Population,count,popdens)
###pertusis
de.dis.2010.per<-dem.dis %>%
filter(year==2010) %>%
select(county,year,Population,disease,count,popdens) %>%
filter(disease== "Pertussis")
dem.dis.2010.per<- as.data.frame(de.dis.2010.per)
m1<-glm(count~county,data=dem.dis.2010.per)
m2<-glm(count~Population,data=dem.dis.2010.per)
m3<-glm(count~county+Population,data=dem.dis.2010.per)
m4<-glm(count~popdens,data=dem.dis.2010.per)
### relation between population and cases of pertusis
ggplot(de.dis.2010.per, aes(Population,count)) +
geom_smooth(method=lm, forumla =count~Population,se=T,col="red",fill="orchid")+
xlim(0,1000000)+
ggpubr::theme_pubr()+
xlab("Population")+
ylab("Number of cases")+
theme(axis.text.x = element_text(size=16))+
theme(axis.title.x = element_text(size=16))+
theme(axis.text.y = element_text(size=16))+
theme(axis.title.y = element_text(size=16))+
labs(title="Relation between population and number of pertussis cases ")
### relation between population and cases of all diseases
ggplot(dem.dis2, aes(Population,count)) +
geom_smooth(method=lm, forumla =count~Population,se=T,col="orange",fill="yellow")+
xlim(0,1000000)+
ggpubr::theme_pubr()+
xlab("Population")+
ylab("Number of cases")+
theme(axis.text.x = element_text(size=16))+
theme(axis.title.x = element_text(size=16))+
theme(axis.text.y = element_text(size=16))+
theme(axis.title.y = element_text(size=16))+
labs(title="Relation between population and number of all disease cases ")
##############3
###################densiyt
### relation between population and cases of pertusis
ggplot(de.dis.2010.per, aes(popdens,count)) +
geom_smooth(method=lm, forumla =count~popdens,se=T,col="purple",fill="light green")+
#xlim(0,1000000)+
ggpubr::theme_pubr()+
xlab("Population density")+
ylab("Number of cases")+
theme(axis.text.x = element_text(size=16))+
theme(axis.title.x = element_text(size=16))+
theme(axis.text.y = element_text(size=16))+
theme(axis.title.y = element_text(size=16))+
labs(title="Relation between population density and number of pertussis cases ")
### relation between population and cases of all diseases
ggplot(dem.dis2, aes(popdens,count)) +
geom_smooth(method=lm, forumla =count~podens,se=T,col="navy blue",fill="ligh tblue")+
#xlim(0,1000000)+
ggpubr::theme_pubr()+
xlab("Population density")+
ylab("Number of cases")+
theme(axis.text.x = element_text(size=16))+
theme(axis.title.x = element_text(size=16))+
theme(axis.text.y = element_text(size=16))+
theme(axis.title.y = element_text(size=16))+
labs(title="Relation between population and number of all disease cases ")
|
## Get full dataset
data_full <- read.csv("./DATA/household_power_consumption.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data_full$Date <- as.Date(data_full$Date, format="%d/%m/%Y")
## Subsetting the data
data <- subset(data_full, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data_full)
## Converting dates
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
## Plot 1
hist(data$Global_active_power, main="Global Active Power",
xlab="Global Active Power (kilowatts)", ylab="Frequency", col="Red")
## Saving to file plot1.png
dev.copy(png, file="plot1.png", height=480, width=480)
dev.off() | /plot1.R | no_license | bezoza/Course_Project_1 | R | false | false | 746 | r | ## Get full dataset
data_full <- read.csv("./DATA/household_power_consumption.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data_full$Date <- as.Date(data_full$Date, format="%d/%m/%Y")
## Subsetting the data
data <- subset(data_full, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data_full)
## Converting dates
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
## Plot 1
hist(data$Global_active_power, main="Global Active Power",
xlab="Global Active Power (kilowatts)", ylab="Frequency", col="Red")
## Saving to file plot1.png
dev.copy(png, file="plot1.png", height=480, width=480)
dev.off() |
###############################################################################
## Version 0.6:
##
## Corrected snow melt in fortran code
## Add elevation
require(foreach)
require(doParallel)
require(raster)
require(ncdf4)
source("helpers.R")
## Climate
## Set climate data files
climdir = "~/Dropbox/Data/climate/cru_cl_2.00/"
tmpfile = "cru_10min_tmp.nc"
prefile = "cru_10min_pre.nc"
cldfile = "cru_10min_sun.nc"
elvfile = "cru_10min_elv.nc"
## STN grid
stnfile = "~/Dropbox/Data/hydrology/basins/ISLSCP_RIVER_ROUTING/data/river_routing_stn_hdeg/stn_basin_id_hd.asc"
## runoff grid
rofile = "~/Dropbox/Data/hydrology/basins/GRCD/composite/runoff/cmp_ro.grd"
## Output directory
outdir = "./wbm_ro/"
outname = "wbm"
## Get climate data
clim_tmp = stack(paste(climdir,tmpfile,sep=''))
clim_pre = stack(paste(climdir,prefile,sep=''))
clim_cld = stack(paste(climdir,cldfile,sep=''))
clim_elv = raster(paste(climdir,elvfile,sep=''), varname='elv')
clim_elv = clim_elv*1000 ## km -> m
stn_grd = raster(stnfile)
r3 = raster(rofile)
## Crop (test)
myext = extent(c(10,40,-35,-15))
clim_tmp = crop(clim_tmp, myext)
clim_pre = crop(clim_pre, myext)
clim_cld = crop(clim_cld, myext)
stn_grd = crop(stn_grd, myext)
r3 = crop(r3, myext)
clim.crds = SpatialPoints(coordinates(clim_tmp))
ncrds = dim(coordinates(clim.crds))[1]
## Convert climate data to matrices
tmp_mat = extract(clim_tmp, clim.crds)
pre_mat = extract(clim_pre, clim.crds)
cld_mat = extract(clim_cld, clim.crds)
elv_mat = extract(clim_elv, clim.crds)
stn_mat = extract(stn_grd, clim.crds)
stn_mat[which(stn_mat==-88)] <- NA
out.df = list(gdd5 = rep(NA,ncrds),
tmp = matrix(NA,nrow=ncrds, ncol=12),
pre = matrix(NA,nrow=ncrds, ncol=12),
aet = matrix(NA,nrow=ncrds, ncol=12),
pet = matrix(NA,nrow=ncrds, ncol=12),
alpha = rep(NA,ncrds),
cn = matrix(NA,nrow=ncrds, ncol=12),
sm = matrix(NA,nrow=ncrds, ncol=12),
runoff = matrix(NA,nrow=ncrds, ncol=12),
lsr = matrix(NA,nrow=ncrds, ncol=12),
sw = matrix(NA,nrow=ncrds, ncol=12))
# cl <- makeCluster(4)
# registerDoParallel(cl)
# Main water-balance loop: for each grid cell with valid temperature,
# interpolate monthly climate to daily, run the SPLASH-style water
# balance (splashfwbm, sourced from helpers.R), and store monthly
# AET/PET/soil moisture/runoff/snow plus annual GDD5 and alpha.
for (i in 1:ncrds) {
# Progress indicator every 100 cells.
if (i%%100 == 0) {print(paste("Doing:",i,"of",ncrds))}
elv = elv_mat[i]
# if (!is.na(tmp_mat[i,1]) & !is.na(pre_mat[i,1]) &
# !is.na(cld_mat[i,1]) & (elv > -500)){
# Process only cells where temperature is available.
if (!is.na(tmp_mat[i,1])) {
## Set from file
# Water-holding capacity fixed at 150 mm (a per-cell WHC lookup was
# disabled, see commented lines below).
whc = 150
# whc = whc_val[whc_mat[i]]
# if (is.na(whc)) whc = 0
if (whc > 0) {
# daily() (from helpers.R) interpolates 12 monthly values to daily.
dtemp = daily(tmp_mat[i,])$dly
# Monthly precip totals -> mean daily rate (mm/day).
dprec = daily(pre_mat[i,])$dly/(365/12)
# dsun = (100-daily(cld_mat[i,])$dly)/100
# Sunshine percentage -> fraction; the input here is a sunshine field
# (cru_10min_sun.nc), so no 100-x inversion is needed.
dsun = daily(cld_mat[i,])$dly/100
# Growing degree days above a 5 degC base.
dtemp0 = dtemp-5
out.df$gdd5[i] = sum(ifelse(dtemp0>0, dtemp0, 0))
out.df$tmp[i,] = tmp_mat[i,]
out.df$pre[i,] = pre_mat[i,]
aetpet.df = splashfwbm(dtemp,dprec,dsun,lat=coordinates(clim.crds)[i,2],
yr=1975, elv=elv, whc=whc)
out.df$aet[i,] = aetpet.df$maet
out.df$pet[i,] = aetpet.df$mpet
# Moisture index alpha = annual AET / annual PET, clamped to [0, 1].
out.df$alpha[i] = sum(out.df$aet[i,])/sum(out.df$pet[i,])
## ALPHA constraints
if (out.df$alpha[i]<0) { out.df$alpha[i]=0 }
if (out.df$alpha[i]>1) { out.df$alpha[i]=1 }
out.df$cn[i,] = aetpet.df$mcn
## CN constraints
# Condensation cannot be negative.
out.df$cn[i,] = ifelse(out.df$cn[i,]<0,0,out.df$cn[i,])
out.df$sm[i,] = aetpet.df$msm
out.df$runoff[i,] = aetpet.df$mro
out.df$lsr[i,] = aetpet.df$mlsr
out.df$sw[i,] = aetpet.df$msnow
#if (i == 10991) {stop()}
} else {
# print(paste("Missing WHC:", coordinates(clim.crds)[i,1], coordinates(clim.crds)[i,2]))
# NOTE(review): out.df$pre[i,] is only filled in the whc > 0 branch,
# so this assigns NA here; pre_mat[i,] may have been intended — confirm.
out.df$runoff[i,] = out.df$pre[i,]
}
#stop()
}
}
# stopCluster(cl)
## New rasters
# Template raster with the grid geometry of the climate input; each
# output variable is poured back onto this grid.
r = raster(clim_tmp,1)
gdd5.r = setValues(r, out.df$gdd5)
alpha.r = setValues(r, out.df$alpha)
#par.r = setValues(r, out.df$par)
# Build 12-layer stacks (one layer per month) for the monthly outputs:
# the first month initialises each object, later months are stack()ed on.
for (i in 1:12) {
if (i == 1) {
aet.r = setValues(r, out.df$aet[,i], layer=i)
pet.r = setValues(r, out.df$pet[,i], layer=i)
sm.r = setValues(r, out.df$sm[,i], layer=i)
sw.r = setValues(r, out.df$sw[,i], layer=i)
cn.r = setValues(r, out.df$cn[,i], layer=i)
runoff.r = setValues(r, out.df$runoff[,i], layer=i)
lsr.r = setValues(r, out.df$lsr[,i], layer=i)
} else {
aet.r = stack(aet.r, setValues(r, out.df$aet[,i], layer=i))
pet.r = stack(pet.r, setValues(r, out.df$pet[,i], layer=i))
sm.r = stack(sm.r, setValues(r, out.df$sm[,i], layer=i))
sw.r = stack(sw.r, setValues(r, out.df$sw[,i], layer=i))
cn.r = stack(cn.r, setValues(r, out.df$cn[,i], layer=i))
runoff.r = stack(runoff.r, setValues(r, out.df$runoff[,i], layer=i))
lsr.r = stack(lsr.r, setValues(r, out.df$lsr[,i], layer=i))
}
}
# Write each derived variable to its own NetCDF file under outdir, named
# "<outname>_<var>.nc".  overwrite=TRUE replaces any previous run.
writeRaster(gdd5.r, paste0(outdir,outname,"_gdd.nc"), overwrite=TRUE,
varname="gdd5",longname="Growing Degree Days",varunit="degree days")
writeRaster(alpha.r, paste0(outdir,outname,"_alpha.nc"), overwrite=TRUE,
varname="alpha",longname="Moisture Index",varunit="")
writeRaster(aet.r, paste0(outdir,outname,"_aet.nc"), overwrite=TRUE,
varname="aet",longname="Actual Evapotranspiration",varunit="mm m-1")
writeRaster(pet.r, paste0(outdir,outname,"_pet.nc"), overwrite=TRUE,
varname="pet",longname="Potential Evapotranspiration",varunit="mm m-1")
# BUG FIX: the soil-moisture file previously used varname="sw" (a
# copy-paste from the snow call below), so "_sm.nc" and "_sw.nc" shared
# the same NetCDF variable name; use "sm" to match the file name.
writeRaster(sm.r, paste0(outdir,outname,"_sm.nc"), overwrite=TRUE,
varname="sm",longname="Soil Moisture",varunit="mm m-1")
writeRaster(sw.r, paste0(outdir,outname,"_sw.nc"), overwrite=TRUE,
varname="sw",longname="Snow",varunit="mm m-1")
writeRaster(cn.r, paste0(outdir,outname,"_cn.nc"), overwrite=TRUE,
varname="cn",longname="Condensation",varunit="mm m-1")
writeRaster(runoff.r, paste0(outdir,outname,"_runoff.nc"), overwrite=TRUE,
varname="runoff",longname="Runoff",varunit="mm m-1")
writeRaster(lsr.r, paste0(outdir,outname,"_lsr.nc"), overwrite=TRUE,
varname="lsr",longname="Land Surface Runoff",varunit="mm m-1")
| /calcRunoff_WBM.R | no_license | simonbrewer/safricaro | R | false | false | 6,095 | r | ###############################################################################
## Version 0.6:
##
## Corrected snow melt in fortran code
## Add elevation
require(foreach)
require(doParallel)
require(raster)
require(ncdf4)
source("helpers.R")
## Climate
## Set climate data files
climdir = "~/Dropbox/Data/climate/cru_cl_2.00/"
tmpfile = "cru_10min_tmp.nc"
prefile = "cru_10min_pre.nc"
cldfile = "cru_10min_sun.nc"
elvfile = "cru_10min_elv.nc"
## STN grid
stnfile = "~/Dropbox/Data/hydrology/basins/ISLSCP_RIVER_ROUTING/data/river_routing_stn_hdeg/stn_basin_id_hd.asc"
## runoff grid
rofile = "~/Dropbox/Data/hydrology/basins/GRCD/composite/runoff/cmp_ro.grd"
## Output directory
outdir = "./wbm_ro/"
outname = "wbm"
## Get climate data
clim_tmp = stack(paste(climdir,tmpfile,sep=''))
clim_pre = stack(paste(climdir,prefile,sep=''))
clim_cld = stack(paste(climdir,cldfile,sep=''))
clim_elv = raster(paste(climdir,elvfile,sep=''), varname='elv')
clim_elv = clim_elv*1000 ## km -> m
stn_grd = raster(stnfile)
r3 = raster(rofile)
## Crop (test)
myext = extent(c(10,40,-35,-15))
clim_tmp = crop(clim_tmp, myext)
clim_pre = crop(clim_pre, myext)
clim_cld = crop(clim_cld, myext)
stn_grd = crop(stn_grd, myext)
r3 = crop(r3, myext)
clim.crds = SpatialPoints(coordinates(clim_tmp))
ncrds = dim(coordinates(clim.crds))[1]
## Convert climate data to matrices
tmp_mat = extract(clim_tmp, clim.crds)
pre_mat = extract(clim_pre, clim.crds)
cld_mat = extract(clim_cld, clim.crds)
elv_mat = extract(clim_elv, clim.crds)
stn_mat = extract(stn_grd, clim.crds)
stn_mat[which(stn_mat==-88)] <- NA
out.df = list(gdd5 = rep(NA,ncrds),
tmp = matrix(NA,nrow=ncrds, ncol=12),
pre = matrix(NA,nrow=ncrds, ncol=12),
aet = matrix(NA,nrow=ncrds, ncol=12),
pet = matrix(NA,nrow=ncrds, ncol=12),
alpha = rep(NA,ncrds),
cn = matrix(NA,nrow=ncrds, ncol=12),
sm = matrix(NA,nrow=ncrds, ncol=12),
runoff = matrix(NA,nrow=ncrds, ncol=12),
lsr = matrix(NA,nrow=ncrds, ncol=12),
sw = matrix(NA,nrow=ncrds, ncol=12))
# cl <- makeCluster(4)
# registerDoParallel(cl)
for (i in 1:ncrds) {
if (i%%100 == 0) {print(paste("Doing:",i,"of",ncrds))}
elv = elv_mat[i]
# if (!is.na(tmp_mat[i,1]) & !is.na(pre_mat[i,1]) &
# !is.na(cld_mat[i,1]) & (elv > -500)){
if (!is.na(tmp_mat[i,1])) {
## Set from file
whc = 150
# whc = whc_val[whc_mat[i]]
# if (is.na(whc)) whc = 0
if (whc > 0) {
dtemp = daily(tmp_mat[i,])$dly
dprec = daily(pre_mat[i,])$dly/(365/12)
# dsun = (100-daily(cld_mat[i,])$dly)/100
dsun = daily(cld_mat[i,])$dly/100
dtemp0 = dtemp-5
out.df$gdd5[i] = sum(ifelse(dtemp0>0, dtemp0, 0))
out.df$tmp[i,] = tmp_mat[i,]
out.df$pre[i,] = pre_mat[i,]
aetpet.df = splashfwbm(dtemp,dprec,dsun,lat=coordinates(clim.crds)[i,2],
yr=1975, elv=elv, whc=whc)
out.df$aet[i,] = aetpet.df$maet
out.df$pet[i,] = aetpet.df$mpet
out.df$alpha[i] = sum(out.df$aet[i,])/sum(out.df$pet[i,])
## ALPHA constraints
if (out.df$alpha[i]<0) { out.df$alpha[i]=0 }
if (out.df$alpha[i]>1) { out.df$alpha[i]=1 }
out.df$cn[i,] = aetpet.df$mcn
## CN constraints
out.df$cn[i,] = ifelse(out.df$cn[i,]<0,0,out.df$cn[i,])
out.df$sm[i,] = aetpet.df$msm
out.df$runoff[i,] = aetpet.df$mro
out.df$lsr[i,] = aetpet.df$mlsr
out.df$sw[i,] = aetpet.df$msnow
#if (i == 10991) {stop()}
} else {
# print(paste("Missing WHC:", coordinates(clim.crds)[i,1], coordinates(clim.crds)[i,2]))
out.df$runoff[i,] = out.df$pre[i,]
}
#stop()
}
}
# stopCluster(cl)
## New rasters
r = raster(clim_tmp,1)
gdd5.r = setValues(r, out.df$gdd5)
alpha.r = setValues(r, out.df$alpha)
#par.r = setValues(r, out.df$par)
for (i in 1:12) {
if (i == 1) {
aet.r = setValues(r, out.df$aet[,i], layer=i)
pet.r = setValues(r, out.df$pet[,i], layer=i)
sm.r = setValues(r, out.df$sm[,i], layer=i)
sw.r = setValues(r, out.df$sw[,i], layer=i)
cn.r = setValues(r, out.df$cn[,i], layer=i)
runoff.r = setValues(r, out.df$runoff[,i], layer=i)
lsr.r = setValues(r, out.df$lsr[,i], layer=i)
} else {
aet.r = stack(aet.r, setValues(r, out.df$aet[,i], layer=i))
pet.r = stack(pet.r, setValues(r, out.df$pet[,i], layer=i))
sm.r = stack(sm.r, setValues(r, out.df$sm[,i], layer=i))
sw.r = stack(sw.r, setValues(r, out.df$sw[,i], layer=i))
cn.r = stack(cn.r, setValues(r, out.df$cn[,i], layer=i))
runoff.r = stack(runoff.r, setValues(r, out.df$runoff[,i], layer=i))
lsr.r = stack(lsr.r, setValues(r, out.df$lsr[,i], layer=i))
}
}
# Write each derived variable to its own NetCDF file under outdir, named
# "<outname>_<var>.nc".  overwrite=TRUE replaces any previous run.
writeRaster(gdd5.r, paste0(outdir,outname,"_gdd.nc"), overwrite=TRUE,
varname="gdd5",longname="Growing Degree Days",varunit="degree days")
writeRaster(alpha.r, paste0(outdir,outname,"_alpha.nc"), overwrite=TRUE,
varname="alpha",longname="Moisture Index",varunit="")
writeRaster(aet.r, paste0(outdir,outname,"_aet.nc"), overwrite=TRUE,
varname="aet",longname="Actual Evapotranspiration",varunit="mm m-1")
writeRaster(pet.r, paste0(outdir,outname,"_pet.nc"), overwrite=TRUE,
varname="pet",longname="Potential Evapotranspiration",varunit="mm m-1")
# BUG FIX: the soil-moisture file previously used varname="sw" (a
# copy-paste from the snow call below), so "_sm.nc" and "_sw.nc" shared
# the same NetCDF variable name; use "sm" to match the file name.
writeRaster(sm.r, paste0(outdir,outname,"_sm.nc"), overwrite=TRUE,
varname="sm",longname="Soil Moisture",varunit="mm m-1")
writeRaster(sw.r, paste0(outdir,outname,"_sw.nc"), overwrite=TRUE,
varname="sw",longname="Snow",varunit="mm m-1")
writeRaster(cn.r, paste0(outdir,outname,"_cn.nc"), overwrite=TRUE,
varname="cn",longname="Condensation",varunit="mm m-1")
writeRaster(runoff.r, paste0(outdir,outname,"_runoff.nc"), overwrite=TRUE,
varname="runoff",longname="Runoff",varunit="mm m-1")
writeRaster(lsr.r, paste0(outdir,outname,"_lsr.nc"), overwrite=TRUE,
varname="lsr",longname="Land Surface Runoff",varunit="mm m-1")
|
#0>>Global Setup--------
#0.Load libraries -----------------------------------------------------------------------------
#netcdy libraries
library(ncdf4)
#spatial libraries
library(sp)
library(geosphere)
library(rgdal)
library(raster)
library(RStoolbox)
#plotting libraries
library(ggplot2)
#spatial plotting libraries
library(sf)
library(ggspatial)
library(rnaturalearth)
library(rnaturalearthdata)
#list-data handling libraries
library(dplyr)
library(rlist)
library(tidyr)
library(purrr)
library(data.table)
#time series/zoo libraries
library(zoo)
#date-time libraries
library(lubridate)
#0.Plot Color Setup------------
# Colour ramps and Brewer palettes shared by the plots below.
library(RColorBrewer)
rf <- colorRampPalette(rev(brewer.pal(11,'Spectral')))
# NOTE(review): the 32-colour ramp is immediately overwritten by the
# 100-colour one on the next line and is never used.
r <- rf(32)
r<-rf(100)
# NOTE(review): RColorBrewer is attached a second time here (harmless).
library(RColorBrewer)
colstx <- rev(brewer.pal(n = 9, "Spectral"))
colsindex <- rev(brewer.pal(n = 9, "RdYlBu"))
colsdelta <- brewer.pal(n = 9, "Reds")
colsbias <- brewer.pal(n = 9, "PiYG")
colssd <- brewer.pal(n = 9, "Blues")
#1.0 Load List Data-----------------
# load() restores an object named `x` from each .rdata file (a
# list.save/list.load convention used throughout this script).
load('mn7_days_cmfd.rdata')
mn.inv <-x
load('refrac.rdata')
refrac <- data.frame(x)
refrac<-unique(refrac)
#2.0 Creating New Lists-----------------
#refrac and 30-day ant for 1994-2005
# For every inventory entry: attach the 30-day antecedent-rainfall time
# series, then compute qa.30 = refrac * ant.30 / 30 over 1994-2005 and
# attach it as 'qa.ts'.
for(i in 1:length(mn.inv))
{
a <- refrac
names(a)[1] <-'dates'
b <- data.frame(mn.inv[[i]][['ts']][['dates']],mn.inv[[i]][['ant.ts']][['ant.30']])
names(b)[1] <-'dates'
names(b)[2] <-'ant.30'
ant.30<-list(b)
ant.30<-list.names(ant.30,'ant.30.ts')
mn.inv[[i]]<-append(mn.inv[[i]],ant.30)
# Restrict the antecedent series to the refrac period 1994-2005.
b<- b[as.numeric(strftime(b$dates, "%Y")) %in% 1994:2005,]
# Inner-join refrac and ant.30 on the (string-formatted) date column.
# NOTE(review): `c` shadows base::c inside this scope — works, but risky.
c <- merge(transform(a, dates = format(as.Date(dates), "%Y-%m-%d")), transform(b, dates = format(as.Date(dates), "%Y-%m-%d")))
# NOTE(review): assumes nrow(c) == nrow(a), i.e. every refrac date has a
# matching ant.30 date; otherwise the dates and values misalign — confirm.
qa <- data.frame(a$dates,c$refrac*c$ant.30/30)
names(qa)[1] <-'dates'
names(qa)[2] <-'qa.30'
qa<-list(qa)
qa<-list.names(qa,'qa.ts')
mn.inv[[i]]<-append(mn.inv[[i]],qa)
}
# Keep only the fields needed downstream and persist the result.
antqa.cmfd<-Map(c,lapply(mn.inv, '[', 'ant.30.ts'),lapply(mn.inv, '[', 'qa.ts'), lapply(mn.inv, '[', 'lon_cmfd'),lapply(mn.inv, '[', 'lat_cmfd'))
list.save(antqa.cmfd,'jja.antqa.cmfd.rdata')
#3.0 Fitting Distributions (JJA) LOG-NORM-----------------
# Fit a log-normal distribution to each cell's non-zero 30-day
# antecedent-rainfall series and attach the fit object as 'fit.lnorm'.
load('jja.antqa.cmfd.rdata')
jja.antqa.cmfd<-x
library(fitdistrplus)
# seq_along() is safe for an empty list (1:length() would iterate 1:0).
for(i in seq_along(jja.antqa.cmfd))
{
df<-jja.antqa.cmfd[[i]][['ant.30.ts']]
# Drop zeros (log-normal support is strictly positive) and NAs.
df<-df[df$ant.30!=0,]
df<-na.omit(df)
fit.lnorm<-fitdist(df$ant.30,"lnorm")
fit.lnorm<-list(fit.lnorm)
# Plain assignment: the previous `<<-` global assignment was unnecessary
# at top level and misleading.
fit.lnorm<- list.names(fit.lnorm,"fit.lnorm")
jja.antqa.cmfd[[i]] <- append(jja.antqa.cmfd[[i]],fit.lnorm)
}
library(goftest)
library(stats)
# The four lines below were copied from the ?Lognormal help page as a
# usage reference.  Executed as-is they stop the script: `q`, `p` and
# `n` are undefined, and `x` holds the list loaded above, not a numeric
# vector.  Kept as comments for reference only.
# dlnorm(x, meanlog = 0, sdlog = 1, log = FALSE)
# plnorm(q, meanlog = 0, sdlog = 1, lower.tail = TRUE, log.p = FALSE)
# qlnorm(p, meanlog = 0, sdlog = 1, lower.tail = TRUE, log.p = FALSE)
# rlnorm(n, meanlog = 0, sdlog = 1)
# For every cell, run three goodness-of-fit tests (KS, Anderson-Darling,
# Cramer-von Mises) against the fitted log-normal and attach the results
# (decision + full test object) as 'tests.lnorm'.
for(i in 1:length(jja.antqa.cmfd))
{
fit<-jja.antqa.cmfd[[i]][['fit.lnorm']]
df<-jja.antqa.cmfd[[i]][['ant.30.ts']]
# Same filtering as used when fitting, so the tests see the same sample.
df<-df[df$ant.30!=0,]
df<-na.omit(df)#flag for potential bugs
#Kolmogorov–Smirnov Goodness-of-fit test
# fit$estimate[[1]]/[[2]] are the fitted meanlog/sdlog parameters.
test.ks <- ks.test(df$ant.30,"plnorm",fit$estimate[[1]],fit$estimate[[2]], alternative = "two.sided")
res.ks <- if(test.ks$p.value>=0.05) {'Accept'} else{'Reject'} # H-test at 0.05 confidence interval
test.ks <- list(res.ks,test.ks)
test.ks<-list.names(test.ks,'test.ks')
names(test.ks) <- c("h.test","ks.stats")
#Anderson–Darling test Goodness-of-fit test
test.ad <- ad.test(df$ant.30,"plnorm",fit$estimate[[1]],fit$estimate[[2]])
res.ad <- if(test.ad$p.value>=0.05) {'Accept'} else{'Reject'} # H-test at 0.05 confidence interval
test.ad <- list(res.ad,test.ad)
test.ad<-list.names(test.ad,'test.ad')
names(test.ad) <- c("h.test","ad.stats")
#Cramer-von Mises Goodness-of-fit test
test.cvm <- cvm.test(df$ant.30,"plnorm",fit$estimate[[1]],fit$estimate[[2]])
res.cvm <- if(test.cvm$p.value>=0.05) {'Accept'} else{'Reject'} # H-test at 0.05 confidence interval
test.cvm <- list(res.cvm,test.cvm)
test.cvm<-list.names(test.cvm,'test.cvm')
names(test.cvm) <- c("h.test","cvm.stats")
#test compilation and integration
tests.lnorm<-list(test.ks,test.ad,test.cvm)
names(tests.lnorm) <- c('test.ks','test.ad','test.cvm')
tests.lnorm<-list(tests.lnorm)
tests.lnorm<-list.names(tests.lnorm,'tests.lnorm')
jja.antqa.cmfd[[i]] <- append(jja.antqa.cmfd[[i]],tests.lnorm)
}
# NOTE(review): unlike the gamma section below, the lnorm results are
# assigned to x here but never list.save()d on their own — confirm intent.
x<-jja.antqa.cmfd
#3.0 Fitting Distributions (JJA) GAMMA-----------------
# Fit a gamma distribution to each cell's non-zero 30-day antecedent
# series and attach the fit object as 'fit.gamma'.
load('jja.antqa.cmfd.rdata')
jja.antqa.cmfd<-x
library(fitdistrplus)
# seq_along() is safe for an empty list (1:length() would iterate 1:0).
for(i in seq_along(jja.antqa.cmfd))
{
df<-jja.antqa.cmfd[[i]][['ant.30.ts']]
# Drop zeros (gamma support is strictly positive) and NAs.
df<-df[df$ant.30!=0,]
df<-na.omit(df)
fit.gamma<-fitdist(df$ant.30,"gamma")
fit.gamma<-list(fit.gamma)
# Plain assignment: the previous `<<-` global assignment was unnecessary
# at top level and misleading.
fit.gamma<- list.names(fit.gamma,"fit.gamma")
jja.antqa.cmfd[[i]] <- append(jja.antqa.cmfd[[i]],fit.gamma)
}
library(goftest)
library(stats)
#
# dlnorm(x, meanlog = 0, sdlog = 1, log = FALSE)
# plnorm(q, meanlog = 0, sdlog = 1, lower.tail = TRUE, log.p = FALSE)
# qlnorm(p, meanlog = 0, sdlog = 1, lower.tail = TRUE, log.p = FALSE)
# rlnorm(n, meanlog = 0, sdlog = 1)
# For every cell, run the same three goodness-of-fit tests as the lnorm
# section, here against the fitted gamma (shape/rate in estimate[[1]]/[[2]]),
# and attach the results as 'tests.gamma'.
for(i in 1:length(jja.antqa.cmfd))
{
fit<-jja.antqa.cmfd[[i]][['fit.gamma']]
df<-jja.antqa.cmfd[[i]][['ant.30.ts']]
# Same filtering as used when fitting, so the tests see the same sample.
df<-df[df$ant.30!=0,]
df<-na.omit(df)#flag for potential bugs
#Kolmogorov–Smirnov Goodness-of-fit test
test.ks <- ks.test(df$ant.30,"pgamma",fit$estimate[[1]],fit$estimate[[2]], alternative = "two.sided")
res.ks <- if(test.ks$p.value>=0.05) {'Accept'} else{'Reject'} # H-test at 0.05 confidence interval
test.ks <- list(res.ks,test.ks)
test.ks<-list.names(test.ks,'test.ks')
names(test.ks) <- c("h.test","ks.stats")
#Anderson–Darling test Goodness-of-fit test
test.ad <- ad.test(df$ant.30,"pgamma",fit$estimate[[1]],fit$estimate[[2]])
res.ad <- if(test.ad$p.value>=0.05) {'Accept'} else{'Reject'} # H-test at 0.05 confidence interval
test.ad <- list(res.ad,test.ad)
test.ad<-list.names(test.ad,'test.ad')
names(test.ad) <- c("h.test","ad.stats")
#Cramer-von Mises Goodness-of-fit test
test.cvm <- cvm.test(df$ant.30,"pgamma",fit$estimate[[1]],fit$estimate[[2]])
res.cvm <- if(test.cvm$p.value>=0.05) {'Accept'} else{'Reject'} # H-test at 0.05 confidence interval
test.cvm <- list(res.cvm,test.cvm)
test.cvm<-list.names(test.cvm,'test.cvm')
names(test.cvm) <- c("h.test","cvm.stats")
#test compilation and integration
tests.gamma<-list(test.ks,test.ad,test.cvm)
names(tests.gamma) <- c('test.ks','test.ad','test.cvm')
tests.gamma<-list(tests.gamma)
tests.gamma<-list.names(tests.gamma,'tests.gamma')
jja.antqa.cmfd[[i]] <- append(jja.antqa.cmfd[[i]],tests.gamma)
}
# Persist the gamma-augmented list.
x<-jja.antqa.cmfd
list.save(x,'jja.ant.gamma.cmfd.rdata')
#-------------------------#
#inspection
# Interactive inspection of the accept/reject decisions.  Each `h` below
# overwrites the previous one, so only one table is visible at a time.
load('jja.ant.gamma.cmfd.rdata')
# tg<-lapply(x, `[[`, 'tests.gamma')
# NOTE(review): the gamma results file is loaded, but the lnorm tests are
# extracted (the gamma line is commented out) — presumably a manual
# toggle; confirm which distribution is being inspected.
tg<-lapply(x, `[[`, 'tests.lnorm')
#checking cvm
cvm<-lapply(tg, `[`, 'test.cvm')
cv<-lapply(cvm, `[`, 'test.cvm')
cv<-lapply(cvm, `[[`, 'test.cvm')
h<-rbindlist(lapply(cv, `[`, 'h.test'))
#checking ad
cvm<-lapply(tg, `[`, 'test.ad')
cv<-lapply(cvm, `[`, 'test.ad')
cv<-lapply(cvm, `[[`, 'test.ad')
h<-rbindlist(lapply(cv, `[`, 'h.test'))
#checking ks
cvm<-lapply(tg, `[`, 'test.ks')
cv<-lapply(cvm, `[`, 'test.ks')
cv<-lapply(cvm, `[[`, 'test.ks')
h<-rbindlist(lapply(cv, `[`, 'h.test'))
#2>>Measure Antecedent Skewness ---------
# Exploratory skewness check on the first cell's series, restricted to
# June-August (JJA).
ant.30<-antqa.cmfd[[1]][['ant.30.ts']]
#antecedent
ant.30<- ant.30[as.numeric(strftime(ant.30$dates, "%m")) %in% 6:8,]
library(e1071)
skewness(ant.30$ant.30)
# Square-root transform as a quick check of whether it symmetrises the data.
skewness(sqrt(ant.30$ant.30))
hist((ant.30$ant.30))
plot(density(ant.30$ant.30))
# Same exploration for the qa.30 (refrac-weighted) series.
qa.30<-antqa.cmfd[[1]][['qa.ts']]
qa.30<- qa.30[as.numeric(strftime(qa.30$dates, "%m")) %in% 6:8,]
hist((qa.30$qa.30))
plot(density(qa.30$qa.30))
skewness(qa.30$qa.30)
#3>>Exploring Distributions for ant.30---------
# Fit candidate distributions to the JJA antecedent series.
library(fitdistrplus)
fitwe<-fitdist(ant.30$ant.30,"weibull")
fitln<-fitdist(ant.30$ant.30,"lnorm")
# BUG FIX: "gauss" is not a distribution fitdist() can use (there are no
# dgauss/pgauss functions), so this line previously errored; the later
# denscomp()/ppcomp()/qqcomp() calls label this fit "Gamma", so fit the
# gamma distribution here.
fitga<-fitdist(ant.30$ant.30,"gamma")
fitn<-fitdist(ant.30$ant.30,"norm")
#Start: Pearson III
# Method-of-moments starting values for a Pearson type III fit:
# mean, variance, sd and (type-1) sample skewness of the JJA series.
library(e1071)
m <- mean(ant.30$ant.30)
v <- var(ant.30$ant.30)
s <- sd(ant.30$ant.30)
g <- e1071::skewness(ant.30$ant.30, type=1)
# Correct the sample skew for bias using the recommendation of
# Bobee, B. and R. Robitaille (1977). "The use of the Pearson Type 3 and Log Pearson Type 3 distributions revisited."
# Water Resources Reseach 13(2): 427-443, as used by Kite
n <- length(ant.30$ant.30)
g <- g*(sqrt(n*(n-1))/(n-2))*(1+8.5/n)
# We will use method of moment estimates as starting values for the MLE search
my.shape <- (2/g)^2
my.scale <- sqrt(v)/sqrt(my.shape)*sign(g) # modified as recommended by Carl Schwarz
my.location <- m-sqrt(v * my.shape)
my.param <- list(shape=my.shape, scale=my.scale, location=my.location)
library(PearsonDS)
# Thin d/p/q wrappers so fitdist() can find the "PIII" distribution by name.
dPIII<-function(x, shape, location, scale) PearsonDS::dpearsonIII(x, shape, location, scale, log=FALSE)
pPIII<-function(q, shape, location, scale) PearsonDS::ppearsonIII(q, shape, location, scale, lower.tail = TRUE, log.p = FALSE)
qPIII<-function(p, shape, location, scale) PearsonDS::qpearsonIII(p, shape, location, scale, lower.tail = TRUE, log.p = FALSE)
# Maximum-goodness-of-estimation fit, seeded with the moment estimates.
fitpe3<-fitdistrplus::fitdist(ant.30$ant.30, distr="PIII", method="mge", start=my.param)
library(gsl)
# Alternative ML Pearson-family fit (result printed, not stored).
pearsonFitML(ant.30$ant.30)
#End:: Pearson III
plot(fitpe3) # Q-Q plot didn't work so well
#Start:: Define Gumbel Functions
#a := beta
#b := alpha
# Gumbel (minimum-form, as parameterised here) density, CDF and quantile
# functions so fitdist() can fit "gumbel" by name.
# a = location (beta), b = scale (alpha).
dgumbel <- function(x, a, b) {
  z <- (a - x) / b
  exp(z - exp(z)) / b
}
pgumbel <- function(q, a, b) {
  exp(-exp((a - q) / b))
}
qgumbel <- function(p, a, b) {
  a - b * log(-log(p))
}
#End :: Define Gumbel Functions
# Fit the custom Gumbel distribution (user-defined d/p/q functions above
# require explicit starting values) and compare fits graphically.
fitgu<-fitdist(ant.30$ant.30,"gumbel",start=list(a=10, b=10))
plot(fitgu)
plot(fitln)
# NOTE(review): the Gumbel fit is not included in the comparison plots below.
denscomp(list(fitwe,fitln,fitga), legendtext=c("Weibull","Log-Normal","Gamma"))
ppcomp(list(fitwe,fitln,fitga), legendtext=c("Weibull","Log-Normal","Gamma"))
qqcomp(list(fitwe,fitln,fitga), legendtext=c("Weibull","Log-Normal","Gamma"))
#
# fitwe<-fitdist(sqrt(ant.30$ant.30),"weibull")
# fitln<-fitdist(sqrt(ant.30$ant.30),"lnorm")
# fitga<-fitdist(sqrt(ant.30$ant.30),"gamma")
# fitnorm<-fitdist(sqrt(ant.30$ant.30),"norm")
#
# cdfcomp(list(fitwe,fitln,fitga,fitnorm), legendtext=c("Weibull","Log-Normal","Gamma","Normal"))
# qqcomp(list(fitwe,fitln,fitga,fitnorm), legendtext=c("Weibull","Log-Normal","Gamma","Normal"))
#4>>Log-Normal Fitting for ant.30 and GOF tests ---------
#fit
# Refit the log-normal and print fitdistrplus goodness-of-fit statistics
# (KS/AD/CvM statistics plus AIC/BIC).
library(fitdistrplus)
fitln<-fitdist(ant.30$ant.30,"lnorm")
plot(fitln)
#gof tests
gofstat(fitln)
#5>>>Exploring Distributions for qa.30 and GOF tests ---------
# Replace zeros with NA and drop them (positive-support distributions),
# then fit and compare candidate distributions for the qa.30 series.
# NOTE(review): the comparison is applied to the whole data frame, so the
# dates column is also tested against 0 — harmless for Date values, but
# qa.30$qa.30 would be the safer target; confirm.
qa.30[qa.30==0] <- NA
qa.30<-na.omit(qa.30)
library(fitdistrplus)
fitwe<-fitdist(qa.30$qa.30,"weibull")
fitln<-fitdist(qa.30$qa.30,"lnorm")
fitga<-fitdist(qa.30$qa.30,"gamma")
#Start:: Define Gumbel Functions
#a := beta
#b := alpha
# Re-definition of the Gumbel d/p/q functions (identical to the earlier ones).
dgumbel <- function(x, a, b) 1/b*exp((a-x)/b)*exp(-exp((a-x)/b))
pgumbel <- function(q, a, b) exp(-exp((a-q)/b))
qgumbel <- function(p, a, b) a-b*log(-log(p))
#End :: Define Gumbel Functions
fitgu<-fitdist(qa.30$qa.30,"gumbel",start=list(a=10, b=10))
denscomp(list(fitwe,fitln,fitga,fitgu), legendtext=c("Weibull","Log-Normal","Gamma","Gumbel"))
ppcomp(list(fitwe,fitln,fitga,fitgu), legendtext=c("Weibull","Log-Normal","Gamma","Gumbel"))
qqcomp(list(fitwe,fitln,fitga,fitgu), legendtext=c("Weibull","Log-Normal","Gamma","Gumbel"))
plot(fitgu)
gofstat(fitgu)
| /V5-CMFD-ANT-PDF-TESTS.R | no_license | joaquin-ferrer/V5-FSLAMR-CCM | R | false | false | 11,264 | r | #0>>Global Setup--------
#0.Load libraries -----------------------------------------------------------------------------
#netcdy libraries
library(ncdf4)
#spatial libraries
library(sp)
library(geosphere)
library(rgdal)
library(raster)
library(RStoolbox)
#plotting libraries
library(ggplot2)
#spatial plotting libraries
library(sf)
library(ggspatial)
library(rnaturalearth)
library(rnaturalearthdata)
#list-data handling libraries
library(dplyr)
library(rlist)
library(tidyr)
library(purrr)
library(data.table)
#time series/zoo libraries
library(zoo)
#date-time libraries
library(lubridate)
#0.Plot Color Setup------------
library(RColorBrewer)
rf <- colorRampPalette(rev(brewer.pal(11,'Spectral')))
r <- rf(32)
r<-rf(100)
library(RColorBrewer)
colstx <- rev(brewer.pal(n = 9, "Spectral"))
colsindex <- rev(brewer.pal(n = 9, "RdYlBu"))
colsdelta <- brewer.pal(n = 9, "Reds")
colsbias <- brewer.pal(n = 9, "PiYG")
colssd <- brewer.pal(n = 9, "Blues")
#1.0 Load List Data-----------------
load('mn7_days_cmfd.rdata')
mn.inv <-x
load('refrac.rdata')
refrac <- data.frame(x)
refrac<-unique(refrac)
#2.0 Creating New Lists-----------------
#refrac and 30-day ant for 1994-2005
for(i in 1:length(mn.inv))
{
a <- refrac
names(a)[1] <-'dates'
b <- data.frame(mn.inv[[i]][['ts']][['dates']],mn.inv[[i]][['ant.ts']][['ant.30']])
names(b)[1] <-'dates'
names(b)[2] <-'ant.30'
ant.30<-list(b)
ant.30<-list.names(ant.30,'ant.30.ts')
mn.inv[[i]]<-append(mn.inv[[i]],ant.30)
b<- b[as.numeric(strftime(b$dates, "%Y")) %in% 1994:2005,]
c <- merge(transform(a, dates = format(as.Date(dates), "%Y-%m-%d")), transform(b, dates = format(as.Date(dates), "%Y-%m-%d")))
qa <- data.frame(a$dates,c$refrac*c$ant.30/30)
names(qa)[1] <-'dates'
names(qa)[2] <-'qa.30'
qa<-list(qa)
qa<-list.names(qa,'qa.ts')
mn.inv[[i]]<-append(mn.inv[[i]],qa)
}
antqa.cmfd<-Map(c,lapply(mn.inv, '[', 'ant.30.ts'),lapply(mn.inv, '[', 'qa.ts'), lapply(mn.inv, '[', 'lon_cmfd'),lapply(mn.inv, '[', 'lat_cmfd'))
list.save(antqa.cmfd,'jja.antqa.cmfd.rdata')
#3.0 Fitting Distributions (JJA) LOG-NORM-----------------
# Fit a log-normal distribution to each cell's non-zero 30-day
# antecedent-rainfall series and attach the fit object as 'fit.lnorm'.
load('jja.antqa.cmfd.rdata')
jja.antqa.cmfd<-x
library(fitdistrplus)
# seq_along() is safe for an empty list (1:length() would iterate 1:0).
for(i in seq_along(jja.antqa.cmfd))
{
df<-jja.antqa.cmfd[[i]][['ant.30.ts']]
# Drop zeros (log-normal support is strictly positive) and NAs.
df<-df[df$ant.30!=0,]
df<-na.omit(df)
fit.lnorm<-fitdist(df$ant.30,"lnorm")
fit.lnorm<-list(fit.lnorm)
# Plain assignment: the previous `<<-` global assignment was unnecessary
# at top level and misleading.
fit.lnorm<- list.names(fit.lnorm,"fit.lnorm")
jja.antqa.cmfd[[i]] <- append(jja.antqa.cmfd[[i]],fit.lnorm)
}
library(goftest)
library(stats)
# The four lines below were copied from the ?Lognormal help page as a
# usage reference.  Executed as-is they stop the script: `q`, `p` and
# `n` are undefined, and `x` holds the list loaded above, not a numeric
# vector.  Kept as comments for reference only.
# dlnorm(x, meanlog = 0, sdlog = 1, log = FALSE)
# plnorm(q, meanlog = 0, sdlog = 1, lower.tail = TRUE, log.p = FALSE)
# qlnorm(p, meanlog = 0, sdlog = 1, lower.tail = TRUE, log.p = FALSE)
# rlnorm(n, meanlog = 0, sdlog = 1)
for(i in 1:length(jja.antqa.cmfd))
{
fit<-jja.antqa.cmfd[[i]][['fit.lnorm']]
df<-jja.antqa.cmfd[[i]][['ant.30.ts']]
df<-df[df$ant.30!=0,]
df<-na.omit(df)#flag for potential bugs
#Kolmogorov–Smirnov Goodness-of-fit test
test.ks <- ks.test(df$ant.30,"plnorm",fit$estimate[[1]],fit$estimate[[2]], alternative = "two.sided")
res.ks <- if(test.ks$p.value>=0.05) {'Accept'} else{'Reject'} # H-test at 0.05 confidence interval
test.ks <- list(res.ks,test.ks)
test.ks<-list.names(test.ks,'test.ks')
names(test.ks) <- c("h.test","ks.stats")
#Anderson–Darling test Goodness-of-fit test
test.ad <- ad.test(df$ant.30,"plnorm",fit$estimate[[1]],fit$estimate[[2]])
res.ad <- if(test.ad$p.value>=0.05) {'Accept'} else{'Reject'} # H-test at 0.05 confidence interval
test.ad <- list(res.ad,test.ad)
test.ad<-list.names(test.ad,'test.ad')
names(test.ad) <- c("h.test","ad.stats")
#Cramer-von Mises Goodness-of-fit test
test.cvm <- cvm.test(df$ant.30,"plnorm",fit$estimate[[1]],fit$estimate[[2]])
res.cvm <- if(test.cvm$p.value>=0.05) {'Accept'} else{'Reject'} # H-test at 0.05 confidence interval
test.cvm <- list(res.cvm,test.cvm)
test.cvm<-list.names(test.cvm,'test.cvm')
names(test.cvm) <- c("h.test","cvm.stats")
#test compilation and integration
tests.lnorm<-list(test.ks,test.ad,test.cvm)
names(tests.lnorm) <- c('test.ks','test.ad','test.cvm')
tests.lnorm<-list(tests.lnorm)
tests.lnorm<-list.names(tests.lnorm,'tests.lnorm')
jja.antqa.cmfd[[i]] <- append(jja.antqa.cmfd[[i]],tests.lnorm)
}
x<-jja.antqa.cmfd
#3.0 Fitting Distributions (JJA) GAMMA-----------------
# Fit a gamma distribution to each cell's non-zero 30-day antecedent
# series and attach the fit object as 'fit.gamma'.
load('jja.antqa.cmfd.rdata')
jja.antqa.cmfd<-x
library(fitdistrplus)
# seq_along() is safe for an empty list (1:length() would iterate 1:0).
for(i in seq_along(jja.antqa.cmfd))
{
df<-jja.antqa.cmfd[[i]][['ant.30.ts']]
# Drop zeros (gamma support is strictly positive) and NAs.
df<-df[df$ant.30!=0,]
df<-na.omit(df)
fit.gamma<-fitdist(df$ant.30,"gamma")
fit.gamma<-list(fit.gamma)
# Plain assignment: the previous `<<-` global assignment was unnecessary
# at top level and misleading.
fit.gamma<- list.names(fit.gamma,"fit.gamma")
jja.antqa.cmfd[[i]] <- append(jja.antqa.cmfd[[i]],fit.gamma)
}
library(goftest)
library(stats)
#
# dlnorm(x, meanlog = 0, sdlog = 1, log = FALSE)
# plnorm(q, meanlog = 0, sdlog = 1, lower.tail = TRUE, log.p = FALSE)
# qlnorm(p, meanlog = 0, sdlog = 1, lower.tail = TRUE, log.p = FALSE)
# rlnorm(n, meanlog = 0, sdlog = 1)
for(i in 1:length(jja.antqa.cmfd))
{
fit<-jja.antqa.cmfd[[i]][['fit.gamma']]
df<-jja.antqa.cmfd[[i]][['ant.30.ts']]
df<-df[df$ant.30!=0,]
df<-na.omit(df)#flag for potential bugs
#Kolmogorov–Smirnov Goodness-of-fit test
test.ks <- ks.test(df$ant.30,"pgamma",fit$estimate[[1]],fit$estimate[[2]], alternative = "two.sided")
res.ks <- if(test.ks$p.value>=0.05) {'Accept'} else{'Reject'} # H-test at 0.05 confidence interval
test.ks <- list(res.ks,test.ks)
test.ks<-list.names(test.ks,'test.ks')
names(test.ks) <- c("h.test","ks.stats")
#Anderson–Darling test Goodness-of-fit test
test.ad <- ad.test(df$ant.30,"pgamma",fit$estimate[[1]],fit$estimate[[2]])
res.ad <- if(test.ad$p.value>=0.05) {'Accept'} else{'Reject'} # H-test at 0.05 confidence interval
test.ad <- list(res.ad,test.ad)
test.ad<-list.names(test.ad,'test.ad')
names(test.ad) <- c("h.test","ad.stats")
#Cramer-von Mises Goodness-of-fit test
test.cvm <- cvm.test(df$ant.30,"pgamma",fit$estimate[[1]],fit$estimate[[2]])
res.cvm <- if(test.cvm$p.value>=0.05) {'Accept'} else{'Reject'} # H-test at 0.05 confidence interval
test.cvm <- list(res.cvm,test.cvm)
test.cvm<-list.names(test.cvm,'test.cvm')
names(test.cvm) <- c("h.test","cvm.stats")
#test compilation and integration
tests.gamma<-list(test.ks,test.ad,test.cvm)
names(tests.gamma) <- c('test.ks','test.ad','test.cvm')
tests.gamma<-list(tests.gamma)
tests.gamma<-list.names(tests.gamma,'tests.gamma')
jja.antqa.cmfd[[i]] <- append(jja.antqa.cmfd[[i]],tests.gamma)
}
x<-jja.antqa.cmfd
list.save(x,'jja.ant.gamma.cmfd.rdata')
#-------------------------#
#inspection
load('jja.ant.gamma.cmfd.rdata')
# tg<-lapply(x, `[[`, 'tests.gamma')
tg<-lapply(x, `[[`, 'tests.lnorm')
#checking cvm
cvm<-lapply(tg, `[`, 'test.cvm')
cv<-lapply(cvm, `[`, 'test.cvm')
cv<-lapply(cvm, `[[`, 'test.cvm')
h<-rbindlist(lapply(cv, `[`, 'h.test'))
#checking ad
cvm<-lapply(tg, `[`, 'test.ad')
cv<-lapply(cvm, `[`, 'test.ad')
cv<-lapply(cvm, `[[`, 'test.ad')
h<-rbindlist(lapply(cv, `[`, 'h.test'))
#checking ks
cvm<-lapply(tg, `[`, 'test.ks')
cv<-lapply(cvm, `[`, 'test.ks')
cv<-lapply(cvm, `[[`, 'test.ks')
h<-rbindlist(lapply(cv, `[`, 'h.test'))
#2>>Measure Antecedent Skewness ---------
ant.30<-antqa.cmfd[[1]][['ant.30.ts']]
#antecedent
ant.30<- ant.30[as.numeric(strftime(ant.30$dates, "%m")) %in% 6:8,]
library(e1071)
skewness(ant.30$ant.30)
skewness(sqrt(ant.30$ant.30))
hist((ant.30$ant.30))
plot(density(ant.30$ant.30))
qa.30<-antqa.cmfd[[1]][['qa.ts']]
qa.30<- qa.30[as.numeric(strftime(qa.30$dates, "%m")) %in% 6:8,]
hist((qa.30$qa.30))
plot(density(qa.30$qa.30))
skewness(qa.30$qa.30)
#3>>Exploring Distributions for ant.30---------
# Fit candidate distributions to the JJA antecedent series.
library(fitdistrplus)
fitwe<-fitdist(ant.30$ant.30,"weibull")
fitln<-fitdist(ant.30$ant.30,"lnorm")
# BUG FIX: "gauss" is not a distribution fitdist() can use (there are no
# dgauss/pgauss functions), so this line previously errored; the later
# denscomp()/ppcomp()/qqcomp() calls label this fit "Gamma", so fit the
# gamma distribution here.
fitga<-fitdist(ant.30$ant.30,"gamma")
fitn<-fitdist(ant.30$ant.30,"norm")
#Start: Pearson III
library(e1071)
m <- mean(ant.30$ant.30)
v <- var(ant.30$ant.30)
s <- sd(ant.30$ant.30)
g <- e1071::skewness(ant.30$ant.30, type=1)
# Correct the sample skew for bias using the recommendation of
# Bobee, B. and R. Robitaille (1977). "The use of the Pearson Type 3 and Log Pearson Type 3 distributions revisited."
# Water Resources Reseach 13(2): 427-443, as used by Kite
n <- length(ant.30$ant.30)
g <- g*(sqrt(n*(n-1))/(n-2))*(1+8.5/n)
# We will use method of moment estimates as starting values for the MLE search
my.shape <- (2/g)^2
my.scale <- sqrt(v)/sqrt(my.shape)*sign(g) # modified as recommended by Carl Schwarz
my.location <- m-sqrt(v * my.shape)
my.param <- list(shape=my.shape, scale=my.scale, location=my.location)
library(PearsonDS)
dPIII<-function(x, shape, location, scale) PearsonDS::dpearsonIII(x, shape, location, scale, log=FALSE)
pPIII<-function(q, shape, location, scale) PearsonDS::ppearsonIII(q, shape, location, scale, lower.tail = TRUE, log.p = FALSE)
qPIII<-function(p, shape, location, scale) PearsonDS::qpearsonIII(p, shape, location, scale, lower.tail = TRUE, log.p = FALSE)
fitpe3<-fitdistrplus::fitdist(ant.30$ant.30, distr="PIII", method="mge", start=my.param)
library(gsl)
pearsonFitML(ant.30$ant.30)
#End:: Pearson III
plot(fitpe3) # Q-Q plot didn't work so well
#Start:: Define Gumbel Functions
#a := beta
#b := alpha
# Gumbel density, CDF and quantile functions so fitdist() can fit
# "gumbel" by name.  a = location (beta), b = scale (alpha).
dgumbel <- function(x, a, b) {
  z <- (a - x) / b
  exp(z - exp(z)) / b
}
pgumbel <- function(q, a, b) {
  exp(-exp((a - q) / b))
}
qgumbel <- function(p, a, b) {
  a - b * log(-log(p))
}
#End :: Define Gumbel Functions
fitgu<-fitdist(ant.30$ant.30,"gumbel",start=list(a=10, b=10))
plot(fitgu)
plot(fitln)
denscomp(list(fitwe,fitln,fitga), legendtext=c("Weibull","Log-Normal","Gamma"))
ppcomp(list(fitwe,fitln,fitga), legendtext=c("Weibull","Log-Normal","Gamma"))
qqcomp(list(fitwe,fitln,fitga), legendtext=c("Weibull","Log-Normal","Gamma"))
#
# fitwe<-fitdist(sqrt(ant.30$ant.30),"weibull")
# fitln<-fitdist(sqrt(ant.30$ant.30),"lnorm")
# fitga<-fitdist(sqrt(ant.30$ant.30),"gamma")
# fitnorm<-fitdist(sqrt(ant.30$ant.30),"norm")
#
# cdfcomp(list(fitwe,fitln,fitga,fitnorm), legendtext=c("Weibull","Log-Normal","Gamma","Normal"))
# qqcomp(list(fitwe,fitln,fitga,fitnorm), legendtext=c("Weibull","Log-Normal","Gamma","Normal"))
#4>>Log-Normal Fitting for ant.30 and GOF tests ---------
#fit
library(fitdistrplus)
fitln<-fitdist(ant.30$ant.30,"lnorm")
plot(fitln)
#gof tests
gofstat(fitln)
#5>>>Exploring Distributions for qa.30 and GOF tests ---------
qa.30[qa.30==0] <- NA
qa.30<-na.omit(qa.30)
library(fitdistrplus)
fitwe<-fitdist(qa.30$qa.30,"weibull")
fitln<-fitdist(qa.30$qa.30,"lnorm")
fitga<-fitdist(qa.30$qa.30,"gamma")
#Start:: Define Gumbel Functions
#a := beta
#b := alpha
# Gumbel density, CDF and quantile functions so fitdist() can fit
# "gumbel" by name.  a = location (beta), b = scale (alpha).
dgumbel <- function(x, a, b) {
  z <- (a - x) / b
  exp(z - exp(z)) / b
}
pgumbel <- function(q, a, b) {
  exp(-exp((a - q) / b))
}
qgumbel <- function(p, a, b) {
  a - b * log(-log(p))
}
#End :: Define Gumbel Functions
fitgu<-fitdist(qa.30$qa.30,"gumbel",start=list(a=10, b=10))
denscomp(list(fitwe,fitln,fitga,fitgu), legendtext=c("Weibull","Log-Normal","Gamma","Gumbel"))
ppcomp(list(fitwe,fitln,fitga,fitgu), legendtext=c("Weibull","Log-Normal","Gamma","Gumbel"))
qqcomp(list(fitwe,fitln,fitga,fitgu), legendtext=c("Weibull","Log-Normal","Gamma","Gumbel"))
plot(fitgu)
gofstat(fitgu)
|
#################################
### FRE7241 Test #1 Solutions 06/16/15
#################################
# Max score 50pts
# The below solutions are examples,
# Slightly different solutions are also possible.
##################################
# 1. (15pts) subset "zoo_series" to Mondays,
# download file "zoo_series.Rdata" from NYU Classes, and load() it,
# the file "zoo_series.Rdata" contains a zoo called "zoo_series",
load(file="C:/Develop/data/zoo_series.RData")  # loads the zoo object 'zoo_series'
# first create a logical vector from the index of "zoo_series",
# called "mon_days", which is TRUE if an index date is a Monday,
# and FALSE otherwise,
# use functions index() and weekdays()
library(zoo)
mon_days <- weekdays(index(zoo_series))=="Monday"
# extract (subset) the first column of "zoo_series" for index dates
# that are Mondays, and call it "zoo_mondays",
# NOTE(review): this recomputes the Monday condition instead of reusing
# mon_days (equivalent, but mon_days is left unused).
zoo_mondays <- zoo_series[weekdays(index(zoo_series))=="Monday", 1]
# calculate weekly percentage returns from "zoo_mondays",
# NOTE(review): diff(log(x)) gives log returns, not percentage returns --
# close for small moves; confirm which is intended.
zoo_mondays <- diff(log(zoo_mondays))
# find the dates of the weeks (not just the indices) with
# the highest and lowest returns,
# use functions which(), or which.max() and which.min,
zoo_mondays[which.max(zoo_mondays)]
zoo_mondays[which.min(zoo_mondays)]
##################################
# 2. (35pts) Create a function called lag_it() that applies a lag to vectors
# and "zoo" time series objects,
# lag_it() should accept two arguments:
# the first argument called "se_ries" can be a vector or "zoo" time series object,
# if "se_ries" is a vector, then lag_it() should return a lagged vector,
# of the same length as the input,
# if "se_ries" is a "zoo", then lag_it() should return a lagged "zoo",
# with the same number of rows as the input,
# the second argument called "lag" is an integer specifying the number of lags,
# if "lag" is positive, then lag_it() should replace the present value with
# "lag" number of values from the past,
# if "lag" is negative, then lag_it() should replace the present value with
# "lag" number of values from the future,
# for a vector, past values have a smaller index, and future values have a larger index,
# lag_it() should add NA values in place of values that are missing,
# lag_it() should return NULL if "se_ries" is neither a vector nor a
# "zoo" time series,
#
# some observations about the default method lag():
# the default method lag() can accept a vector and returns
# a "ts" time series object,
#
# some observations about lag.zoo():
# The method lag.zoo() returns a lagged version of a "zoo" time series,
# by shifting its time index by "k" observations,
# If "k" is positive, then lag.zoo() shifts values from the future to the present,
# and if "k" is negative then it shifts them from the past,
# This is the opposite of what is usually considered as a positive "lag",
# A positive lag should replace the present value with values from the past
# (negative lags should replace with values from the future),
# lag.zoo() omits any NA values the lag may have produced,
# returning a shorter time series than the original,
#
# hint: you can use functions is.vector(), is.zoo(), cbind(), merge(),
# lag.zoo(), c(), and rep(),
lag_it <- function(se_ries, lag=1) {
  # Apply an integer lag to a vector or "zoo" series, NA-padding so the result
  # has the same length (vector) / rows (zoo) as the input.
  #   se_ries: vector or zoo object; anything else yields a warning and NULL.
  #   lag:     positive = shift past values forward, negative = shift future
  #            values back, zero = identity.
  if (is.vector(se_ries)) { # se_ries is a vector
    if (lag == 0) {
      # BUGFIX: with lag == 0 the negative branch below evaluated
      # se_ries[-(1:0)], i.e. se_ries[-c(1, 0)], wrongly dropping the first
      # element; a zero lag must return the input unchanged.
      se_ries
    } else if (lag > 0) {
      # prepend 'lag' NAs, then trim the same number of trailing values
      se_ries <- c(rep(NA, lag), se_ries)
      se_ries[-((length(se_ries)-lag+1):length(se_ries))]
    } else {
      # append '-lag' NAs, then trim the same number of leading values
      se_ries <- c(se_ries, rep(NA, -lag))
      se_ries[-(1:(-lag))]
    }
  } else if (is.zoo(se_ries)) { # se_ries is a zoo
    # lag.zoo()'s k uses the opposite sign convention; na.pad keeps all rows
    lag(se_ries, k=-lag, na.pad=TRUE)
  } else { # se_ries is neither a vector nor a "zoo" time series
    warning(paste0("argument \"", deparse(substitute(se_ries)), "\" must be either a vector, zoo, or ts object"))
    NULL # return NULL
  }
} # end lag_it
# call lag_it() as below, to verify it works correctly,
load(file="C:/Develop/data/zoo_series.RData")
lag_it(1:9)      # default lag = 1: first value becomes NA
lag_it(1:9, 2)   # lag by 2: two leading NAs
lag_it(1:9, -1)  # negative lag: shift future values back, trailing NA
lag_it(1:9, -2)
lag_it(zoo_series[1:6, 1:2])      # zoo input: same number of rows, NA-padded
lag_it(zoo_series[1:6, 1:2], 2)
lag_it(zoo_series[1:6, 1:2], -1)
lag_it(matrix(1:9, ncol=1))  # matrix is neither vector nor zoo: warning + NULL
lag_it("a", "b")             # exercises invalid-argument handling
| /FRE7241_test1 solution.R | no_license | PsaksuMeRap/FRE6871 | R | false | false | 4,157 | r | #################################
### FRE7241 Test #1 Solutions 06/16/15
#################################
# Max score 50pts
# The below solutions are examples,
# Slightly different solutions are also possible.
##################################
# 1. (15pts) subset "zoo_series" to Mondays,
# download file "zoo_series.Rdata" from NYU Classes, and load() it,
# the file "zoo_series.Rdata" contains a zoo called "zoo_series",
load(file="C:/Develop/data/zoo_series.RData")  # loads the zoo object 'zoo_series'
# first create a logical vector from the index of "zoo_series",
# called "mon_days", which is TRUE if an index date is a Monday,
# and FALSE otherwise,
# use functions index() and weekdays()
library(zoo)
mon_days <- weekdays(index(zoo_series))=="Monday"
# extract (subset) the first column of "zoo_series" for index dates
# that are Mondays, and call it "zoo_mondays",
# NOTE(review): recomputes the Monday condition instead of reusing mon_days.
zoo_mondays <- zoo_series[weekdays(index(zoo_series))=="Monday", 1]
# calculate weekly percentage returns from "zoo_mondays",
# NOTE(review): diff(log(x)) gives log returns, not percentage returns.
zoo_mondays <- diff(log(zoo_mondays))
# find the dates of the weeks (not just the indices) with
# the highest and lowest returns,
# use functions which(), or which.max() and which.min,
zoo_mondays[which.max(zoo_mondays)]
zoo_mondays[which.min(zoo_mondays)]
##################################
# 2. (35pts) Create a function called lag_it() that applies a lag to vectors
# and "zoo" time series objects,
# lag_it() should accept two arguments:
# the first argument called "se_ries" can be a vector or "zoo" time series object,
# if "se_ries" is a vector, then lag_it() should return a lagged vector,
# of the same length as the input,
# if "se_ries" is a "zoo", then lag_it() should return a lagged "zoo",
# with the same number of rows as the input,
# the second argument called "lag" is an integer specifying the number of lags,
# if "lag" is positive, then lag_it() should replace the present value with
# "lag" number of values from the past,
# if "lag" is negative, then lag_it() should replace the present value with
# "lag" number of values from the future,
# for a vector, past values have a smaller index, and future values have a larger index,
# lag_it() should add NA values in place of values that are missing,
# lag_it() should return NULL if "se_ries" is neither a vector nor a
# "zoo" time series,
#
# some observations about the default method lag():
# the default method lag() can accept a vector and returns
# a "ts" time series object,
#
# some observations about lag.zoo():
# The method lag.zoo() returns a lagged version of a "zoo" time series,
# by shifting its time index by "k" observations,
# If "k" is positive, then lag.zoo() shifts values from the future to the present,
# and if "k" is negative then it shifts them from the past,
# This is the opposite of what is usually considered as a positive "lag",
# A positive lag should replace the present value with values from the past
# (negative lags should replace with values from the future),
# lag.zoo() omits any NA values the lag may have produced,
# returning a shorter time series than the original,
#
# hint: you can use functions is.vector(), is.zoo(), cbind(), merge(),
# lag.zoo(), c(), and rep(),
lag_it <- function(se_ries, lag=1) {
  # Apply an integer lag to a vector or "zoo" series, NA-padding so the result
  # has the same length (vector) / rows (zoo) as the input.
  #   se_ries: vector or zoo object; anything else yields a warning and NULL.
  #   lag:     positive = shift past values forward, negative = shift future
  #            values back, zero = identity.
  if (is.vector(se_ries)) { # se_ries is a vector
    if (lag == 0) {
      # BUGFIX: with lag == 0 the negative branch below evaluated
      # se_ries[-(1:0)], i.e. se_ries[-c(1, 0)], wrongly dropping the first
      # element; a zero lag must return the input unchanged.
      se_ries
    } else if (lag > 0) {
      # prepend 'lag' NAs, then trim the same number of trailing values
      se_ries <- c(rep(NA, lag), se_ries)
      se_ries[-((length(se_ries)-lag+1):length(se_ries))]
    } else {
      # append '-lag' NAs, then trim the same number of leading values
      se_ries <- c(se_ries, rep(NA, -lag))
      se_ries[-(1:(-lag))]
    }
  } else if (is.zoo(se_ries)) { # se_ries is a zoo
    # lag.zoo()'s k uses the opposite sign convention; na.pad keeps all rows
    lag(se_ries, k=-lag, na.pad=TRUE)
  } else { # se_ries is neither a vector nor a "zoo" time series
    warning(paste0("argument \"", deparse(substitute(se_ries)), "\" must be either a vector, zoo, or ts object"))
    NULL # return NULL
  }
} # end lag_it
# call lag_it() as below, to verify it works correctly,
load(file="C:/Develop/data/zoo_series.RData")
lag_it(1:9)      # default lag = 1: first value becomes NA
lag_it(1:9, 2)   # lag by 2: two leading NAs
lag_it(1:9, -1)  # negative lag: shift future values back, trailing NA
lag_it(1:9, -2)
lag_it(zoo_series[1:6, 1:2])      # zoo input: same number of rows, NA-padded
lag_it(zoo_series[1:6, 1:2], 2)
lag_it(zoo_series[1:6, 1:2], -1)
lag_it(matrix(1:9, ncol=1))  # matrix is neither vector nor zoo: warning + NULL
lag_it("a", "b")             # exercises invalid-argument handling
|
#' @title Subtyping multi-omics data
#' @description Perform subtyping using multiple types of data
#'
#' @param dataList a list of data matrices. Each matrix represents a data type where the rows are items and the columns are features. The matrices must have the same set of items.
#' @param kMax The maximum number of clusters used for automatically detecting the number of clusters in \code{PerturbationClustering}. This parameter is passed to \code{PerturbationClustering} and does not affect the final number of clusters in \code{SubtypingOmicsData}. Default value is \code{5}.
#' @param kMin The minimum number of clusters used for automatically detecting the number of clusters in \code{PerturbationClustering}. This parameter is passed to \code{PerturbationClustering} and does not affect the final number of clusters in \code{SubtypingOmicsData}. Default value is \code{2}.
#' @param k The number of clusters. If k is set then kMin and kMax will be ignored.
#' @param agreementCutoff agreement threshold to be considered consistent. Default value is \code{0.5}.
#' @param ncore Number of cores that the algorithm should use. Default value is \code{1}.
#' @param verbose set it to \code{TRUE} or \code{FALSE} to get more or less details respectively.
#' @param sampledSetSize The number of samples used in the subsampling process when the dataset is big. Default value is \code{2000}.
#' @param knn.k The value of k of the k-nearest neighbors algorithm. If knn.k is not set then it will be used elbow method to calculate the k.
#' @param ... these arguments will be passed to \code{PerturbationClustering} algorithm. See details for more information
#'
#' @details
#'
#' \code{SubtypingOmicsData} implements the subtyping of multi-omic data based on the Perturbation clustering algorithm of Nguyen et al (2017), Nguyen et al (2019) and Nguyen, et al. (2021).
#' The input is a list of data matrices where each matrix represents the molecular measurements of a data type. The input matrices must have the same number of rows.
#' \code{SubtypingOmicsData} aims to find the optimum number of subtypes and location of each sample in the clusters from integrated input data \code{dataList} through two processing stages:
#'
#' 1. Stage I: The algorithm first partitions each data type using the function \code{PerturbationClustering}.
#' It then merges the connectivities across data types into similarity matrices.
#' Both kmeans and similarity-based clustering algorithms - partitioning around medoids \code{pam} are used to partition the built similarity.
#' The algorithm returns the partitioning that agrees the most with individual data types.\cr
#' 2. Stage II: The algorithm attempts to split each discovered group if there is a strong agreement between data types,
#' or if the subtyping in Stage I is very unbalanced.
#'
#' When clustering a large number of samples, this function uses a subsampling technique to reduce the computational complexity with the two parameters \code{sampledSetSize} and \code{knn.k}. Please consult Nguyen et al. (2021) for details.
#'
#' @return
#'
#' \code{SubtypingOmicsData} returns a list with at least the following components:
#' \item{cluster1}{A vector of labels indicating the cluster to which each sample is allocated in Stage I}
#' \item{cluster2}{A vector of labels indicating the cluster to which each sample is allocated in Stage II}
#' \item{dataTypeResult}{A list of results for individual data type. Each element of the list is the result of the \code{PerturbationClustering} for the corresponding data matrix provided in dataList.}
#'
#'
#' @references
#'
#' 1. H Nguyen, S Shrestha, S Draghici, & T Nguyen. PINSPlus: a tool for tumor subtype discovery in integrated genomic data. Bioinformatics, 35(16), 2843-2846, (2019).
#'
#' 2. T Nguyen, R Tagett, D Diaz, S Draghici. A novel method for data integration and disease subtyping. Genome Research, 27(12):2025-2039, 2017.
#'
#' 3. T. Nguyen, "Horizontal and vertical integration of bio-molecular data", PhD thesis, Wayne State University, 2017.
#'
#' 4. H Nguyen, D Tran, B Tran, M Roy, A Cassell, S Dascalu, S Draghici & T Nguyen. SMRT: Randomized Data Transformation for Cancer Subtyping and Big Data Analysis. Frontiers in oncology. 2021.
#'
#' @seealso \code{\link{PerturbationClustering}}
#'
#' @examples
#' \donttest{
#' # Load the kidney cancer carcinoma data
#' data(KIRC)
#'
#' # Perform subtyping on the multi-omics data
#' dataList <- list (as.matrix(KIRC$GE), as.matrix(KIRC$ME), as.matrix(KIRC$MI))
#' names(dataList) <- c("GE", "ME", "MI")
#' result <- SubtypingOmicsData(dataList = dataList)
#'
#' # Change Pertubation clustering algorithm's arguments
#' result <- SubtypingOmicsData(
#' dataList = dataList,
#' clusteringMethod = "kmeans",
#' clusteringOptions = list(nstart = 50)
#' )
#'
#' # Plot the Kaplan-Meier curves and calculate Cox p-value
#' library(survival)
#' cluster1=result$cluster1;cluster2=result$cluster2
#' a <- intersect(unique(cluster2), unique(cluster1))
#' names(a) <- intersect(unique(cluster2), unique(cluster1))
#' a[setdiff(unique(cluster2), unique(cluster1))] <- seq(setdiff(unique(cluster2), unique(cluster1)))
#' + max(cluster1)
#' colors <- a[levels(factor(cluster2))]
#' coxFit <- coxph(
#' Surv(time = Survival, event = Death) ~ as.factor(cluster2),
#' data = KIRC$survival,
#' ties = "exact"
#' )
#' mfit <- survfit(Surv(Survival, Death == 1) ~ as.factor(cluster2), data = KIRC$survival)
#' plot(
#' mfit, col = colors,
#' main = "Survival curves for KIRC, level 2",
#' xlab = "Days", ylab = "Survival",lwd = 2
#' )
#' legend("bottomright",
#' legend = paste(
#' "Cox p-value:",
#' round(summary(coxFit)$sctest[3], digits = 5),
#' sep = ""
#' )
#' )
#' legend(
#' "bottomleft",
#' fill = colors,
#' legend = paste(
#' "Group ",
#' levels(factor(cluster2)),": ", table(cluster2)[levels(factor(cluster2))],
#' sep =""
#' )
#' )
#'
#' }
#' @importFrom FNN knnx.index
#' @importFrom entropy entropy
#' @importFrom impute impute.knn
#' @export
SubtypingOmicsData <- function (dataList, kMin = 2, kMax = 5, k = NULL, agreementCutoff = 0.5, ncore = 1, verbose = TRUE, sampledSetSize = 2000, knn.k = NULL, ...) {
    now <- Sys.time()
    # logging helper: a no-op unless verbose is TRUE
    mlog <- if(!verbose) function(...){} else function(...){
        message(...)
        flush.console()
    }
    # Subtyping runs on the samples present in ALL data types; samples missing
    # from at least one data type are classified into the groups afterwards.
    dataListComplete <- dataList
    commonSamples <- Reduce(f = intersect, x = lapply(dataList, rownames))
    dataList <- lapply(dataList, function(d) d[commonSamples, ])
    # per data type: the sub-matrix of samples NOT shared by all data types
    notCommonData <- lapply(dataListComplete, function(d) {
        rn <- rownames(d)[(!rownames(d) %in% commonSamples)]
        d <- matrix(d[rn, ], ncol = ncol(d))
        rownames(d) <- rn
        d
    })
    dataListTrain <- NULL
    dataListTest <- NULL
    seed <- round(rnorm(1)*10^6)
    dataList <- lapply(dataList, as.data.frame)
    # Big data: cluster a random subsample of 'sampledSetSize' samples; the
    # remaining samples are assigned later by a k-NN classifier.
    if (nrow(dataList[[1]]) > sampledSetSize) {
        n_samples <- nrow(dataList[[1]])
        ind <- sample.int(n_samples, size = sampledSetSize)
        dataListTrain <- lapply(dataList, function(x) x[ind, ])
        dataListTest <- lapply(dataList, function(x) x[-ind, , drop=FALSE])
        dataList <- dataListTrain
    }
    # Cluster each data type with PerturbationClustering, merge the per-type
    # connectivity matrices, and -- when the data types agree (or forceSplit is
    # set) -- partition the merged binary connectivity PW.
    runPerturbationClustering <- function(dataList, kMin, kMax, stage = 1, forceSplit = FALSE, k = NULL){
        dataTypeResult <- lapply(dataList, function(data) {
            set.seed(seed)
            data <- as.matrix(data)
            data <- data[rowSums(is.na(data)) == 0, ]
            PerturbationClustering(data, kMin, kMax, ncore = ncore, verbose = verbose,...)
        })
        allSamples <- unique(unlist(lapply(dataList, rownames)))
        # average the original connectivity matrices over data types, aligning
        # every matrix to the common sample order; missing entries are imputed
        origList <- lapply(dataTypeResult, function(r) r$origS[[r$k]])
        origMerged <- do.call(
            what = rbind,
            args = lapply(origList, function(o){
                o <- as.data.frame(o)
                as.numeric(as.matrix(t(as.data.frame(t(o[allSamples, ]))[allSamples, ])))
            })
        )
        orig <- matrix(colMeans(origMerged, na.rm = TRUE), nrow = length(allSamples))
        rownames(orig) <- colnames(orig) <- allSamples
        orig <- impute::impute.knn(orig)$data
        orig[is.na(orig)] <- 0
        # PW[i, j] == 1 iff samples i and j are connected in EVERY data type
        PW <- matrix(as.numeric(colSums(origMerged == 0, na.rm = TRUE) == 0), nrow = length(allSamples))
        rownames(PW) <- colnames(PW) <- allSamples
        # fraction of clear-cut (all-0 or all-1) off-diagonal entries
        agreement <- (sum(orig == 0) + sum(orig == 1) - nrow(orig)) / (nrow(orig) ^ 2 - nrow(orig))
        # same merge for the perturbed connectivity matrices
        pertList <- lapply(dataTypeResult, function(r) r$pertS[[r$k]])
        pertMerged <- do.call(
            what = rbind,
            args = lapply(pertList, function(p){
                p <- as.data.frame(p)
                as.numeric(as.matrix(t(as.data.frame(t(p[allSamples, ]))[allSamples, ])))
            })
        )
        pert <- matrix(colMeans(pertMerged, na.rm = TRUE), nrow = length(allSamples))
        rownames(pert) <- colnames(pert) <- allSamples
        pert <- impute::impute.knn(pert)$data
        pert[is.na(pert)] <- 0
        groups <- NULL
        mlog("STAGE : ", stage, "\t Agreement : ", agreement)
        if (agreement >= agreementCutoff | forceSplit){
            hcW <- hclust(dist(PW))
            maxK <- min(kMax*2, dim(unique(PW, MARGIN = 2))[2] - (stage - 1))
            maxHeight <- FindMaxHeight(hcW, maxK = min(2*maxK, 10))
            groups <- cutree(hcW, maxHeight)
            # when k is given, cap the number of groups at k (cutting at k can
            # still produce small groups)
            if (!is.null(k) && max(groups) > k){
                groups <- cutree(hcW, k)
            }
        }
        list(dataTypeResult = dataTypeResult, orig = orig, pert = pert, PW = PW, groups = groups, agreement = agreement)
    }
    pResult <- runPerturbationClustering(dataList, kMin, kMax, k = k)
    groups <- pResult$groups
    groups2 <- NULL
    if (!is.null(groups)) {
        # Stage I succeeded on the merged binary connectivity (strong agreement)
        groups2 <- groups
        if (is.null(k)){
            # Stage II: try to split each sufficiently large group further
            for (g in sort(unique(groups))) {
                miniGroup <- names(groups[groups == g])
                if (length(miniGroup) > 30) {
                    groupsM <- runPerturbationClustering(dataList = lapply(dataList, function(d) d[miniGroup, ]), kMin = kMin, kMax = min(kMax, 5), stage = 2)$groups
                    if (!is.null(groupsM))
                        groups2[miniGroup] <- paste(g, groupsM, sep = "-")
                }
            }
        } else {
            agreements <- rep(1, length(groups))
            names(agreements) <- names(groups)
            # k was given: keep force-splitting the larger groups until we reach
            # k clusters, or nothing splittable remains
            tbl <- sort(table(groups2), decreasing = TRUE)
            minGroupSize <- 30
            while (length(unique(groups2)) < k){
                if (all(tbl <= 30)) {
                    minGroupSize <- 10
                }
                # cannot split anymore
                if (all(tbl <= 10)) break
                for (g in names(tbl)){
                    miniGroup <- names(groups2[groups2 == g])
                    if (length(miniGroup) > minGroupSize) {
                        splitRes <- runPerturbationClustering(dataList = lapply(dataList, function(d) d[miniGroup, ]), kMin = kMin, kMax = min(kMax, 5), stage = 2, forceSplit = TRUE)
                        groupsM <- splitRes$groups
                        if (!is.null(groupsM)){
                            groups2[miniGroup] <- paste(g, groupsM, sep = "-")
                            agreements[miniGroup] <- splitRes$agreement
                        }
                    }
                }
                tbl <- sort(table(groups2), decreasing = TRUE)
            }
            # after force-splitting there can be MORE than k groups: undo the
            # weakest-agreement splits and re-split to exactly what is needed
            for (aggr in sort(unique(agreements))){
                if (length(unique(groups2)) == k) break
                merge.group <- agreements == aggr
                k.smallGroup <- length(unique(groups2[merge.group]))
                k.need <- k - (length(unique(groups2)) - k.smallGroup + 1) + 1
                # drop the last "-suffix" of the label, i.e. undo the split
                groups2[merge.group] <- unlist(lapply(strsplit(groups2[merge.group], "-"), function(g){
                    paste0(g[1:(length(g)-1)], collapse = "-")
                }))
                if (k.need > 1){
                    # BUGFIX: re-split the samples being merged (merge.group);
                    # the original subset with the stale 'miniGroup' left over
                    # from the splitting loop above (cf. the parallel branch
                    # further down, which uses merge.group)
                    splitRes <- runPerturbationClustering(dataList = lapply(dataList, function(d) d[merge.group, ]),
                                                          kMin = kMin, kMax = min(kMax, 5), stage = 2, forceSplit = TRUE,
                                                          k = k.need)
                    groupsM <- splitRes$groups
                    groups2[merge.group] <- paste(groups2[merge.group], groupsM, sep = "-")
                }
                agreements[merge.group] <- 1
            }
        }
    } else{
        # Data types do not agree strongly: build a consensus by clustering the
        # averaged connectivity with both PAM and hierarchical clustering, and
        # keep whichever agrees more with the per-data-type partitions.
        set.seed(seed)
        orig <- pResult$orig
        dataTypeResult <- pResult$dataTypeResult
        clusteringAlgorithm <- GetClusteringAlgorithm(...)$fun
        groupings <- lapply(dataTypeResult, function(r) clusteringAlgorithm(data = r$origS[[r$k]], k = r$k))
        pGroups <- ClusterUsingPAM(orig = orig, kMax = kMax*2, groupings = groupings)
        hGroups <- ClusterUsingHierarchical(orig = orig, kMax = kMax*2, groupings = groupings)
        pAgree <- pGroups$agree; hAgree <- hGroups$agree;
        groups <- (if (pAgree > hAgree) pGroups else if (hAgree > pAgree) hGroups else {
            # tie on 'orig': break it using the perturbed connectivity
            pAgree <- ClusterUsingPAM(orig = pResult$pert, kMax = kMax, groupings = groupings)$agree
            hAgree <- ClusterUsingHierarchical(orig = pResult$pert, kMax = kMax, groupings = groupings)$agree
            if (hAgree - pAgree >= 1e-3) hGroups else pGroups
        })$cluster
        names(groups) <- rownames(orig)
        groups2 <- groups
        if (is.null(k)){
            mlog("Check if can proceed to stage II")
            # only attempt stage II when the stage-I partition is unbalanced
            normalizedEntropy <- entropy::entropy(table(groups)) / log(length(unique(groups)), exp(1))
            if (normalizedEntropy < 0.5) {
                for (g in sort(unique(groups))) {
                    miniGroup <- names(groups[groups == g])
                    # don't split a group that is already very small
                    if (length(miniGroup) > 30) {
                        # check if the data types in this group can be split
                        groupsM <- runPerturbationClustering(dataList = lapply(dataList, function(d) d[miniGroup, ]),kMin = kMin, kMax = min(kMax, 5), stage = 2, forceSplit = TRUE, k = NULL)$groups
                        if (!is.null(groupsM))
                            groups2[miniGroup] <- paste(g, groupsM, sep = "-")
                    }
                }
            }
        } else {
            if (length(unique(groups2)) > k){
                # too many groups: redo the consensus clustering with k fixed
                pGroups <- ClusterUsingPAM(orig = orig, kMax = kMax*2, groupings = groupings, k)
                hGroups <- ClusterUsingHierarchical(orig = orig, kMax = kMax*2, groupings = groupings, k)
                pAgree <- pGroups$agree; hAgree <- hGroups$agree;
                groups <- (if (pAgree > hAgree) pGroups else if (hAgree > pAgree) hGroups else {
                    pAgree <- ClusterUsingPAM(orig = pResult$pert, kMax = kMax, groupings = groupings, k)$agree
                    hAgree <- ClusterUsingHierarchical(orig = pResult$pert, kMax = kMax, groupings = groupings, k)$agree
                    if (hAgree - pAgree >= 1e-3) hGroups else pGroups
                })$cluster
                names(groups) <- rownames(orig)
                groups2 <- groups
            } else if (length(unique(groups2)) < k){
                # too few groups: split like stage II (entropy-gated first, then
                # forced), mirroring the agreement branch above
                normalizedEntropy <- entropy::entropy(table(groups)) / log(length(unique(groups)), exp(1))
                agreements <- rep(1, length(groups))
                names(agreements) <- names(groups)
                if (normalizedEntropy < 0.5) {
                    for (g in sort(unique(groups))) {
                        miniGroup <- names(groups[groups == g])
                        # don't split a group that is already very small
                        if (length(miniGroup) > 30) {
                            # check if the data types in this group can be split
                            splitRes <- runPerturbationClustering(dataList = lapply(dataList, function(d) d[miniGroup, ]), kMin = kMin, kMax = min(kMax, 5), stage = 2, forceSplit = TRUE)
                            groupsM <- splitRes$groups
                            if (!is.null(groupsM)){
                                groups2[miniGroup] <- paste(g, groupsM, sep = "-")
                                agreements[miniGroup] <- splitRes$agreement
                            }
                        }
                    }
                }
                # if the number of groups is still less than k, then force split
                if (length(unique(groups2)) < k){
                    tbl <- sort(table(groups2), decreasing = TRUE)
                    minGroupSize <- 30
                    while (length(unique(groups2)) < k){
                        if (all(tbl <= 30)) {
                            minGroupSize <- 10
                        }
                        # cannot split anymore
                        if (all(tbl <= 10)) break
                        for (g in names(tbl)){
                            miniGroup <- names(groups2[groups2 == g])
                            if (length(miniGroup) > minGroupSize) {
                                splitRes <- runPerturbationClustering(dataList = lapply(dataList, function(d) d[miniGroup, ]), kMin = kMin, kMax = min(kMax, 5), stage = 2, forceSplit = TRUE)
                                groupsM <- splitRes$groups
                                if (!is.null(groupsM)){
                                    groups2[miniGroup] <- paste(g, groupsM, sep = "-")
                                    agreements[miniGroup] <- splitRes$agreement
                                }
                            }
                        }
                        tbl <- sort(table(groups2), decreasing = TRUE)
                    }
                }
                # after further splitting the number of groups can be > k:
                # merge clusters back based on their agreement
                for (aggr in sort(unique(agreements))){
                    if (length(unique(groups2)) == k) break
                    merge.group <- agreements == aggr
                    k.smallGroup <- length(unique(groups2[merge.group]))
                    k.need <- k - (length(unique(groups2)) - k.smallGroup + 1) + 1
                    groups2[merge.group] <- unlist(lapply(strsplit(groups2[merge.group], "-"), function(g){
                        paste0(g[1:(length(g)-1)], collapse = "-")
                    }))
                    if (k.need > 1){
                        splitRes <- runPerturbationClustering(dataList = lapply(dataList, function(d) d[merge.group, ]),
                                                              kMin = kMin, kMax = min(kMax, 5), stage = 2, forceSplit = TRUE,
                                                              k = k.need)
                        groupsM <- splitRes$groups
                        groups2[merge.group] <- paste(groups2[merge.group], groupsM, sep = "-")
                    }
                    agreements[merge.group] <- 1
                }
            }
        }
    }
    {
        # Big-data path: classify the held-out (non-subsampled) samples into
        # the discovered clusters with a k-NN classifier in PCA space.
        train_y <- groups
        train_y2 <- groups2
        if(!is.null(dataListTest)) {
            set.seed(seed)
            RcppParallel::setThreadOptions(ncore)
            test_prob <- matrix(0, nrow = n_samples - sampledSetSize, ncol = length(unique(groups)))
            if(!is.null(train_y2))
            {
                test_prob2 <- matrix(0, nrow = n_samples - sampledSetSize, ncol = length(unique(groups2)))
            }
            for (i in seq_along(dataListTrain)) {
                train <- dataListTrain[[i]]
                test <- dataListTest[[i]]
                if(ncol(train)*nrow(train) > 2e7) {
                    # reuse the PCA already computed by PerturbationClustering
                    pca <- pResult$dataTypeResult[[i]]$pca
                } else {
                    # BUGFIX: the rank bound must come from 'train'; the
                    # original read nrow(data), which is undefined in this
                    # scope (cf. the identical call in the block below)
                    pca <- rpca.para(train, min(nrow(train), 20), scale = FALSE)
                }
                train <- pca$x
                test <- predict.rpca.para(pca, test)
                test_prob <- test_prob + classifierProb(train, groups, test, knn.k)
                if(!is.null(train_y2)){
                    test_prob2 <- test_prob2 + classifierProb(train, groups2, test, knn.k)
                }
            }
            test_y <- apply(test_prob, 1, which.max)
            groups <- rep(0, n_samples)
            groups[ind] <- train_y
            groups[-ind] <- test_y
            if(!is.null(train_y2)) {
                test_y2 <- apply(test_prob2, 1, which.max)
                groups2 <- rep(0, n_samples)
                groups2[ind] <- train_y2
                groups2[-ind] <- test_y2
            }
        }
    }
    # Classify samples missing from at least one data type, using the data
    # types in which they ARE present.
    {
        # if the data were subsampled, 'groups' now already includes the test set
        train_y <- groups
        train_y2 <- groups2
        allSamples <- unique(unlist(lapply(dataListComplete, rownames)))
        n_samples <- length(allSamples)
        notCommonSamples <- allSamples[!(allSamples %in% commonSamples)]
        if(length(allSamples) > length(commonSamples)) {
            set.seed(seed)
            RcppParallel::setThreadOptions(ncore)
            # if dataListTrain is not NULL the data is big and was subsampled
            if (!is.null(dataListTrain)){
                # NOTE(review): after the subsampling step above, dataList and
                # dataListTrain refer to the same subsample, so this rbind
                # duplicates rows; rbind with dataListTest may have been
                # intended -- confirm against upstream PINSPlus.
                dataListTrain <- lapply(seq_along(dataList), function(i){
                    as.matrix(rbind(dataList[[i]], dataListTrain[[i]]))
                })
            } else {
                dataListTrain <- dataList
            }
            dataListTest <- notCommonData
            test_prob <- matrix(0, nrow = length(notCommonSamples), ncol = length(unique(groups)))
            rownames(test_prob) <- notCommonSamples
            colnames(test_prob) <- unique(groups)
            if(!is.null(train_y2))
            {
                test_prob2 <- matrix(0, nrow = length(notCommonSamples), ncol = length(unique(groups2)))
                rownames(test_prob2) <- notCommonSamples
                colnames(test_prob2) <- unique(groups2)
            }
            for (i in seq_along(dataListTrain)) {
                train <- dataListTrain[[i]]
                test <- dataListTest[[i]]
                if (nrow(test) == 0) next
                pca <- rpca.para(train, min(nrow(train), 20), scale = FALSE)
                train <- pca$x
                test <- predict.rpca.para(pca, test)
                test_prob_tmp <- classifierProb(train, groups, test, knn.k)
                test_prob[rownames(test_prob_tmp), ] <- test_prob[rownames(test_prob_tmp), ] + test_prob_tmp
                if(!is.null(train_y2)){
                    test_prob_tmp2 <- classifierProb(train, groups2, test, knn.k)
                    test_prob2[rownames(test_prob_tmp2), ] <- test_prob2[rownames(test_prob_tmp2), ] + test_prob_tmp2
                }
            }
            test_y <- colnames(test_prob)[apply(test_prob, 1, which.max)]
            groups <- rep(0, n_samples)
            names(groups) <- allSamples
            groups[commonSamples] <- train_y
            groups[notCommonSamples] <- test_y
            if(!is.null(train_y2)) {
                test_y2 <- colnames(test_prob2)[apply(test_prob2, 1, which.max)]
                groups2 <- rep(0, n_samples)
                names(groups2) <- allSamples
                groups2[commonSamples] <- train_y2
                groups2[notCommonSamples] <- test_y2
            }
        }
    }
    timediff <- Sys.time() - now
    mlog("Done in ", timediff, " ", units(timediff), ".\n")
    # stage-I labels, stage-II labels, and the per-data-type clustering results
    list(
        cluster1 = groups,
        cluster2 = groups2,
        dataTypeResult = pResult$dataTypeResult
    )
} | /R/subtyping-omics-data.R | no_license | cran/PINSPlus | R | false | false | 26,889 | r | #' @title Subtyping multi-omics data
#' @description Perform subtyping using multiple types of data
#'
#' @param dataList a list of data matrices. Each matrix represents a data type where the rows are items and the columns are features. The matrices must have the same set of items.
#' @param kMax The maximum number of clusters used for automatically detecting the number of clusters in \code{PerturbationClustering}. This parameter is passed to \code{PerturbationClustering} and does not affect the final number of clusters in \code{SubtypingOmicsData}. Default value is \code{5}.
#' @param kMin The minimum number of clusters used for automatically detecting the number of clusters in \code{PerturbationClustering}. This parameter is passed to \code{PerturbationClustering} and does not affect the final number of clusters in \code{SubtypingOmicsData}. Default value is \code{2}.
#' @param k The number of clusters. If k is set then kMin and kMax will be ignored.
#' @param agreementCutoff agreement threshold to be considered consistent. Default value is \code{0.5}.
#' @param ncore Number of cores that the algorithm should use. Default value is \code{1}.
#' @param verbose set it to \code{TRUE} or \code{FALSE} to get more or less details respectively.
#' @param sampledSetSize The number of sample size used for the sampling process when dataset is big. Default value is \code{2000}.
#' @param knn.k The value of k of the k-nearest neighbors algorithm. If knn.k is not set then it will be used elbow method to calculate the k.
#' @param ... these arguments will be passed to \code{PerturbationClustering} algorithm. See details for more information
#'
#' @details
#'
#' \code{SubtypingOmicsData} implements the subtyping of multi-omic data based on the Perturbation clustering algorithm of Nguyen et al (2017), Nguyen et al (2019) and Nguyen, et al. (2021).
#' The input is a list of data matrices where each matrix represents the molecular measurements of a data type. The input matrices must have the same number of rows.
#' \code{SubtypingOmicsData} aims to find the optimum number of subtypes and location of each sample in the clusters from integrated input data \code{dataList} through two processing stages:
#'
#' 1. Stage I: The algorithm first partitions each data type using the function \code{PerturbationClustering}.
#' It then merges the connectivities across data types into similarity matrices.
#' Both kmeans and similarity-based clustering algorithms - partitioning around medoids \code{pam} are used to partition the built similarity.
#' The algorithm returns the partitioning that agrees the most with individual data types.\cr
#' 2. Stage II: The algorithm attempts to split each discovered group if there is a strong agreement between data types,
#' or if the subtyping in Stage I is very unbalanced.
#'
#' When clustering a large number of samples, this function uses a subsampling technique to reduce the computational complexity with the two parameters \code{sampledSetSize} and \code{knn.k}. Please consult Nguyen et al. (2021) for details.
#'
#' @return
#'
#' \code{SubtypingOmicsData} returns a list with at least the following components:
#' \item{cluster1}{A vector of labels indicating the cluster to which each sample is allocated in Stage I}
#' \item{cluster2}{A vector of labels indicating the cluster to which each sample is allocated in Stage II}
#' \item{dataTypeResult}{A list of results for individual data type. Each element of the list is the result of the \code{PerturbationClustering} for the corresponding data matrix provided in dataList.}
#'
#'
#' @references
#'
#' 1. H Nguyen, S Shrestha, S Draghici, & T Nguyen. PINSPlus: a tool for tumor subtype discovery in integrated genomic data. Bioinformatics, 35(16), 2843-2846, (2019).
#'
#' 2. T Nguyen, R Tagett, D Diaz, S Draghici. A novel method for data integration and disease subtyping. Genome Research, 27(12):2025-2039, 2017.
#'
#' 3. T. Nguyen, "Horizontal and vertical integration of bio-molecular data", PhD thesis, Wayne State University, 2017.
#'
#' 4. H Nguyen, D Tran, B Tran, M Roy, A Cassell, S Dascalu, S Draghici & T Nguyen. SMRT: Randomized Data Transformation for Cancer Subtyping and Big Data Analysis. Frontiers in oncology. 2021.
#'
#' @seealso \code{\link{PerturbationClustering}}
#'
#' @examples
#' \donttest{
#' # Load the kidney cancer carcinoma data
#' data(KIRC)
#'
#' # Perform subtyping on the multi-omics data
#' dataList <- list (as.matrix(KIRC$GE), as.matrix(KIRC$ME), as.matrix(KIRC$MI))
#' names(dataList) <- c("GE", "ME", "MI")
#' result <- SubtypingOmicsData(dataList = dataList)
#'
#' # Change Perturbation clustering algorithm's arguments
#' result <- SubtypingOmicsData(
#' dataList = dataList,
#' clusteringMethod = "kmeans",
#' clusteringOptions = list(nstart = 50)
#' )
#'
#' # Plot the Kaplan-Meier curves and calculate Cox p-value
#' library(survival)
#' cluster1=result$cluster1;cluster2=result$cluster2
#' a <- intersect(unique(cluster2), unique(cluster1))
#' names(a) <- intersect(unique(cluster2), unique(cluster1))
#' a[setdiff(unique(cluster2), unique(cluster1))] <- seq(setdiff(unique(cluster2), unique(cluster1))) +
#'   max(cluster1)
#' colors <- a[levels(factor(cluster2))]
#' coxFit <- coxph(
#' Surv(time = Survival, event = Death) ~ as.factor(cluster2),
#' data = KIRC$survival,
#' ties = "exact"
#' )
#' mfit <- survfit(Surv(Survival, Death == 1) ~ as.factor(cluster2), data = KIRC$survival)
#' plot(
#' mfit, col = colors,
#' main = "Survival curves for KIRC, level 2",
#' xlab = "Days", ylab = "Survival",lwd = 2
#' )
#' legend("bottomright",
#' legend = paste(
#' "Cox p-value:",
#' round(summary(coxFit)$sctest[3], digits = 5),
#' sep = ""
#' )
#' )
#' legend(
#' "bottomleft",
#' fill = colors,
#' legend = paste(
#' "Group ",
#' levels(factor(cluster2)),": ", table(cluster2)[levels(factor(cluster2))],
#' sep =""
#' )
#' )
#'
#' }
#' @importFrom FNN knnx.index
#' @importFrom entropy entropy
#' @importFrom impute impute.knn
#' @export
SubtypingOmicsData <- function (dataList, kMin = 2, kMax = 5, k = NULL, agreementCutoff = 0.5, ncore = 1, verbose = T, sampledSetSize = 2000, knn.k = NULL, ...) {
    now = Sys.time()
    # logging helper: a no-op unless verbose is TRUE
    mlog <- if(!verbose) function(...){} else function(...){
        message(...)
        flush.console()
    }
    # Keep the complete data aside. Clustering runs on the samples common to
    # all data types; samples missing from some data type are classified later.
    dataListComplete <- dataList
    commonSamples <- Reduce(f = intersect, x = lapply(dataList, rownames))
    dataList <- lapply(dataList, function(d) d[commonSamples, ])
    notCommonData <- lapply(dataListComplete, function(d) {
        rn <- rownames(d)[(!rownames(d) %in% commonSamples)]
        d <- matrix(d[rn, ], ncol = ncol(d))
        rownames(d) <- rn
        d
    })
    dataListTrain <- NULL
    dataListTest <- NULL
    seed = round(rnorm(1)*10^6)
    dataList <- lapply(dataList, as.data.frame)
    # Subsampling for large cohorts: cluster a random subset of size
    # sampledSetSize, then classify the held-out samples with kNN below.
    if (nrow(dataList[[1]]) > sampledSetSize) {
        n_samples <- nrow(dataList[[1]])
        ind <- sample.int(n_samples, size = sampledSetSize)
        dataListTrain <- lapply(dataList, function(x) x[ind, ])
        dataListTest <- lapply(dataList, function(x) x[-ind, , drop=F])
        dataList <- dataListTrain
    }
    # Run PerturbationClustering on each data type and merge the resulting
    # original/perturbed connectivity matrices across data types.
    # Returns the merged matrices, the agreement score, and (when agreement
    # is high enough or forceSplit is set) a hierarchical partitioning.
    runPerturbationClustering <- function(dataList, kMin, kMax, stage = 1, forceSplit = FALSE, k = NULL){
        dataTypeResult <- lapply(dataList, function(data) {
            set.seed(seed)
            data <- as.matrix(data)
            data <- data[rowSums(is.na(data)) == 0, ]
            PerturbationClustering(data, kMin, kMax, ncore = ncore, verbose = verbose,...)
        })
        allSamples <- unique(unlist(lapply(dataList, rownames)))
        origList <- lapply(dataTypeResult, function(r) r$origS[[r$k]])
        # Align each connectivity matrix on the union of samples and stack
        # them row-wise so column means give the merged similarity.
        origMerged <- do.call(
            what = rbind,
            args = lapply(origList, function(o){
                o <- as.data.frame(o)
                as.numeric(as.matrix(t(as.data.frame(t(o[allSamples, ]))[allSamples, ])))
            })
        )
        orig = matrix(colMeans(origMerged, na.rm = T), nrow = length(allSamples))
        rownames(orig) <- colnames(orig) <- allSamples
        orig <- impute::impute.knn(orig)$data
        orig[is.na(orig)] <- 0
        # PW: pairs connected in every data type (no data type reports 0)
        PW = matrix(as.numeric(colSums(origMerged == 0, na.rm = T) == 0), nrow = length(allSamples))
        rownames(PW) <- colnames(PW) <- allSamples
        # Fraction of sample pairs on which all data types fully agree
        agreement = (sum(orig == 0) + sum(orig == 1) - nrow(orig)) / (nrow(orig) ^ 2 - nrow(orig))
        pertList <- lapply(dataTypeResult, function(r) r$pertS[[r$k]])
        pertMerged <- do.call(
            what = rbind,
            args = lapply(pertList, function(p){
                p <- as.data.frame(p)
                as.numeric(as.matrix(t(as.data.frame(t(p[allSamples, ]))[allSamples, ])))
            })
        )
        pert = matrix(colMeans(pertMerged, na.rm = T), nrow = length(allSamples))
        rownames(pert) <- colnames(pert) <- allSamples
        pert <- impute::impute.knn(pert)$data
        pert[is.na(pert)] <- 0
        groups <- NULL
        mlog("STAGE : ", stage, "\t Agreement : ", agreement)
        if (agreement >= agreementCutoff | forceSplit){
            hcW <- hclust(dist(PW))
            maxK = min(kMax*2, dim(unique(PW, MARGIN = 2))[2] - (stage - 1))
            maxHeight = FindMaxHeight(hcW, maxK = min(2*maxK, 10))
            groups <- cutree(hcW, maxHeight)
            # if k is specific then only use that k if the number of groups > k
            # the max(groups) < k, this may cause small groups
            if (!is.null(k) && max(groups) > k){
                groups <- cutree(hcW, k)
            }
        }
        list(dataTypeResult = dataTypeResult, orig = orig, pert = pert, PW = PW, groups = groups, agreement = agreement)
    }
    pResult <- runPerturbationClustering(dataList, kMin, kMax, k = k)
    groups <- pResult$groups
    groups2 <- NULL
    if (!is.null(groups)) {
        # Stage I produced a partitioning directly (high agreement).
        groups2 <- groups
        if (is.null(k)){
            # Stage II: try to split each sufficiently large group further.
            for (g in sort(unique(groups))) {
                miniGroup <- names(groups[groups == g])
                if (length(miniGroup) > 30) {
                    groupsM <- runPerturbationClustering(dataList = lapply(dataList, function(d) d[miniGroup, ]), kMin = kMin, kMax = min(kMax, 5), stage = 2)$groups
                    if (!is.null(groupsM))
                        groups2[miniGroup] <- paste(g, groupsM, sep = "-")
                }
            }
        } else {
            agreements <- rep(1, length(groups))
            names(agreements) <- names(groups)
            # if k is specific then force further split
            tbl <- sort(table(groups2), decreasing = T)
            minGroupSize <- 30
            while (length(unique(groups2)) < k){
                if (all(tbl <= 30)) {
                    minGroupSize <- 10
                }
                # cannot split anymore
                if (all(tbl <= 10)) break()
                for (g in names(tbl)){
                    miniGroup <- names(groups2[groups2 == g])
                    if (length(miniGroup) > minGroupSize) {
                        splitRes <- runPerturbationClustering(dataList = lapply(dataList, function(d) d[miniGroup, ]), kMin = kMin, kMax = min(kMax, 5), stage = 2, forceSplit = T)
                        groupsM <- splitRes$groups
                        if (!is.null(groupsM)){
                            groups2[miniGroup] <- paste(g, groupsM, sep = "-")
                            agreements[miniGroup] <- splitRes$agreement
                        }
                    }
                }
                tbl <- sort(table(groups2), decreasing = T)
            }
            # now after further splitting, the number of groups can be > k
            # need to merge cluster based on their agreement
            agreements.unique = unique(agreements)
            for (aggr in sort(unique(agreements))){
                if (length(unique(groups2)) == k) break()
                merge.group <- agreements == aggr
                k.smallGroup <- length(unique(groups2[merge.group]))
                k.need <- k - (length(unique(groups2)) - k.smallGroup + 1) + 1
                groups2[merge.group] <- unlist(lapply(strsplit(groups2[merge.group], "-"), function(g){
                    paste0(g[1:(length(g)-1)], collapse = "-")
                }))
                if (k.need > 1){
                    # FIX: re-split the samples being merged (merge.group); the
                    # previous code reused the stale loop variable miniGroup
                    # from the splitting loop above (cf. the parallel merge
                    # loop later in this function, which uses merge.group).
                    splitRes <- runPerturbationClustering(dataList = lapply(dataList, function(d) d[merge.group, ]),
                                                          kMin = kMin, kMax = min(kMax, 5), stage = 2, forceSplit = T,
                                                          k = k.need)
                    groupsM <- splitRes$groups
                    groups2[merge.group] <- paste(groups2[merge.group], groupsM, sep = "-")
                }
                agreements[merge.group] <- 1
            }
        }
    } else{
        # Low agreement: build a consensus partitioning from the merged
        # similarity using PAM and hierarchical clustering, keeping whichever
        # agrees most with the individual data types.
        set.seed(seed)
        orig <- pResult$orig
        dataTypeResult <- pResult$dataTypeResult
        clusteringAlgorithm = GetClusteringAlgorithm(...)$fun
        groupings <- lapply(dataTypeResult, function(r) clusteringAlgorithm(data = r$origS[[r$k]], k = r$k))
        pGroups <- ClusterUsingPAM(orig = orig, kMax = kMax*2, groupings = groupings)
        hGroups <- ClusterUsingHierarchical(orig = orig, kMax = kMax*2, groupings = groupings)
        pAgree = pGroups$agree; hAgree = hGroups$agree;
        # tie on the original similarity -> break the tie on the perturbed one
        groups <- (if (pAgree > hAgree) pGroups else if (hAgree > pAgree) hGroups else {
            pAgree = ClusterUsingPAM(orig = pResult$pert, kMax = kMax, groupings = groupings)$agree
            hAgree = ClusterUsingHierarchical(orig = pResult$pert, kMax = kMax, groupings = groupings)$agree
            if (hAgree - pAgree >= 1e-3) hGroups else pGroups
        })$cluster
        names(groups) <- rownames(orig)
        groups2 <- groups
        if (is.null(k)){
            mlog("Check if can proceed to stage II")
            # low entropy == unbalanced partitioning -> attempt stage II
            normalizedEntropy = entropy::entropy(table(groups)) / log(length(unique(groups)), exp(1))
            if (normalizedEntropy < 0.5) {
                for (g in sort(unique(groups))) {
                    miniGroup <- names(groups[groups == g])
                    #this is just to make sure we don't split a group that is already very small
                    if (length(miniGroup) > 30) {
                        #this is to check if the data types in this group can be split
                        groupsM <- runPerturbationClustering(dataList = lapply(dataList, function(d) d[miniGroup, ]),kMin = kMin, kMax = min(kMax, 5), stage = 2, forceSplit = T, k = NULL)$groups
                        if (!is.null(groupsM))
                            groups2[miniGroup] <- paste(g, groupsM, sep = "-")
                    }
                }
            }
        } else {
            if (length(unique(groups2)) > k){
                # too many groups: recluster directly at the requested k
                pGroups <- ClusterUsingPAM(orig = orig, kMax = kMax*2, groupings = groupings, k)
                hGroups <- ClusterUsingHierarchical(orig = orig, kMax = kMax*2, groupings = groupings, k)
                pAgree = pGroups$agree; hAgree = hGroups$agree;
                groups <- (if (pAgree > hAgree) pGroups else if (hAgree > pAgree) hGroups else {
                    pAgree = ClusterUsingPAM(orig = pResult$pert, kMax = kMax, groupings = groupings, k)$agree
                    hAgree = ClusterUsingHierarchical(orig = pResult$pert, kMax = kMax, groupings = groupings, k)$agree
                    if (hAgree - pAgree >= 1e-3) hGroups else pGroups
                })$cluster
                names(groups) <- rownames(orig)
                groups2 <- groups
            } else if (length(unique(groups2)) < k){
                # split like normal using entropy
                normalizedEntropy = entropy::entropy(table(groups)) / log(length(unique(groups)), exp(1))
                agreements <- rep(1, length(groups))
                names(agreements) <- names(groups)
                if (normalizedEntropy < 0.5) {
                    for (g in sort(unique(groups))) {
                        miniGroup <- names(groups[groups == g])
                        #this is just to make sure we don't split a group that is already very small
                        if (length(miniGroup) > 30) {
                            #this is to check if the data types in this group can be split
                            splitRes <- runPerturbationClustering(dataList = lapply(dataList, function(d) d[miniGroup, ]), kMin = kMin, kMax = min(kMax, 5), stage = 2, forceSplit = T)
                            groupsM <- splitRes$groups
                            if (!is.null(groupsM)){
                                groups2[miniGroup] <- paste(g, groupsM, sep = "-")
                                agreements[miniGroup] <- splitRes$agreement
                            }
                        }
                    }
                }
                # if the number of group is still less than k, then force split
                if (length(unique(groups2)) < k){
                    # if k is specific then force further split
                    tbl <- sort(table(groups2), decreasing = T)
                    minGroupSize <- 30
                    while (length(unique(groups2)) < k){
                        if (all(tbl <= 30)) {
                            minGroupSize <- 10
                        }
                        # cannot split anymore
                        if (all(tbl <= 10)) break()
                        for (g in names(tbl)){
                            miniGroup <- names(groups2[groups2 == g])
                            if (length(miniGroup) > minGroupSize) {
                                splitRes <- runPerturbationClustering(dataList = lapply(dataList, function(d) d[miniGroup, ]), kMin = kMin, kMax = min(kMax, 5), stage = 2, forceSplit = T)
                                groupsM <- splitRes$groups
                                if (!is.null(groupsM)){
                                    groups2[miniGroup] <- paste(g, groupsM, sep = "-")
                                    agreements[miniGroup] <- splitRes$agreement
                                }
                            }
                        }
                        tbl <- sort(table(groups2), decreasing = T)
                    }
                }
                # now after further splitting, the number of groups can be > k
                # need to merge cluster based on their agreement
                agreements.unique = unique(agreements)
                for (aggr in sort(unique(agreements))){
                    if (length(unique(groups2)) == k) break()
                    merge.group <- agreements == aggr
                    k.smallGroup <- length(unique(groups2[merge.group]))
                    k.need <- k - (length(unique(groups2)) - k.smallGroup + 1) + 1
                    groups2[merge.group] <- unlist(lapply(strsplit(groups2[merge.group], "-"), function(g){
                        paste0(g[1:(length(g)-1)], collapse = "-")
                    }))
                    if (k.need > 1){
                        splitRes <- runPerturbationClustering(dataList = lapply(dataList, function(d) d[merge.group, ]),
                                                              kMin = kMin, kMax = min(kMax, 5), stage = 2, forceSplit = T,
                                                              k = k.need)
                        groupsM <- splitRes$groups
                        groups2[merge.group] <- paste(groups2[merge.group], groupsM, sep = "-")
                    }
                    agreements[merge.group] <- 1
                }
            }
        }
    }
    # Classify the held-out samples (big-data subsampling) with kNN voting.
    {
        train_y <- groups
        train_y2 <- groups2
        if(!is.null(dataListTest)) {
            set.seed(seed)
            RcppParallel::setThreadOptions(ncore)
            test_prob <- matrix(0, nrow = n_samples - sampledSetSize, ncol = length(unique(groups)))
            if(!is.null(train_y2))
            {
                test_prob2 <- matrix(0, nrow = n_samples - sampledSetSize, ncol = length(unique(groups2)))
            }
            for (i in 1:length(dataListTrain)) {
                train <- dataListTrain[[i]]
                test <- dataListTest[[i]]
                if(ncol(train)*nrow(train) > 2e7) {
                    # very large data type: reuse the PCA already computed
                    pca <- pResult$dataTypeResult[[i]]$pca
                } else {
                    # FIX: cap the number of components by the training-set
                    # size; the previous code wrote min(nrow(data), 20) where
                    # `data` was not in scope (nrow() of the base function
                    # `data` is NULL, so the cap silently became 20).
                    pca <- rpca.para(train, min(nrow(train), 20), scale = F)
                }
                train <- pca$x
                test <- predict.rpca.para(pca, test)
                test_prob <- test_prob + classifierProb(train, groups, test, knn.k)
                if(!is.null(train_y2)){
                    test_prob2 <- test_prob2 + classifierProb(train, groups2, test, knn.k)
                }
            }
            test_y <- apply(test_prob, 1, which.max)
            groups <- rep(0, n_samples)
            groups[ind] <- train_y
            groups[-ind] <- test_y
            if(!is.null(train_y2)) {
                test_y2 <- apply(test_prob2, 1, which.max)
                groups2 <- rep(0, n_samples)
                groups2[ind] <- train_y2
                groups2[-ind] <- test_y2
            }
        }
    }
    # for not common data
    {
        # if data is big, now groups include the testing
        train_y <- groups
        train_y2 <- groups2
        allSamples <- unique(unlist(lapply(dataListComplete, rownames)))
        n_samples <- length(allSamples)
        notCommonSamples <- allSamples[!(allSamples %in% commonSamples)]
        if(length(allSamples) > length(commonSamples)) {
            set.seed(seed)
            RcppParallel::setThreadOptions(ncore)
            # if dataListTrain is not null this means the data is big
            if (!is.null(dataListTrain)){
                dataListTrain <- lapply(1:length(dataList), function(i){
                    as.matrix(rbind(dataList[[i]], dataListTrain[[i]]))
                })
            } else {
                dataListTrain <- dataList
            }
            dataListTest <- notCommonData
            test_prob <- matrix(0, nrow = length(notCommonSamples), ncol = length(unique(groups)))
            rownames(test_prob) <- notCommonSamples
            colnames(test_prob) <- unique(groups)
            if(!is.null(train_y2))
            {
                test_prob2 <- matrix(0, nrow = length(notCommonSamples), ncol = length(unique(groups2)))
                rownames(test_prob2) <- notCommonSamples
                colnames(test_prob2) <- unique(groups2)
            }
            for (i in 1:length(dataListTrain)) {
                train <- dataListTrain[[i]]
                test <- dataListTest[[i]]
                if (nrow(test) == 0) next()
                pca <- rpca.para(train, min(nrow(train), 20), scale = F)
                train <- pca$x
                test <- predict.rpca.para(pca, test)
                test_prob_tmp <- classifierProb(train, groups, test, knn.k)
                test_prob[rownames(test_prob_tmp), ] <- test_prob[rownames(test_prob_tmp), ] + test_prob_tmp
                if(!is.null(train_y2)){
                    test_prob_tmp2 <- classifierProb(train, groups2, test, knn.k)
                    test_prob2[rownames(test_prob_tmp2), ] <- test_prob2[rownames(test_prob_tmp2), ] + test_prob_tmp2
                }
            }
            test_y <- colnames(test_prob)[apply(test_prob, 1, which.max)]
            groups <- rep(0, n_samples)
            names(groups) <- allSamples
            groups[commonSamples] <- train_y
            groups[notCommonSamples] <- test_y
            if(!is.null(train_y2)) {
                test_y2 <- colnames(test_prob2)[apply(test_prob2, 1, which.max)]
                groups2 <- rep(0, n_samples)
                names(groups2) <- allSamples
                groups2[commonSamples] <- train_y2
                groups2[notCommonSamples] <- test_y2
            }
        }
    }
    timediff = Sys.time() - now;
    mlog("Done in ", timediff, " ", units(timediff), ".\n")
    list(
        cluster1 = groups,
        cluster2 = groups2,
        dataTypeResult = pResult$dataTypeResult
    )
}
\name{south_carolina.cdp}
\Rdversion{1.1}
\alias{south_carolina.cdp}
\docType{data}
\title{
south_carolina.cdp
}
\description{
south_carolina.cdp is a \code{\link[sp:SpatialPolygonsDataFrame]{SpatialPolygonsDataFrame}} with polygons made from the 2000 US Census tiger/line boundary files (\url{http://www.census.gov/geo/www/tiger/}) for Census Designated Places (CDP). It also contains 86 variables from the Summary File 1 (SF 1) which contains the 100-percent data (\url{http://www.census.gov/prod/cen2000/doc/sf1.pdf}).
All polygons are projected in CRS("+proj=longlat +datum=NAD83")
}
\usage{data(south_carolina.cdp)}
%\format{
%}
\details{
\bold{ID Variables} \cr
\tabular{ll}{
data field name \tab Full Description \cr
place \tab FIPS code \cr
state \tab State FIPS code \cr
name \tab CDP name \cr
designation \tab city/town/CDP/etc.
}
\bold{Census Variables} \cr
\tabular{lll}{
Census SF1 Field Name \tab data field name \tab Full Description \cr
(P007001) \tab pop2000 \tab population 2000 \cr
(P007002) \tab white \tab white alone \cr
(P007003) \tab black \tab black or african american alone \cr
(P007004) \tab ameri.es \tab american indian and alaska native alone \cr
(P007005) \tab asian \tab asian alone \cr
(P007006) \tab hawn.pi \tab native hawaiian and other pacific islander alone \cr
(P007007) \tab other \tab some other race alone \cr
(P007008) \tab mult.race \tab 2 or more races \cr
(P011001) \tab hispanic \tab people who are hispanic or latino \cr
(P008002) \tab not.hispanic.t \tab Not Hispanic or Latino \cr
(P008003) \tab nh.white \tab White alone \cr
(P008004) \tab nh.black \tab Black or African American alone \cr
(P008005) \tab nh.ameri.es \tab American Indian and Alaska Native alone \cr
(P008006) \tab nh.asian \tab Asian alone \cr
(P008007) \tab nh.hawn.pi \tab Native Hawaiian and Other Pacific Islander alone \cr
(P008008) \tab nh.other \tab Some other race alone \cr
(P008010) \tab hispanic.t \tab Hispanic or Latino \cr
(P008011) \tab h.white \tab White alone \cr
(P008012) \tab h.black \tab Black or African American alone \cr
(P008013) \tab h.american.es \tab American Indian and Alaska Native alone \cr
(P008014) \tab h.asian \tab Asian alone \cr
(P008015) \tab h.hawn.pi \tab Native Hawaiian and Other Pacific Islander alone \cr
(P008016) \tab h.other \tab Some other race alone \cr
(P012002) \tab males \tab males \cr
(P012026) \tab females \tab females \cr
(P012003 + P012027) \tab age.under5 \tab male and female under 5 yrs \cr
(P012004-006 + P012028-030) \tab age.5.17 \tab male and female 5 to 17 yrs \cr
(P012007-009 + P012031-033) \tab age.18.21 \tab male and female 18 to 21 yrs \cr
(P012010-011 + P012034-035) \tab age.22.29 \tab male and female 22 to 29 yrs \cr
(P012012-013 + P012036-037) \tab age.30.39 \tab male and female 30 to 39 yrs \cr
(P012014-015 + P012038-039) \tab age.40.49 \tab male and female 40 to 49 yrs \cr
(P012016-019 + P012040-043) \tab age.50.64 \tab male and female 50 to 64 yrs \cr
(P012020-025 + P012044-049) \tab age.65.up \tab male and female 65 yrs and over \cr
(P013001) \tab med.age \tab median age, both sexes \cr
(P013002) \tab med.age.m \tab median age, males \cr
(P013003) \tab med.age.f \tab median age, females \cr
(P015001) \tab households \tab households \cr
(P017001) \tab ave.hh.sz \tab average household size \cr
(P018003) \tab hsehld.1.m \tab 1-person household, male householder \cr
(P018004) \tab hsehld.1.f \tab 1-person household, female householder \cr
(P018008) \tab marhh.chd \tab family households, married-couple family, w/ own children under 18 yrs \cr
(P018009) \tab marhh.no.c \tab family households, married-couple family, no own children under 18 yrs \cr
(P018012) \tab mhh.child \tab family households, other family, male householder, no wife present, w/ own children under 18 yrs \cr
(P018015) \tab fhh.child \tab family households, other family, female householder, no husband present, w/ own children under 18 yrs \cr
(H001001) \tab hh.units \tab housing units total \cr
(H002002) \tab hh.urban \tab urban housing units \cr
(H002005) \tab hh.rural \tab rural housing units \cr
(H003002) \tab hh.occupied \tab occupied housing units \cr
(H003003) \tab hh.vacant \tab vacant housing units \cr
(H004002) \tab hh.owner \tab owner occupied housing units \cr
(H004003) \tab hh.renter \tab renter occupied housing units \cr
(H013002) \tab hh.1person \tab 1-person household \cr
(H013003) \tab hh.2person \tab 2-person household \cr
(H013004) \tab hh.3person \tab 3-person household \cr
(H013005) \tab hh.4person \tab 4-person household \cr
(H013006) \tab hh.5person \tab 5-person household \cr
(H013007) \tab hh.6person \tab 6-person household \cr
(H013008) \tab hh.7person \tab 7-person household \cr
(H015I003)+(H015I011) \tab hh.nh.white.1p \tab (white only, not hispanic ) 1-person household \cr
(H015I004)+(H015I012) \tab hh.nh.white.2p \tab (white only, not hispanic ) 2-person household \cr
(H015I005)+(H015I013) \tab hh.nh.white.3p \tab (white only, not hispanic ) 3-person household \cr
(H015I006)+(H015I014) \tab hh.nh.white.4p \tab (white only, not hispanic ) 4-person household \cr
(H015I007)+(H015I015) \tab hh.nh.white.5p \tab (white only, not hispanic ) 5-person household \cr
(H015I008)+(H015I016) \tab hh.nh.white.6p \tab (white only, not hispanic ) 6-person household \cr
(H015I009)+(H015I017) \tab hh.nh.white.7p \tab (white only, not hispanic ) 7-person household \cr
(H015H003)+(H015H011) \tab hh.hisp.1p \tab (hispanic) 1-person household \cr
(H015H004)+(H015H012) \tab hh.hisp.2p \tab (hispanic) 2-person household \cr
(H015H005)+(H015H013) \tab hh.hisp.3p \tab (hispanic) 3-person household \cr
(H015H006)+(H015H014) \tab hh.hisp.4p \tab (hispanic) 4-person household \cr
(H015H007)+(H015H015) \tab hh.hisp.5p \tab (hispanic) 5-person household \cr
(H015H008)+(H015H016) \tab hh.hisp.6p \tab (hispanic) 6-person household \cr
(H015H009)+(H015H017) \tab hh.hisp.7p \tab (hispanic) 7-person household \cr
(H015B003)+(H015B011) \tab hh.black.1p \tab (black) 1-person household \cr
(H015B004)+(H015B012) \tab hh.black.2p \tab (black) 2-person household \cr
(H015B005)+(H015B013) \tab hh.black.3p \tab (black) 3-person household \cr
(H015B006)+(H015B014) \tab hh.black.4p \tab (black) 4-person household \cr
(H015B007)+(H015B015) \tab hh.black.5p \tab (black) 5-person household \cr
(H015B008)+(H015B016) \tab hh.black.6p \tab (black) 6-person household \cr
(H015B009)+(H015B017) \tab hh.black.7p \tab (black) 7-person household \cr
(H015D003)+(H015D011) \tab hh.asian.1p \tab (asian) 1-person household \cr
(H015D004)+(H015D012) \tab hh.asian.2p \tab (asian) 2-person household \cr
(H015D005)+(H015D013) \tab hh.asian.3p \tab (asian) 3-person household \cr
(H015D006)+(H015D014) \tab hh.asian.4p \tab (asian) 4-person household \cr
(H015D007)+(H015D015) \tab hh.asian.5p \tab (asian) 5-person household \cr
(H015D008)+(H015D016) \tab hh.asian.6p \tab (asian) 6-person household \cr
(H015D009)+(H015D017) \tab hh.asian.7p \tab (asian) 7-person household \cr
}
}
\source{
Census 2000 Summary File 1 [name of state1 or United States]/prepared by the U.S. Census
Bureau, 2001.
}
\references{
\url{http://www.census.gov/}\cr
\url{http://www2.census.gov/cgi-bin/shapefiles/national-files} \cr
\url{http://www.census.gov/prod/cen2000/doc/sf1.pdf} \cr
}
\examples{
data(south_carolina.cdp)
############################################
## Helper function for handling coloring of the map
############################################
# Map a numeric vector `dem` onto one fill color per polygon of the
# SpatialPolygonsDataFrame `x`, binning `dem` into quartile classes.
#   x   - SpatialPolygonsDataFrame whose polygons are being colored
#   dem - numeric vector, one value per polygon (e.g. population counts)
#   y   - optional vector of class colors; defaults to 4 heat.colors picks
# Returns a list: colors (one color per polygon), dem.cut (class labels),
# table.colors (one color per class) for building a legend.
color.map<- function(x,dem,y=NULL){
l.poly<-length(x@polygons)
# quartile breaks; cut() leaves the minimum value outside the lowest
# interval (NA) because include.lowest is not set ...
dem.num<- cut(dem ,breaks=ceiling(quantile(dem)),dig.lab = 6)
# ... so push any NA values into the lowest class
dem.num[which(is.na(dem.num)==TRUE)]<-levels(dem.num)[1]
l.uc<-length(table(dem.num))
if(is.null(y)){
col.heat<-heat.colors(16)[c(14,8,4,1)] ##fixed set of four colors
}else{
col.heat<-y
}
# pair each class label with its color, then paint every polygon
dem.col<-cbind(col.heat,names(table(dem.num)))
colors.dem<-vector(length=l.poly)
for(i in 1:l.uc){
colors.dem[which(dem.num==dem.col[i,2])]<-dem.col[i,1]
}
out<-list(colors=colors.dem,dem.cut=dem.col[,2],table.colors=dem.col[,1])
return(out)
}
############################################
## Helper function for handling coloring of the map
############################################
colors.use<-color.map(south_carolina.cdp,south_carolina.cdp$pop2000)
plot(south_carolina.cdp,col=colors.use$colors)
#text(coordinates(south_carolina.cdp),south_carolina.cdp$name,cex=.3)
title(main="Census Designated Places \n of South_carolina, 2000", sub="Quantiles (equal frequency)")
legend("bottomright",legend=colors.use$dem.cut,fill=colors.use$table.colors,bty="o",title="Population Count",bg="white")
###############################
### Alternative way to do the above
###############################
\dontrun{
####This example requires the following additional libraries
library(RColorBrewer)
library(classInt)
library(maps)
####This example requires the following additional libraries
data(south_carolina.cdp)
map('state',region='south_carolina')
plotvar <- south_carolina.cdp$pop2000
nclr <- 4
#BuPu
plotclr <- brewer.pal(nclr,"BuPu")
class <- classIntervals(plotvar, nclr, style="quantile")
colcode <- findColours(class, plotclr)
plot(south_carolina.cdp, col=colcode, border="transparent",add=TRUE)
#transparent
title(main="Census Designated Places\n of South_carolina, 2000", sub="Quantiles (equal frequency)")
map.text("county", "south_carolina",cex=.7,add=TRUE)
map('county','south_carolina',add=TRUE)
legend("bottomright","(x,y)", legend=names(attr(colcode, "table")),fill=attr(colcode, "palette"),
cex=0.9, bty="o", title="Population Count",bg="white")
}
}
\keyword{datasets}
| /man/south_carolina.cdp.Rd | no_license | cran/UScensus2000cdp | R | false | false | 9,642 | rd | \name{south_carolina.cdp}
\Rdversion{1.1}
\alias{south_carolina.cdp}
\docType{data}
\title{
south_carolina.cdp
}
\description{
south_carolina.cdp is a \code{\link[sp:SpatialPolygonsDataFrame]{SpatialPolygonsDataFrame}} with polygons made from the 2000 US Census tiger/line boundary files (\url{http://www.census.gov/geo/www/tiger/}) for Census Designated Places (CDP). It also contains 86 variables from the Summary File 1 (SF 1) which contains the 100-percent data (\url{http://www.census.gov/prod/cen2000/doc/sf1.pdf}).
All polygons are projected in CRS("+proj=longlat +datum=NAD83")
}
\usage{data(south_carolina.cdp)}
%\format{
%}
\details{
\bold{ID Variables} \cr
\tabular{ll}{
data field name \tab Full Description \cr
place \tab FIPS code \cr
state \tab State FIPS code \cr
name \tab CDP name \cr
designation \tab city/town/CDP/etc.
}
\bold{Census Variables} \cr
\tabular{lll}{
Census SF1 Field Name \tab data field name \tab Full Description \cr
(P007001) \tab pop2000 \tab population 2000 \cr
(P007002) \tab white \tab white alone \cr
(P007003) \tab black \tab black or african american alone \cr
(P007004) \tab ameri.es \tab american indian and alaska native alone \cr
(P007005) \tab asian \tab asian alone \cr
(P007006) \tab hawn.pi \tab native hawaiian and other pacific islander alone \cr
(P007007) \tab other \tab some other race alone \cr
(P007008) \tab mult.race \tab 2 or more races \cr
(P011001) \tab hispanic \tab people who are hispanic or latino \cr
(P008002) \tab not.hispanic.t \tab Not Hispanic or Latino \cr
(P008003) \tab nh.white \tab White alone \cr
(P008004) \tab nh.black \tab Black or African American alone \cr
(P008005) \tab nh.ameri.es \tab American Indian and Alaska Native alone \cr
(P008006) \tab nh.asian \tab Asian alone \cr
(P008007) \tab nh.hawn.pi \tab Native Hawaiian and Other Pacific Islander alone \cr
(P008008) \tab nh.other \tab Some other race alone \cr
(P008010) \tab hispanic.t \tab Hispanic or Latino \cr
(P008011) \tab h.white \tab White alone \cr
(P008012) \tab h.black \tab Black or African American alone \cr
(P008013) \tab h.american.es \tab American Indian and Alaska Native alone \cr
(P008014) \tab h.asian \tab Asian alone \cr
(P008015) \tab h.hawn.pi \tab Native Hawaiian and Other Pacific Islander alone \cr
(P008016) \tab h.other \tab Some other race alone \cr
(P012002) \tab males \tab males \cr
(P012026) \tab females \tab females \cr
(P012003 + P012027) \tab age.under5 \tab male and female under 5 yrs \cr
(P012004-006 + P012028-030) \tab age.5.17 \tab male and female 5 to 17 yrs \cr
(P012007-009 + P012031-033) \tab age.18.21 \tab male and female 18 to 21 yrs \cr
(P012010-011 + P012034-035) \tab age.22.29 \tab male and female 22 to 29 yrs \cr
(P012012-013 + P012036-037) \tab age.30.39 \tab male and female 30 to 39 yrs \cr
(P012014-015 + P012038-039) \tab age.40.49 \tab male and female 40 to 49 yrs \cr
(P012016-019 + P012040-043) \tab age.50.64 \tab male and female 50 to 64 yrs \cr
(P012020-025 + P012044-049) \tab age.65.up \tab male and female 65 yrs and over \cr
(P013001) \tab med.age \tab median age, both sexes \cr
(P013002) \tab med.age.m \tab median age, males \cr
(P013003) \tab med.age.f \tab median age, females \cr
(P015001) \tab households \tab households \cr
(P017001) \tab ave.hh.sz \tab average household size \cr
(P018003) \tab hsehld.1.m \tab 1-person household, male householder \cr
(P018004) \tab hsehld.1.f \tab 1-person household, female householder \cr
(P018008) \tab marhh.chd \tab family households, married-couple family, w/ own children under 18 yrs \cr
(P018009) \tab marhh.no.c \tab family households, married-couple family, no own children under 18 yrs \cr
(P018012) \tab mhh.child \tab family households, other family, male householder, no wife present, w/ own children under 18 yrs \cr
(P018015) \tab fhh.child \tab family households, other family, female householder, no husband present, w/ own children under 18 yrs \cr
(H001001) \tab hh.units \tab housing units total \cr
(H002002) \tab hh.urban \tab urban housing units \cr
(H002005) \tab hh.rural \tab rural housing units \cr
(H003002) \tab hh.occupied \tab occupied housing units \cr
(H003003) \tab hh.vacant \tab vacant housing units \cr
(H004002) \tab hh.owner \tab owner occupied housing units \cr
(H004003) \tab hh.renter \tab renter occupied housing units \cr
(H013002) \tab hh.1person \tab 1-person household \cr
(H013003) \tab hh.2person \tab 2-person household \cr
(H013004) \tab hh.3person \tab 3-person household \cr
(H013005) \tab hh.4person \tab 4-person household \cr
(H013006) \tab hh.5person \tab 5-person household \cr
(H013007) \tab hh.6person \tab 6-person household \cr
(H013008) \tab hh.7person \tab 7-person household \cr
(H015I003)+(H015I011) \tab hh.nh.white.1p \tab (white only, not hispanic ) 1-person household \cr
(H015I004)+(H015I012) \tab hh.nh.white.2p \tab (white only, not hispanic ) 2-person household \cr
(H015I005)+(H015I013) \tab hh.nh.white.3p \tab (white only, not hispanic ) 3-person household \cr
(H015I006)+(H015I014) \tab hh.nh.white.4p \tab (white only, not hispanic ) 4-person household \cr
(H015I007)+(H015I015) \tab hh.nh.white.5p \tab (white only, not hispanic ) 5-person household \cr
(H015I008)+(H015I016) \tab hh.nh.white.6p \tab (white only, not hispanic ) 6-person household \cr
(H015I009)+(H015I017) \tab hh.nh.white.7p \tab (white only, not hispanic ) 7-person household \cr
(H015H003)+(H015H011) \tab hh.hisp.1p \tab (hispanic) 1-person household \cr
(H015H004)+(H015H012) \tab hh.hisp.2p \tab (hispanic) 2-person household \cr
(H015H005)+(H015H013) \tab hh.hisp.3p \tab (hispanic) 3-person household \cr
(H015H006)+(H015H014) \tab hh.hisp.4p \tab (hispanic) 4-person household \cr
(H015H007)+(H015H015) \tab hh.hisp.5p \tab (hispanic) 5-person household \cr
(H015H008)+(H015H016) \tab hh.hisp.6p \tab (hispanic) 6-person household \cr
(H015H009)+(H015H017) \tab hh.hisp.7p \tab (hispanic) 7-person household \cr
(H015B003)+(H015B011) \tab hh.black.1p \tab (black) 1-person household \cr
(H015B004)+(H015B012) \tab hh.black.2p \tab (black) 2-person household \cr
(H015B005)+(H015B013) \tab hh.black.3p \tab (black) 3-person household \cr
(H015B006)+(H015B014) \tab hh.black.4p \tab (black) 4-person household \cr
(H015B007)+(H015B015) \tab hh.black.5p \tab (black) 5-person household \cr
(H015B008)+(H015B016) \tab hh.black.6p \tab (black) 6-person household \cr
(H015B009)+(H015B017) \tab hh.black.7p \tab (black) 7-person household \cr
(H015D003)+(H015D011) \tab hh.asian.1p \tab (asian) 1-person household \cr
(H015D004)+(H015D012) \tab hh.asian.2p \tab (asian) 2-person household \cr
(H015D005)+(H015D013) \tab hh.asian.3p \tab (asian) 3-person household \cr
(H015D006)+(H015D014) \tab hh.asian.4p \tab (asian) 4-person household \cr
(H015D007)+(H015D015) \tab hh.asian.5p \tab (asian) 5-person household \cr
(H015D008)+(H015D016) \tab hh.asian.6p \tab (asian) 6-person household \cr
(H015D009)+(H015D017) \tab hh.asian.7p \tab (asian) 7-person household \cr
}
}
\source{
Census 2000 Summary File 1 [name of state1 or United States]/prepared by the U.S. Census
Bureau, 2001.
}
\references{
\url{http://www.census.gov/}\cr
\url{http://www2.census.gov/cgi-bin/shapefiles/national-files} \cr
\url{http://www.census.gov/prod/cen2000/doc/sf1.pdf} \cr
}
\examples{
data(south_carolina.cdp)
############################################
## Helper function for handling coloring of the map
############################################
# Map a numeric vector `dem` onto one fill color per polygon of the
# SpatialPolygonsDataFrame `x`, binning `dem` into quartile classes.
#   x   - SpatialPolygonsDataFrame whose polygons are being colored
#   dem - numeric vector, one value per polygon (e.g. population counts)
#   y   - optional vector of class colors; defaults to 4 heat.colors picks
# Returns a list: colors (one color per polygon), dem.cut (class labels),
# table.colors (one color per class) for building a legend.
color.map<- function(x,dem,y=NULL){
l.poly<-length(x@polygons)
# quartile breaks; cut() leaves the minimum value outside the lowest
# interval (NA) because include.lowest is not set ...
dem.num<- cut(dem ,breaks=ceiling(quantile(dem)),dig.lab = 6)
# ... so push any NA values into the lowest class
dem.num[which(is.na(dem.num)==TRUE)]<-levels(dem.num)[1]
l.uc<-length(table(dem.num))
if(is.null(y)){
col.heat<-heat.colors(16)[c(14,8,4,1)] ##fixed set of four colors
}else{
col.heat<-y
}
# pair each class label with its color, then paint every polygon
dem.col<-cbind(col.heat,names(table(dem.num)))
colors.dem<-vector(length=l.poly)
for(i in 1:l.uc){
colors.dem[which(dem.num==dem.col[i,2])]<-dem.col[i,1]
}
out<-list(colors=colors.dem,dem.cut=dem.col[,2],table.colors=dem.col[,1])
return(out)
}
############################################
## Helper function for handling coloring of the map
############################################
colors.use<-color.map(south_carolina.cdp,south_carolina.cdp$pop2000)
plot(south_carolina.cdp,col=colors.use$colors)
#text(coordinates(south_carolina.cdp),south_carolina.cdp$name,cex=.3)
title(main="Census Designated Places \n of South_carolina, 2000", sub="Quantiles (equal frequency)")
legend("bottomright",legend=colors.use$dem.cut,fill=colors.use$table.colors,bty="o",title="Population Count",bg="white")
###############################
### Alternative way to do the above
###############################
\dontrun{
####This example requires the following additional libraries
library(RColorBrewer)
library(classInt)
library(maps)
####This example requires the following additional libraries
data(south_carolina.cdp)
map('state',region='south_carolina')
plotvar <- south_carolina.cdp$pop2000
nclr <- 4
#BuPu
plotclr <- brewer.pal(nclr,"BuPu")
class <- classIntervals(plotvar, nclr, style="quantile")
colcode <- findColours(class, plotclr)
plot(south_carolina.cdp, col=colcode, border="transparent",add=TRUE)
#transparent
title(main="Census Designated Places\n of South_carolina, 2000", sub="Quantiles (equal frequency)")
map.text("county", "south_carolina",cex=.7,add=TRUE)
map('county','south_carolina',add=TRUE)
legend("bottomright","(x,y)", legend=names(attr(colcode, "table")),fill=attr(colcode, "palette"),
cex=0.9, bty="o", title="Population Count",bg="white")
}
}
\keyword{datasets}
|
# plot1.R: histogram of Global Active Power over 2007-02-01/02 from the
# UCI "Individual household electric power consumption" data set.
# NOTE(review): hard-coded absolute path — only works on the author's machine.
setwd("C:/Users/brecre01/Documents/Coursera")
# Semicolon-separated file; "?" marks missing readings and becomes NA.
mydata <- read.csv("household_power_consumption.txt", header=TRUE, sep=';', check.names=FALSE, na.strings="?", stringsAsFactors=FALSE, comment.char="", quote='\"')
# Parse the day/month/year date column into Date objects.
mydata$Date <- as.Date(mydata$Date, format="%d/%m/%Y")
# Keep only 2007-02-01 and 2007-02-02 (the string bounds are coerced to Date
# by the comparison operators).
datasm <- subset(mydata, subset=(Date >= "2007-02-01" & Date < "2007-02-03"))
# Combine date and time into a single POSIXct timestamp column.
datetimeonly <- paste(as.Date(datasm$Date), datasm$Time)
datasm$Datetime <- as.POSIXct(datetimeonly)
# Draw the histogram on the screen device, then copy it to plot1.png
# (dev.off() on the next line closes the png device).
hist(datasm$Global_active_power, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)", ylab="Frequency")
dev.copy(png, file="plot1.png", height=480, width=480)
dev.off() | /plot1.R | no_license | Brent-Crews/ExData_Plotting1 | R | false | false | 638 | r | setwd("C:/Users/brecre01/Documents/Coursera")
mydata <- read.csv("household_power_consumption.txt", header=TRUE, sep=';', check.names=FALSE, na.strings="?", stringsAsFactors=FALSE, comment.char="", quote='\"')
mydata$Date <- as.Date(mydata$Date, format="%d/%m/%Y")
datasm <- subset(mydata, subset=(Date >= "2007-02-01" & Date < "2007-02-03"))
datetimeonly <- paste(as.Date(datasm$Date), datasm$Time)
datasm$Datetime <- as.POSIXct(datetimeonly)
hist(datasm$Global_active_power, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)", ylab="Frequency")
dev.copy(png, file="plot1.png", height=480, width=480)
dev.off() |
args = commandArgs(trailingOnly=TRUE)
library(edgeR)
library(ggplot2)
library(ggthemes)
library(reshape)
library(cowplot)
eqtl_comparison_to_reference_bar_plot <- function(file_stem, output_file) {
  # For every time step 0..15, read "<file_stem><t>_real_v_matched_controls.txt"
  # and stack the real and matched-control pvalues into one long-format data
  # frame, then draw side-by-side boxplots (real vs matched) per time step.
  # Fix vs original: the grow-by-c() accumulation loops (quadratic copying)
  # are replaced with lapply + a single rbind; row order, the per-step
  # progress/wilcox prints, and the resulting plot are unchanged.
  per_step <- lapply(0:15, function(ts) {
    step_file <- paste0(file_stem, ts, "_real_v_matched_controls.txt")
    data <- read.table(step_file, header = TRUE)
    # Progress marker plus a quick paired comparison of the two distributions.
    print(ts)
    print(wilcox.test(data$real_pvalue, data$matched_pvalue))
    n_real <- length(data$real_pvalue)
    n_matched <- length(data$matched_pvalue)
    data.frame(
      pvalues = c(data$real_pvalue, data$matched_pvalue),
      version = rep(c("real", "matched"), times = c(n_real, n_matched)),
      time_step = ts,
      stringsAsFactors = FALSE
    )
  })
  long <- do.call(rbind, per_step)
  df <- data.frame(
    pvalues = as.numeric(long$pvalues),
    version = factor(long$version, c("real", "matched")),
    time_step = factor(long$time_step)
  )
  box_plot <- ggplot(df, aes(x = time_step, y = pvalues, fill = version)) + geom_boxplot(width = .54)
  box_plot <- box_plot + theme(text = element_text(size = 18), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(colour = "black"))
  box_plot <- box_plot + labs(fill = "Test version", x = "time step", y = "pvalue")
  ggsave(box_plot, file = output_file, width = 20, height = 10.5, units = "cm")
}
make_pvalue_histogram_one_time_step <- function(input_file, time_step) {
  # Read the nominal eqtl association results for one time step and return
  # a ggplot histogram of the nominal pvalues, titled "t = <time_step>".
  eqtl_results <- read.table(input_file, header = TRUE)
  plot_df <- data.frame(pvalues = eqtl_results$pvalue)
  # Fixed fill / outline colours shared by every panel in the grid.
  bar_fill <- "#4271AE"
  bar_outline <- "#1F3552"
  panel_title <- paste0("t = ", time_step)
  # 0.01-wide bins anchored at 0, x ticks at 0 / 0.5 / 1.
  histogram <- ggplot(plot_df, aes(x = pvalues)) +
    geom_histogram(aes(y = ..count..), colour = bar_outline, fill = bar_fill, binwidth = 0.01, boundary = 0) +
    scale_x_continuous(name = "pvalue", breaks = round(seq(0, 1, by = .5), 1)) +
    theme(axis.text = element_text(size = 13)) +
    scale_y_continuous(name = "counts") +
    ggtitle(panel_title)
  histogram
}
pvalue_histogram_across_time_steps <- function(input_stem, output_file) {
  # Build one nominal-pvalue histogram per time step (0..15) and arrange all
  # sixteen panels on a single 4x4 page of the output pdf.
  # Fix vs original: sixteen copy-pasted "time_step <- k; pk <- ..." stanzas
  # collapsed into one lapply; panel order, the per-step progress prints and
  # the rendered pdf are unchanged.
  panels <- lapply(0:15, function(time_step) {
    panel <- make_pvalue_histogram_one_time_step(paste0(input_stem, time_step, "_eqtl_results.txt"), time_step)
    print(time_step)  # progress marker, as before
    panel
  })
  pdf(output_file)
  gg <- plot_grid(plotlist = panels, nrow = 4, ncol = 4, label_size = 13)
  combined_gg <- ggdraw() + draw_plot(gg, 0, 0, 1, 1)
  print(combined_gg)
  dev.off()
}
# Command-line configuration (see `args` at the top of the file).
# NOTE(review): `=` is used for top-level assignment here while the rest of
# the file uses `<-`; left as-is.
visualization_directory = args[1]
distance = args[2]
maf_cutoff = args[3]
normalization_method = args[4]
independent_time_step_eqtl_dir = args[5]
data_prep_version = args[6]
num_pcs = args[7]
#############################################
# Make histogram of nominal pvalue distribution for each time step
# Then put each time step on one plot
file_stem <- paste0(independent_time_step_eqtl_dir, "eqtl_prepare_eqtl_distance_",distance,"_maf_cutoff_",maf_cutoff,"_normalization_meth_",normalization_method,"_data_prep_",data_prep_version,"_num_pcs_",num_pcs,"_time_step_")
output_file <- paste0(visualization_directory, "nominal_eqtl_pvalue_histogram_",distance,"_maf_cutoff_",maf_cutoff,"_normalization_meth_",normalization_method,"_data_prep_",data_prep_version,"_num_pcs_",num_pcs, "_histogram_across_time_steps.pdf")
# Histogram grid step is currently disabled (call left commented out).
# pvalue_histogram_across_time_steps(file_stem, output_file)
###############################################
# Plot distribution of pvalues found in our eqtl data
# But only those variant-gene pairs that are found in:
## a. Nick Banovich's ipsc data
ipsc_file_stem <- paste0(visualization_directory, "ipsc_banovich_comparison_distance_",distance,"_maf_cutoff_",maf_cutoff,"_normalization_meth_",normalization_method,"_data_prep_",data_prep_version,"_num_pcs_",num_pcs,"_time_step_")
ipsc_plot_file <- paste0(visualization_directory, "ipsc_banovich_comparison_distance_",distance,"_maf_cutoff_",maf_cutoff,"_normalization_meth_",normalization_method,"_data_prep_",data_prep_version,"_num_pcs_",num_pcs,"_bar_plot.png")
eqtl_comparison_to_reference_bar_plot(ipsc_file_stem, ipsc_plot_file)
## b. GTEx Heart left ventricle data
gtex_file_stem <- paste0(visualization_directory, "gtex_v7_heart_left_ventricle_comparison_distance_",distance,"_maf_cutoff_",maf_cutoff,"_normalization_meth_",normalization_method,"_data_prep_",data_prep_version,"_num_pcs_",num_pcs,"_time_step_")
gtex_plot_file <- paste0(visualization_directory, "gtex_v7_heart_left_ventricle_comparison_distance_",distance,"_maf_cutoff_",maf_cutoff,"_normalization_meth_",normalization_method,"_data_prep_",data_prep_version,"_num_pcs_",num_pcs, "_bar_plot.png")
eqtl_comparison_to_reference_bar_plot(gtex_file_stem, gtex_plot_file)
| /visualize_eqtls_across_time_steps.R | no_license | BennyStrobes/ipsc_qtl_pipelines | R | false | false | 7,332 | r | args = commandArgs(trailingOnly=TRUE)
library(edgeR)
library(ggplot2)
library(ggthemes)
library(reshape)
library(cowplot)
eqtl_comparison_to_reference_bar_plot <- function(file_stem, output_file) {
# First extract data. And get into nice data format
pvalues <- c()
version <- c()
time_step <- c()
for (temp_time_step in 0:15) {
ipsc_file <- paste0(file_stem, temp_time_step,"_real_v_matched_controls.txt")
data <- read.table(ipsc_file,header=TRUE)
pvalues <- c(pvalues,data$real_pvalue)
time_step <- c(time_step, rep(temp_time_step, length(data$real_pvalue)))
version <- c(version, as.character(rep("real", length(data$real_pvalue))))
pvalues <- c(pvalues,data$matched_pvalue)
time_step <- c(time_step, rep(temp_time_step, length(data$matched_pvalue)))
version <- c(version, as.character(rep("matched", length(data$matched_pvalue))))
print(temp_time_step)
print(wilcox.test(data$real_pvalue,data$matched_pvalue))
}
df <- data.frame(pvalues = as.numeric(pvalues), version = factor(version,c("real","matched")), time_step = factor(time_step))
box_plot <- ggplot(df, aes(x=time_step, y=pvalues, fill=version)) + geom_boxplot(width=.54)
box_plot <- box_plot + theme(text = element_text(size=18), panel.grid.major = element_blank(), panel.grid.minor = element_blank(),panel.background = element_blank(), axis.line = element_line(colour = "black"))
box_plot <- box_plot + labs(fill= "Test version",x = "time step", y = "pvalue")
ggsave(box_plot, file=output_file,width = 20,height=10.5,units="cm")
}
make_pvalue_histogram_one_time_step <- function(input_file, time_step) {
all_eqtl_nominal_pvalues <- read.table(input_file, header=TRUE)
pvalues <- all_eqtl_nominal_pvalues$pvalue
# pvalues <- runif(30000, 0.0, 1.0)
# Colors for histogram
barfill <- "#4271AE"
barlines <- "#1F3552"
# Title of plot
title <- paste0("t = ", time_step)
# put data into data frame for plotting
df <- data.frame(pvalues = pvalues)
# make plots
p <- ggplot(df, aes(x = pvalues)) +
geom_histogram(aes(y = ..count..), colour = barlines, fill = barfill,binwidth = 0.01, boundary = 0) +
scale_x_continuous(name = "pvalue", breaks = round(seq(0,1,by=.5),1)) + theme(axis.text=element_text(size=13)) +
scale_y_continuous(name = "counts") +
ggtitle(title)
return(p)
}
pvalue_histogram_across_time_steps <- function(input_stem, output_file) {
  # Build one nominal-pvalue histogram per time step (0..15) and arrange all
  # sixteen panels on a single 4x4 page of the output pdf.
  # Fix vs original: sixteen copy-pasted "time_step <- k; pk <- ..." stanzas
  # collapsed into one lapply; panel order, the per-step progress prints and
  # the rendered pdf are unchanged.
  panels <- lapply(0:15, function(time_step) {
    panel <- make_pvalue_histogram_one_time_step(paste0(input_stem, time_step, "_eqtl_results.txt"), time_step)
    print(time_step)  # progress marker, as before
    panel
  })
  pdf(output_file)
  gg <- plot_grid(plotlist = panels, nrow = 4, ncol = 4, label_size = 13)
  combined_gg <- ggdraw() + draw_plot(gg, 0, 0, 1, 1)
  print(combined_gg)
  dev.off()
}
visualization_directory = args[1]
distance = args[2]
maf_cutoff = args[3]
normalization_method = args[4]
independent_time_step_eqtl_dir = args[5]
data_prep_version = args[6]
num_pcs = args[7]
#############################################
# Make histogram of nominal pvalue distribution for each time step
# Then put each time step on one plot
file_stem <- paste0(independent_time_step_eqtl_dir, "eqtl_prepare_eqtl_distance_",distance,"_maf_cutoff_",maf_cutoff,"_normalization_meth_",normalization_method,"_data_prep_",data_prep_version,"_num_pcs_",num_pcs,"_time_step_")
output_file <- paste0(visualization_directory, "nominal_eqtl_pvalue_histogram_",distance,"_maf_cutoff_",maf_cutoff,"_normalization_meth_",normalization_method,"_data_prep_",data_prep_version,"_num_pcs_",num_pcs, "_histogram_across_time_steps.pdf")
# pvalue_histogram_across_time_steps(file_stem, output_file)
###############################################
# Plot distribution of pvalues found in our eqtl data
# But only those variant-gene pairs that are found in:
## a. Nick Banovich's ipsc data
ipsc_file_stem <- paste0(visualization_directory, "ipsc_banovich_comparison_distance_",distance,"_maf_cutoff_",maf_cutoff,"_normalization_meth_",normalization_method,"_data_prep_",data_prep_version,"_num_pcs_",num_pcs,"_time_step_")
ipsc_plot_file <- paste0(visualization_directory, "ipsc_banovich_comparison_distance_",distance,"_maf_cutoff_",maf_cutoff,"_normalization_meth_",normalization_method,"_data_prep_",data_prep_version,"_num_pcs_",num_pcs,"_bar_plot.png")
eqtl_comparison_to_reference_bar_plot(ipsc_file_stem, ipsc_plot_file)
## b. GTEx Heart left ventricle data
gtex_file_stem <- paste0(visualization_directory, "gtex_v7_heart_left_ventricle_comparison_distance_",distance,"_maf_cutoff_",maf_cutoff,"_normalization_meth_",normalization_method,"_data_prep_",data_prep_version,"_num_pcs_",num_pcs,"_time_step_")
gtex_plot_file <- paste0(visualization_directory, "gtex_v7_heart_left_ventricle_comparison_distance_",distance,"_maf_cutoff_",maf_cutoff,"_normalization_meth_",normalization_method,"_data_prep_",data_prep_version,"_num_pcs_",num_pcs, "_bar_plot.png")
eqtl_comparison_to_reference_bar_plot(gtex_file_stem, gtex_plot_file)
|
# Exercise 1: ggplot2 basics
# Install and load `ggplot2`
# You will also want to load `dplyr`
library("ggplot2")
library("dplyr")
# For this exercise you'll be working with the `diamonds` data set included in the ggplot2 library
# Use `?diamonds` to get more information about this data set (including the column descriptions)
# Also check the _column names_ and the _number of rows_ in the data set
View(diamonds)
?diamonds
colnames(diamonds)
# NOTE(review): rownames()/row.names.default() print one implicit name per
# row (very noisy); nrow() on the next line is what the prompt asks for.
rownames(diamonds)
row.names.default(diamonds)
nrow(diamonds)
# This data set has a lot of rows. To make things a bit more readable,
# use dplyr's `sample_n()` function to get a random 1000 rows from the data set
# Store this sample in a variable `diamonds.sample`
# NOTE(review): the sample is random each run; add set.seed() upstream if
# reproducible plots are wanted.
diamonds.sample <- sample_n(diamonds,1000)
# Start by making a new `ggplot` with the `diamonds.sample` as the data (no geometry yet)
# What do you see?
# (A blank plotting surface: data is bound but no geom layer draws anything.)
ggplot(data= diamonds.sample)
# Draw a scatter plot (with point geometry) with for the `diamonds.sample` set,
# with the `carat` mapped to the x-position and `price` mapped to the y-position.
ggplot(data = diamonds.sample) +
geom_point(mapping = aes(x = carat, y= price))
# Draw the same plot as above, but color each of the points based on their clarity.
ggplot(data = diamonds.sample) +
geom_point(mapping = aes(x = carat, y= price, color = clarity))
# Draw the same plot as above, but for the entire `diamonds` data set. Note this may take
# a few seconds to generate.
ggplot(data = diamonds) +
geom_point(mapping = aes(x = carat, y= price))
# Draw another scatter plot for `diamonds.sample` of price (y) by carat (x),
# but with all of the dots colored "blue".
# Hint: you'll need to set the color channel, not map a value to it!
# (color is passed outside aes(), so it is a constant, not a mapping.)
ggplot(data = diamonds.sample) +
geom_point(mapping = aes(x = carat, y= price), color = "blue")
# Draw a scatter plot for `diamonds.sample` of `price` by `carat`, where each
# point has an aesthetic _shape_ based on the diamond's `cut`.
ggplot(data= diamonds.sample) +
geom_point(mapping = aes(x = carat, y = price, shape=cut))
# Draw a scatter plot for `diamonds.sample` of *`cut`* by `carat`, where each
# point has an aesthetic _size_ based on the diamond's *`price`*
ggplot(data = diamonds.sample) +
geom_point(mapping = aes(x = carat, y = cut, size = price))
# Try coloring the above plot based on the diamond's price!
ggplot(data = diamonds.sample) +
geom_point(mapping = aes(x = carat, y = cut, size = price, color = price))
# The prompts below were left unanswered in this worksheet (intentional:
# they are exercises for the reader).
# Draw a line plot (with line geometry) for `diamonds.sample`. The x-position should be mapped to
# carat, y-position to price, and color to carat.
# That's kind of messy. Try using `smooth` geometry instead.
# Draw a plot with bar geometry (a bar chart), mapping the diamond's `cut` to the x-axis
# Add an aesthetic property that will _fill_ each bar geometry based on the `clarity` of the diamonds
# What kind of chart do you get?
# Draw a histogram of diamond prices.
# Try mapping each bar based on clarity as well!
# (For a more traditional "bell-curve", make a histogram of diamond `depths`)
# Draw a plot of the `diamonds.sample` data (price by carat), with both points for each
# diamond AND smoothed lines for each cut (hint: in a separate color)
# Making the points have some `alpha` transparency will make the plot look nicer
# multiple geoms (point & smooth)
## Bonus
# Draw a bar chart of average diamond prices by clarity, and include "error bars" marking
# the standard error of each measurement.
#
# You can calculate standard error as the _standard deviation_ divided by the square root
# of the number of measurements (prices)
# Start by creating a data frame `diamond.summary` that includes summarized data for each clarity group.
# Your summary data shuld include the mean price and the standard error of the price.
# Then draw the plot. The error bars should stretch from the mean-error to the mean+error.
| /exercise-1/exercise.R | permissive | josh3396/module13-ggplot2 | R | false | false | 3,907 | r | # Exercise 1: ggplot2 basics
# Install and load `ggplot2`
# You will also want to load `dplyr`
library("ggplot2")
library("dplyr")
# For this exercise you'll be working with the `diamonds` data set included in the ggplot2 library
# Use `?diamonds` to get more information about this data set (including the column descriptions
# Also check the _column names_ and the _number of rows_ in the data set
View(diamonds)
?diamonds
colnames(diamonds)
rownames(diamonds)
row.names.default(diamonds)
nrow(diamonds)
# This data set has a lot of rows. To make things a bit more readable,
# use dplyr's `sample_n()` function to get a random 1000 rows from the data set
# Store this sample in a variable `diamonds.sample`
diamonds.sample <- sample_n(diamonds,1000)
# Start by making a new `ggplot` with the `diamonds.sample` as the data (no geometry yet)
# What do you see?
ggplot(data= diamonds.sample)
# Draw a scatter plot (with point geometry) with for the `diamonds.sample` set,
# with the `carat` mapped to the x-position and `price` mapped to the y-position.
ggplot(data = diamonds.sample) +
geom_point(mapping = aes(x = carat, y= price))
# Draw the same plot as above, but color each of the points based on their clarity.
ggplot(data = diamonds.sample) +
geom_point(mapping = aes(x = carat, y= price, color = clarity))
# Draw the same plot as above, but for the entire `diamonds` data set. Note this may take
# a few seconds to generate.
ggplot(data = diamonds) +
geom_point(mapping = aes(x = carat, y= price))
# Draw another scatter plot for `diamonds.sample` of price (y) by carat (x),
# but with all of the dots colored "blue".
# Hint: you'll need to set the color channel, not map a value to it!
ggplot(data = diamonds.sample) +
geom_point(mapping = aes(x = carat, y= price), color = "blue")
# Draw a scatter plot for `diamonds.sample` of `price` by `carat`, where each
# point has an aesthetic _shape_ based on the diamond's `cut`.
ggplot(data= diamonds.sample) +
geom_point(mapping = aes(x = carat, y = price, shape=cut))
# Draw a scatter plot for `diamonds.sample` of *`cut`* by `carat`, where each
# point has an aesthetic _size_ based on the diamond's *`price`*
ggplot(data = diamonds.sample) +
geom_point(mapping = aes(x = carat, y = cut, size = price))
# Try coloring the above plot based on the diamond's price!
ggplot(data = diamonds.sample) +
geom_point(mapping = aes(x = carat, y = cut, size = price, color = price))
# Draw a line plot (with line geometry) for `diamonds.sample`. The x-position should be mapped to
# carat, y-position to price, and color to carat.
# That's kind of messy. Try using `smooth` geometry instead.
# Draw a plot with bar geometry (a bar chart), mapping the diamond's `cut` to the x-axis
# Add an aesthetic property that will _fill_ each bar geometry based on the `clarity` of the diamonds
# What kind of chart do you get?
# Draw a histogram of diamond prices.
# Try mapping each bar based on clarity as well!
# (For a more traditional "bell-curve", make a histogram of diamond `depths`)
# Draw a plot of the `diamonds.sample` data (price by carat), with both points for each
# diamond AND smoothed lines for each cut (hint: in a separate color)
# Making the points have some `alpha` transparency will make the plot look nicer
# multiple geoms (point & smooth)
## Bonus
# Draw a bar chart of average diamond prices by clarity, and include "error bars" marking
# the standard error of each measurement.
#
# You can calculate standard error as the _standard deviation_ divided by the square root
# of the number of measurements (prices)
# Start by creating a data frame `diamond.summary` that includes summarized data for each clarity group.
# Your summary data shuld include the mean price and the standard error of the price.
# Then draw the plot. The error bars should stretch from the mean-error to the mean+error.
|
library(data.table)
# Read the Sheffield diet-study data set straight from its published url.
# Columns: Person, gender, Age, Height, pre.weight, Diet, weight6weeks.
# NOTE(review): Age, Height and pre.weight are read as factors so they can be
# used as ANOVA grouping variables below — confirm this is intended, since it
# treats every distinct value as its own group.
diet <- read.csv("https://www.sheffield.ac.uk/polopoly_fs/1.570199!/file/stcp-Rdataset-Diet.csv",
                 colClasses = c("numeric", "factor", "factor", "factor", "factor", "factor", "numeric"))
head(diet)
# Fix: the original referenced an undefined object `diet1` here, which
# stopped the script with "object 'diet1' not found".
data.class(diet$Diet)
summary(diet)
# cleaning the dataset: drop rows containing missing values
diet <- na.omit(diet)
# checking unique values per column (Height is not checked in the original)
length(unique(diet$Person))
length(unique(diet$gender))
length(unique(diet$Age))
length(unique(diet$pre.weight))
length(unique(diet$Diet))
length(unique(diet$weight6weeks))
# One-way ANOVAs ----
# Effect of each single factor on the final weight (weight6weeks).
one.gen <- aov(weight6weeks ~ gender, diet)
one.gen
summary(one.gen)
# plot(one.gen)
one.age <- aov(weight6weeks ~ Age, diet)
one.age
summary(one.age)
# plot(one.age)
one.height <- aov(weight6weeks ~ Height, diet)
one.height
summary(one.height)
one.prew <- aov(weight6weeks ~ pre.weight, diet)
one.prew
summary(one.prew)
one.diet <- aov(weight6weeks ~ Diet, diet)
one.diet
summary(one.diet)
# Fix: the original fitted and printed the one.diet model twice in a row
# (copy-paste duplicate); the second stanza has been removed.
# coefficients(two.gen.age)
# plot(two.gen.age)
# Two-way (additive) ANOVAs ----
# Every pair of factors, no interaction terms.
# gender + each of the others
two.gen.age <- aov(weight6weeks ~ gender + Age, diet)
two.gen.age
summary(two.gen.age)
two.gen.hei <- aov(weight6weeks ~ gender + Height, diet)
two.gen.hei
summary(two.gen.hei)
two.gen.prew <- aov(weight6weeks ~ gender + pre.weight, diet)
two.gen.prew
summary(two.gen.prew)
two.gen.diet <- aov(weight6weeks ~ gender + Diet, diet)
two.gen.diet
summary(two.gen.diet)
# Age + each remaining factor
two.age.hei <- aov(weight6weeks ~ Age + Height, diet)
two.age.hei
summary(two.age.hei)
two.age.prew <- aov(weight6weeks ~ Age + pre.weight, diet)
two.age.prew
summary(two.age.prew)
two.age.diet <- aov(weight6weeks ~ Age + Diet, diet)
two.age.diet
summary(two.age.diet)
# Height + each remaining factor
two.hei.prew <- aov(weight6weeks ~ Height + pre.weight, diet)
two.hei.prew
summary(two.hei.prew)
two.hei.diet <- aov(weight6weeks ~ Height + Diet, diet)
two.hei.diet
summary(two.hei.diet)
# pre.weight + Diet
two.prew.diet <- aov(weight6weeks ~ pre.weight + Diet, diet)
two.prew.diet
summary(two.prew.diet)
# Self-made ANOVA functions
# for one way
oneAnova <- function(Factor, Outcome)
{
  # Hand-rolled one-way ANOVA of a numeric Outcome across the levels of
  # Factor.  Prints the between/within sums of squares, their mean squares
  # and the F ratio (same cat() format as before), and additionally returns
  # them invisibly as a named list (callers that ignored the old NULL return
  # are unaffected).
  #
  # Fixes vs the original:
  #  * works for any number and coding of factor levels — the original only
  #    recognised the literal values 1, 2 and 3, so e.g. a 0/1-coded gender
  #    silently lost the 0 group and produced NaN results;
  #  * NAs are removed consistently for both the group means and the
  #    within-group sums of squares (the original used na.rm only for means);
  #  * no quadratic grow-by-c() accumulation loops.
  groups <- split(Outcome, Factor, drop = TRUE)
  groups <- lapply(groups, function(v) v[!is.na(v)])
  k <- length(groups)                        # number of levels present
  sizes <- vapply(groups, length, integer(1))
  n <- sum(sizes)                            # total number of observations
  means <- vapply(groups, mean, numeric(1))
  grand_mean <- sum(sizes * means) / n
  # between-group sum of squares and mean square
  bss <- sum(sizes * (means - grand_mean)^2)
  msb <- bss / (k - 1)
  cat("\nbss = ", bss, "\n msb = ", msb)
  # within-group sum of squares and mean square
  wss <- sum(unlist(lapply(groups, function(v) sum((v - mean(v))^2))))
  msw <- wss / (n - k)
  cat("\nwss = ", wss, "\n msw = ", msw)
  # F ratio: between-group variance over within-group variance
  fRatio <- msb / msw
  cat("\nF_value obtained : ", fRatio, "\n")
  invisible(list(bss = bss, msb = msb, wss = wss, msw = msw, f_ratio = fRatio))
}
# Run the hand-rolled ANOVA for each factor against the final weight.
oneAnova(diet$gender, diet$weight6weeks)
oneAnova(diet$Age, diet$weight6weeks)
oneAnova(diet$Height, diet$weight6weeks)
oneAnova(diet$pre.weight, diet$weight6weeks)
oneAnova(diet$Diet, diet$weight6weeks)
# Compare against R's own aov() result for the Diet factor.
summary(one.diet)
# for two way
# NOTE(review): two-way version is an unimplemented stub.
twoAnova <- function()
{
}
# k = 4
# v <- c(4, 5,2,1,5)
# intersect(v, 3:5)
# v1 <- c(1:k)
# v1
# v
#
# Scratch experiments with matrix construction (unrelated to the analysis).
k <- c(1,2,3)
u <- matrix(0,4, 3)
u
u[,1] <- c(1,2,3,4)
# NOTE(review): the next line errors at run time — a length-3 vector cannot
# replace a length-4 column ("number of items to replace is not a multiple
# of replacement length"), so the script stops here.
u[,2] <- c(3,4, 2)
u[,3] <- c(5,6,7,0)
u
df <- data.frame(u)
df
# #creating dataframe
# max.len = max(length(v1), length(v2), length(v3))
# v1 = c(v1, rep(NA, max.len - length(v1)))
# v2 = c(v2, rep(NA, max.len - length(v2)))
# v3 = c(v3, rep(NA, max.len - length(v3)))
#
# data <- data.frame(v1, v2, v3)
| /Annova.R | no_license | pradyumngupta/Data_analysis_algos | R | false | false | 4,481 | r | library(data.table)
# temp <- tempfile()
# download.file("https://cdn.scribbr.com/wp-content/uploads//2020/03/crop.data_.anova_.zip",temp)
# data <- read.table(unz(temp, "a1.csv"))
# unlink(temp)
#
# data
#read data from url
diet <- read.csv("https://www.sheffield.ac.uk/polopoly_fs/1.570199!/file/stcp-Rdataset-Diet.csv",
colClasses = c("numeric", "factor", "factor", "factor", "factor", "factor", "numeric"))
head(diet)
# count(diet$Person)
data.class(diet1$Diet)
summary(diet)
# cleaning the dataset
diet <- na.omit(diet)
#checking unique values
length(unique(diet$Person))
length(unique(diet$gender))
length(unique(diet$Age))
length(unique(diet$pre.weight))
length(unique(diet$Diet))
length(unique(diet$weight6weeks))
# one way anova
one.gen <- aov(weight6weeks ~ gender, diet)
one.gen
summary(one.gen)
# plot(one.gen)
one.age <- aov(weight6weeks ~ Age, diet)
one.age
summary(one.age)
# plot(one.age)
one.height <- aov(weight6weeks ~ Height, diet)
one.height
summary(one.height)
one.prew <- aov(weight6weeks ~ pre.weight, diet)
one.prew
summary(one.prew)
one.diet <- aov(weight6weeks ~ Diet, diet)
one.diet
summary(one.diet)
one.diet <- aov(weight6weeks ~ Diet, diet)
one.diet
summary(one.diet)
# coefficients(two.gen.age)
# plot(two.gen.age)
# two way anova
# gen
two.gen.age <- aov(weight6weeks ~ gender + Age, diet)
two.gen.age
summary(two.gen.age)
two.gen.hei <- aov(weight6weeks ~ gender + Height, diet)
two.gen.hei
summary(two.gen.hei)
two.gen.prew <- aov(weight6weeks ~ gender + pre.weight, diet)
two.gen.prew
summary(two.gen.prew)
two.gen.diet <- aov(weight6weeks ~ gender + Diet, diet)
two.gen.diet
summary(two.gen.diet)
# age
two.age.hei <- aov(weight6weeks ~ Age + Height, diet)
two.age.hei
summary(two.age.hei)
two.age.prew <- aov(weight6weeks ~ Age + pre.weight, diet)
two.age.prew
summary(two.age.prew)
two.age.diet <- aov(weight6weeks ~ Age + Diet, diet)
two.age.diet
summary(two.age.diet)
# height
two.hei.prew <- aov(weight6weeks ~ Height + pre.weight, diet)
two.hei.prew
summary(two.hei.prew)
two.hei.diet <- aov(weight6weeks ~ Height + Diet, diet)
two.hei.diet
summary(two.hei.diet)
#prew
two.prew.diet <- aov(weight6weeks ~ pre.weight + Diet, diet)
two.prew.diet
summary(two.prew.diet)
# Self-made ANOVA functions
# for one way
oneAnova <- function(Factor, Outcome)
{
  # Hand-rolled one-way ANOVA of a numeric Outcome across the levels of
  # Factor.  Prints the between/within sums of squares, their mean squares
  # and the F ratio (same cat() format as before), and additionally returns
  # them invisibly as a named list (callers that ignored the old NULL return
  # are unaffected).
  #
  # Fixes vs the original:
  #  * works for any number and coding of factor levels — the original only
  #    recognised the literal values 1, 2 and 3, so e.g. a 0/1-coded gender
  #    silently lost the 0 group and produced NaN results;
  #  * NAs are removed consistently for both the group means and the
  #    within-group sums of squares (the original used na.rm only for means);
  #  * no quadratic grow-by-c() accumulation loops.
  groups <- split(Outcome, Factor, drop = TRUE)
  groups <- lapply(groups, function(v) v[!is.na(v)])
  k <- length(groups)                        # number of levels present
  sizes <- vapply(groups, length, integer(1))
  n <- sum(sizes)                            # total number of observations
  means <- vapply(groups, mean, numeric(1))
  grand_mean <- sum(sizes * means) / n
  # between-group sum of squares and mean square
  bss <- sum(sizes * (means - grand_mean)^2)
  msb <- bss / (k - 1)
  cat("\nbss = ", bss, "\n msb = ", msb)
  # within-group sum of squares and mean square
  wss <- sum(unlist(lapply(groups, function(v) sum((v - mean(v))^2))))
  msw <- wss / (n - k)
  cat("\nwss = ", wss, "\n msw = ", msw)
  # F ratio: between-group variance over within-group variance
  fRatio <- msb / msw
  cat("\nF_value obtained : ", fRatio, "\n")
  invisible(list(bss = bss, msb = msb, wss = wss, msw = msw, f_ratio = fRatio))
}
# Run the hand-rolled one-way ANOVA for each candidate factor against the
# final weight, mirroring the aov() fits above.
oneAnova(diet$gender, diet$weight6weeks)
oneAnova(diet$Age, diet$weight6weeks)
oneAnova(diet$Height, diet$weight6weeks)
oneAnova(diet$pre.weight, diet$weight6weeks)
oneAnova(diet$Diet, diet$weight6weeks)
# NOTE(review): `one.diet` is not defined in this chunk -- presumably an
# aov fit created earlier in the file; confirm it exists before running.
summary(one.diet)
# for two way
# TODO(review): unimplemented stub -- the body is empty, so calling
# twoAnova() currently does nothing and returns NULL.
twoAnova <- function()
{
}
# Scratch / exploration code kept from development.
# k = 4
# v <- c(4, 5,2,1,5)
# intersect(v, 3:5)
# v1 <- c(1:k)
# v1
# v
#
k <- c(1,2,3)
# 4x3 matrix of zeros, filled column by column below
u <- matrix(0,4, 3)
u
u[,1] <- c(1,2,3,4)
# FIX: the original assigned a length-3 vector to a 4-row column, which
# errors in R ("number of items to replace is not a multiple of
# replacement length"); pad with NA to keep the intended values while
# making the assignment valid.
u[,2] <- c(3, 4, 2, NA)
u[,3] <- c(5,6,7,0)
u
df <- data.frame(u)
df
# #creating dataframe
# max.len = max(length(v1), length(v2), length(v3))
# v1 = c(v1, rep(NA, max.len - length(v1)))
# v2 = c(v2, rep(NA, max.len - length(v2)))
# v3 = c(v3, rep(NA, max.len - length(v3)))
#
# data <- data.frame(v1, v2, v3)
|
# Polynomial Regression
library(ggplot2)  # FIX: ggplot()/geom_*() calls below require ggplot2 attached
# Importing the dataset (expects Position_Salaries.csv in the working dir)
dataset = read.csv('Position_Salaries.csv')
dataset = dataset[2:3]  # keep only the Level and Salary columns
# Splitting the dataset into the Training set and Test set
# (skipped: the whole dataset is used for fitting)
# install.packages('caTools')
# library(caTools)
# set.seed(123)
# split = sample.split(dataset$Profit, SplitRatio = 0.8)
# training_set = subset(dataset, split == TRUE)
# test_set = subset(dataset, split == FALSE)
# Feature Scaling (not needed for lm)
# training_set = scale(training_set)
# test_set = scale(test_set)
# Fitting Linear Regression to the Dataset
lin_reg = lm(formula = Salary ~ .,
             data = dataset)
# Fitting Polynomial Regression to the dataset: add Level^2..Level^4 terms
dataset$Level2 = dataset$Level ** 2
dataset$Level3 = dataset$Level ** 3
dataset$Level4 = dataset$Level ** 4
poly_reg = lm(formula = Salary ~ .,
              data = dataset)
# Visualising the Linear Regression results
ggplot()+
  geom_point(aes(x = dataset$Level, y = dataset$Salary),
             color = 'red') +
  geom_line(aes(x = dataset$Level, y = predict(lin_reg, newdata = dataset)),
            color = 'blue') +
  ggtitle('Truth vs Bluff (Linear Regression)') +
  xlab('Level') + ylab('Salary')
# Visualising the Polynomial Regression results
ggplot()+
  geom_point(aes(x = dataset$Level, y = dataset$Salary),
             color = 'red') +
  geom_line(aes(x = dataset$Level, y = predict(poly_reg, newdata = dataset)),
            color = 'blue') +
  ggtitle('Truth vs Bluff (Polynomial Regression)') +
  xlab('Level') + ylab('Salary')
# Predicting a new result with Linear Regression
y_pred = predict(object = lin_reg, data.frame(Level = 6.5))
# Predicting a new result with Polynomial Regression
# NOTE(review): this overwrites the linear prediction above; kept as-is for
# parity with the course script.
y_pred = predict(object = poly_reg, data.frame(Level = 6.5, Level2 = 6.5**2,
                                               Level3 = 6.5**3, Level4 = 6.5**4))
| /Part 2/Section 6 - Polynomial Regression/polynomial_regression.R | no_license | Itsu004/Machine-Learning | R | false | false | 1,862 | r | # Polynomial Regression
# Importing the dataset (expects Position_Salaries.csv in the working dir)
library(ggplot2)  # FIX: ggplot()/geom_*() calls below require ggplot2 attached
dataset = read.csv('Position_Salaries.csv')
dataset = dataset[2:3]  # keep only the Level and Salary columns
# Splitting the dataset into the Training set and Test set
# (skipped: the whole dataset is used for fitting)
# install.packages('caTools')
# library(caTools)
# set.seed(123)
# split = sample.split(dataset$Profit, SplitRatio = 0.8)
# training_set = subset(dataset, split == TRUE)
# test_set = subset(dataset, split == FALSE)
# Feature Scaling (not needed for lm)
# training_set = scale(training_set)
# test_set = scale(test_set)
# Fitting Linear Regression to the Dataset
lin_reg = lm(formula = Salary ~ .,
             data = dataset)
# Fitting Polynomial Regression to the dataset: add Level^2..Level^4 terms
dataset$Level2 = dataset$Level ** 2
dataset$Level3 = dataset$Level ** 3
dataset$Level4 = dataset$Level ** 4
poly_reg = lm(formula = Salary ~ .,
              data = dataset)
# Visualising the Linear Regression results
ggplot()+
  geom_point(aes(x = dataset$Level, y = dataset$Salary),
             color = 'red') +
  geom_line(aes(x = dataset$Level, y = predict(lin_reg, newdata = dataset)),
            color = 'blue') +
  ggtitle('Truth vs Bluff (Linear Regression)') +
  xlab('Level') + ylab('Salary')
# Visualising the Polynomial Regression results
ggplot()+
  geom_point(aes(x = dataset$Level, y = dataset$Salary),
             color = 'red') +
  geom_line(aes(x = dataset$Level, y = predict(poly_reg, newdata = dataset)),
            color = 'blue') +
  ggtitle('Truth vs Bluff (Polynomial Regression)') +
  xlab('Level') + ylab('Salary')
# Predicting a new result with Linear Regression
y_pred = predict(object = lin_reg, data.frame(Level = 6.5))
# Predicting a new result with Polynomial Regression
# NOTE(review): this overwrites the linear prediction above; kept as-is for
# parity with the course script.
y_pred = predict(object = poly_reg, data.frame(Level = 6.5, Level2 = 6.5**2,
                                               Level3 = 6.5**3, Level4 = 6.5**4))
|
# Compartment-switch analysis: compare Hi-C PC1 (A/B compartment) values
# between ESC and 2CLC and plot the fraction of bins switching compartment.
x <- read.table("ESC.PC1.bedGraph")
y <- read.table("2CLC.PC1.bedGraph")
# bedGraph column V4 holds the PC1 value for each genomic bin
mydata <- data.frame(x=x$V4,y=y$V4)
mydata <- na.omit(mydata)
# A (PC1 > 0) -> B (PC1 < 0) switches, and the reverse
atob <- mydata[mydata$x>0 & mydata$y<0,]
btoa <- mydata[mydata$x<0 & mydata$y>0,]
# FIX: derive the switch fractions from the data instead of the hard-coded
# counts c(55, 368)/5321, which left `atob`/`btoa` unused and silently go
# stale whenever the input files change.
s <- c(nrow(atob), nrow(btoa)) / nrow(mydata)
data <- data.frame(type=c("AtoB","BtoA"),value=s)
library(ggplot2)
ggplot(data,aes(x=type,y=value,fill=type)) + geom_bar(stat="identity",width = 0.5) + theme_classic()
| /Hi-C/FigS2C.R | no_license | shenlab423/2CLC-Project-Code | R | false | false | 409 | r | x <- read.table("ESC.PC1.bedGraph")
y <- read.table("2CLC.PC1.bedGraph")
mydata <- data.frame(x=x$V4,y=y$V4)
mydata <- na.omit(mydata)
atob <- mydata[mydata$x>0 & mydata$y<0,]
btoa <- mydata[mydata$x<0 & mydata$y>0,]
s <- c(55,368)/5321
data <- data.frame(type=c("AtoB","BtoA"),value=s)
library(ggplot2)
ggplot(data,aes(x=type,y=value,fill=type)) + geom_bar(stat="identity",width = 0.5) + theme_classic()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.