content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
#' @param conflicts This determines what happens when a file with the same name
#' exists at the specified destination. Can be one of the following:
#' * `"error"` (the default): throw an error and abort the file transfer operation.
#' * `"skip"`: skip the conflicting file(s) and continue transferring the
#' remaining files.
#' * `"overwrite"`: replace the existing file with the transferred copy.
| /man-roxygen/conflicts.R | permissive | a-edwards/osfr | R | false | false | 412 | r | #' @param conflicts This determines what happens when a file with the same name
#' exists at the specified destination. Can be one of the following:
#' * `"error"` (the default): throw an error and abort the file transfer operation.
#' * `"skip"`: skip the conflicting file(s) and continue transferring the
#' remaining files.
#' * `"overwrite"`: replace the existing file with the transferred copy.
|
#' Generate a synthetic seismic shot record
#'
#' Builds a synthetic common-shot gather for a fixed layered velocity model.
#' Four wavefield components are simulated separately -- air wave, ground
#' roll, refractions and reflections -- as spike trains at theoretical
#' arrival times, convolved with a Ricker wavelet (a tapered sinusoid for
#' the ground roll), and then summed.
#'
#' @param PLOT logical; if TRUE, plot the resulting record section with
#'   \code{wiggleimage()}.
#' @return (invisibly) a list with the summed seismograms (\code{smograms}),
#'   sample interval \code{dt}, receiver offsets \code{x}, trace spacing
#'   \code{dx}, the individual components (\code{REFL}, \code{REFR},
#'   \code{GRrec}, \code{AIRrec}) and the theoretical travel times
#'   (\code{THEORY}).
symshot1 <- function(PLOT = FALSE)
{
  ## NOTE(fix): the original guard `if(missing(PLOT)) { PLOIT = FALSE }`
  ## assigned to a misspelled variable (PLOIT) and was redundant anyway,
  ## since the formal default already makes PLOT FALSE; it was removed.

  ## Acquisition / signal parameters -----------------------------------
  nly <- 6          # number of layers used from the velocity model
  ioff <- 3         # near offset, in units of xspace
  xdistance <- 6.0  # maximum source-receiver offset
  iskip <- 2
  xspace <- .1      # receiver spacing
  dt <- .004        # sample interval (s)
  maxtrc <- 60
  velair <- .350    # air-wave velocity
  ampscal <- 1.0
  ampnois <- 0.01   # noise amplitude (the original assigned .2 then
                    # immediately overwrote it with 0.01; only the
                    # effective value is kept)
  ampreflex <- 0.1  # reflection spike amplitude
  amprefrac <- 0.1  # refraction spike amplitude
  ampair <- 0.1     # air-wave spike amplitude
  rickfreq <- 25    # Ricker wavelet dominant frequency
  ricklen <- 35     # Ricker wavelet length
  tlen <- 2.4       # record length (s)
  tracelen <- 600
  wavelen <- 50

  ## Ground-roll guide-line slopes (kept for reference; used only in the
  ## commented-out abline() calls of the original)
  GRLoc <- list()
  GRLoc$x <- c(6.01078415186, 2.77768081931)
  GRLoc$y <- c(-1.51478902016, -2.38634599907)
  GRLslope1 <- GRLoc$y[1] / GRLoc$x[1]
  GRLslope2 <- GRLoc$y[2] / GRLoc$x[2]

  ## m is the velocity model: col 1 = depth to top, col 2 = velocity
  m <- matrix(c(0.0, 3.0,
                0.3, 4.0,
                1.0, 6.5,
                2.0, 7.0,
                4.5, 8.5,
                7.5, 9.5,
                50.0, 10), ncol = 2, byrow = TRUE)
  y <- m[1:6, 1]
  v <- m[1:6, 2]
  x1 <- rep(0, length(y))
  x2 <- rep(1, length(y))

  x <- seq(from = ioff * xspace, to = xdistance, by = xspace)  # offsets
  t2tim <- seq(from = 0, to = tlen, by = dt)                   # time axis
  NLEN <- length(t2tim)
  Ntrace <- length(x)

  tair <- x / velair  # theoretical air-wave arrival times
  klay <- 3           # number of refractors/reflectors actually modelled

  h <- diff(y)        # layer thicknesses
  trefrac <- matrix(ncol = length(x), nrow = klay)
  treflex <- matrix(ncol = length(x), nrow = 3)
  xcrit <- vector(length = nly)
  top <- y
  vel <- v

  ## Critical (crossover) distances for each refractor
  xc <- 0
  for (i in 1:(nly - 1)) {
    xc <- xc + 2 * h[i] * sqrt((v[i + 1] / v[i])^2 - 1)
    xcrit[i] <- xc
  }

  ## Theoretical refraction arrival times (intercept-time method)
  for (n in 1:klay) {
    vn <- v[n]
    i <- seq(from = 1, to = n, by = 1)
    thetai <- asin(v[i] / vn)                 # Snell's-law angles
    tn <- sum(2 * h[i] * cos(thetai) / v[i])  # intercept time
    trefrac[n, ] <- x / v[n] + tn
    ## Head waves do not exist before the critical distance
    if (n > 1) trefrac[n, x < xcrit[n - 1]] <- NA
  }

  ## Theoretical reflection arrival times (RMS-velocity hyperbolae)
  k <- 0
  for (n in 3:(nly - 1)) {
    k <- k + 1
    Delt0 <- h[1:n] / v[1:length(h[1:n])]
    vrms <- sqrt(sum(v[1:n]^2 * Delt0) / sum(Delt0))
    t0 <- sum(Delt0[1:n])
    tt <- sqrt((x^2) / vrms^2 + t0^2)
    treflex[k, ] <- tt
  }

  ## Ricker wavelet, rescaled to [0, 1]
  wavelet <- genrick(rickfreq, dt, ricklen)
  klem <- 11
  nwave <- RPMG::RESCALE(wavelet, 0, 1, wavelet[1], max(wavelet))
  shkip <- length(nwave) / 2
  ones <- rep(1, klem)
  jspred <- applytaper(ones, p = 0.5)
  nspred <- 0.5 + length(jspred) / 2
  reach <- seq(from = 1, by = shkip, length = klem)

  ## Ground-roll wavelet: a tapered sinusoid
  grlen <- floor(.6 / dt)
  fgr <- 10
  tape <- applytaper(rep(1, grlen), p = 0.2)
  tgr <- seq(from = 0, by = dt, length = grlen)
  siggr <- tape * sin(2 * pi * fgr * tgr)

  x1 <- ampnois * runif(NLEN, -1, 1)
  KL <- length(nwave)
  smograms <- matrix(ncol = Ntrace, nrow = NLEN)

  ## Air-wave component -------------------------------------------------
  AIRrec <- matrix(ncol = Ntrace, nrow = NLEN)
  for (i in 1:Ntrace) {
    x1 <- rep(0, times = NLEN)
    iair <- round(tair[i] / dt)
    if (iair > 0 & iair < NLEN) {
      x1[iair] <- x1[iair] + ampair
    }
    AIRrec[, i] <- x1
  }
  AIRrec <- sigconv(AIRrec, nwave)

  ## Ground-roll component (spikes at the first-layer refraction time) --
  n <- 1
  GRrec <- matrix(ncol = Ntrace, nrow = NLEN)
  for (i in 1:Ntrace) {
    x1 <- rep(0, times = NLEN)
    zim <- round(trefrac[n, i] / dt)
    if (zim > 0 & zim < NLEN & !is.na(zim)) {
      x1[zim] <- x1[zim] + amprefrac
    }
    ## NOTE(fix): the original recomputed grlen/tape/tgr/siggr here on
    ## every iteration; those values are loop-invariant and identical to
    ## the ones computed above, so the recomputation was removed.
    GRrec[, i] <- x1
  }
  GRrec <- sigconv(GRrec, siggr)

  ## Refraction component -----------------------------------------------
  REFR <- matrix(ncol = Ntrace, nrow = NLEN)
  for (i in 1:Ntrace) {
    x1 <- rep(0, times = NLEN)
    for (n in 2:klay) {
      zim <- round(trefrac[n, i] / dt)
      if (zim > 0 & zim < NLEN & !is.na(zim)) {
        x1[zim] <- x1[zim] + amprefrac
      }
    }
    REFR[, i] <- x1
  }
  REFR <- sigconv(REFR, nwave)

  ## Reflection component -----------------------------------------------
  REFL <- matrix(ncol = Ntrace, nrow = NLEN)
  for (i in 1:Ntrace) {
    x1 <- rep(0, times = NLEN)
    for (n in 1:klay) {
      zim <- round(treflex[n, i] / dt)
      if (zim > 0 & zim < NLEN & !is.na(zim)) {
        x1[zim] <- x1[zim] + ampreflex
      }
    }
    REFL[, i] <- x1
  }
  REFL <- sigconv(REFL, nwave)

  ## Sum all components into the final shot record ----------------------
  smograms <- REFL + REFR + GRrec + AIRrec
  if (PLOT == TRUE) wiggleimage(smograms, dt = (-dt), dx = x)

  THEORY <- list(trefrac = trefrac, treflex = treflex, tair = tair,
                 velair = velair, mod = m)
  dx <- x[2] - x[1]
  invisible(list(smograms = smograms, dt = dt, x = x, dx = xspace,
                 REFL = REFL, REFR = REFR, GRrec = GRrec,
                 AIRrec = AIRrec, THEORY = THEORY))
}
| /R/symshot1.R | no_license | cran/RSEIS | R | false | false | 5,758 | r | symshot1<-function(PLOT=FALSE)
{
if(missing(PLOT)) { PLOIT = FALSE }
nly=6
ioff=3
xdistance = 6.0
iskip=2
xspace= .1
dt=.004
maxtrc = 60
velair = .350
ampscal = 1.0
ampnois = .2
ampnois = 0.01
ampreflex = 0.1
amprefrac = 0.1
ampair = 0.1
rickfreq = 25
ricklen = 35
tlen = 2.4
tracelen=600
wavelen=50
GRLoc=list()
GRLoc$x=c(6.01078415186,2.77768081931)
GRLoc$y=c(-1.51478902016,-2.38634599907)
GRLslope1 = GRLoc$y[1]/ GRLoc$x[1]
GRLslope2 = GRLoc$y[2]/ GRLoc$x[2]
###### abline(0, GRLoc$y[1]/ GRLoc$x[1], col='blue')
###### abline(0, GRLoc$y[2]/ GRLoc$x[2], col='blue')
########## m is the velocity model
m=matrix(c(0.0, 3.0,
0.3, 4.0,
1.0, 6.5,
2.0, 7.0,
4.5, 8.5,
7.5, 9.5,
50.0, 10), ncol=2, byrow=TRUE)
y = m[1:6,1]
v = m[1:6 ,2]
x1 = rep(0, length(y))
x2 = rep(1, length(y))
x = seq(from=ioff*xspace, to=xdistance, by=xspace)
t2tim = seq(from=0, to=tlen, by=dt)
NLEN = length(t2tim)
Ntrace = length(x)
tair = x/velair
klay = 3
h = diff(y)
xcrit = vector()
trefrac = matrix(ncol=length(x), nrow=klay)
treflex = matrix(ncol=length(x), nrow=3)
xcrit = vector(length=nly)
top = y
vel = v
xc = 0
for( i in 1:(nly-1) )
{
xc = xc +2*h[i]* sqrt( (v[i+1]/v[i])^2 - 1 )
xcrit[i] = xc
}
############ Calculate the theoretical arrival times based on geometry
############
############ refractions
for( n in 1:klay)
{
vn = v[n]
i = seq(from=1, to=n, by=1)
thetai = asin(v[i]/vn)
tn = sum(2*h[i]*cos(thetai)/v[i])
trefrac[n,] = x/v[n] + tn
if(n>1) trefrac[n, x<xcrit[n-1] ] =NA
}
############ calculate reflections
k = 0
for(n in 3:(nly-1))
{
k = k+1
Delt0 = h[1:n]/v[1:length(h[1:n])]
vrms = sqrt( sum( v[1:n]^2*Delt0)/sum(Delt0) )
t0 = sum(Delt0[1:n])
tt = sqrt((x^2)/vrms^2 + t0^2)
## print(c(n, vrms, t0))
## print(range(tt))
##points(x, tt, col=n)
treflex[k,] = tt
}
############
############### set up wavelets:
############ ricker wavelet, shifted so it will be centered on the spike
wavelet = genrick(rickfreq,dt,ricklen)
klem = 11
### nwave = -1 + 2 * (wavelet - min(wavelet) )/diff(range(wavelet))
nwave = RPMG::RESCALE(wavelet, 0, 1, wavelet[1], max(wavelet))
shkip = length(nwave)/2
ones = rep(1, klem)
jspred = applytaper(ones, p = 0.5)
## plot(jspred)
nspred = 0.5+length(jspred)/2
reach = seq(from =1, by=shkip, length=klem)
############### ground roll:
################# this is a sinusoidal signal
grlen = floor(.6/dt)
fgr = 10
tape = applytaper( rep(1, grlen), p = 0.2)
tgr = seq(from=0, by=dt, length=grlen)
siggr = tape*sin(2*pi*fgr*tgr)
## plot(tgr, siggr, type='l')
#######################################################################
#######################################################################
x1 = ampnois*runif(NLEN, -1, 1)
KL = length(nwave)
###
smograms = matrix(ncol=Ntrace , nrow=NLEN)
################################################# Air wave
AIRrec = matrix(ncol=Ntrace , nrow=NLEN)
for(i in 1:Ntrace)
{
x1 = rep(0, times=(NLEN))
## x1 = ampnois*runif(NLEN, -1, 1)
## air
iair = round( tair[i]/dt )
if(iair>0 & iair<NLEN)
{
x1[iair] = x1[iair]+ampair
}
cx1 = x1
AIRrec[,i ] = cx1
}
AIRrec = sigconv(AIRrec, nwave)
######################################################### ground roll
n = 1
GRrec = matrix(ncol=Ntrace , nrow=NLEN)
for(i in 1:Ntrace)
{
x1 = rep(0, times=(NLEN))
zim = round( trefrac[n,i]/dt )
if(zim>0 & zim<NLEN & !is.na(zim))
{
x1[zim] = x1[zim]+amprefrac
}
cx1 = x1
grlen = floor(.6/dt)
fgr = 10
tape = applytaper( rep(1, grlen), p = 0.2)
tgr = seq(from=0, by=dt, length=grlen)
siggr = tape*sin(2*pi*fgr*tgr)
GRrec[,i ] = cx1
}
GRrec = sigconv(GRrec, siggr)
############################################ refractions
REFR = matrix(ncol=Ntrace , nrow=NLEN)
for(i in 1:Ntrace)
{
x1 = rep(0, times=(NLEN))
for(n in 2:klay)
{
zim = round( trefrac[n,i]/dt )
if(zim>0 & zim<NLEN & !is.na(zim))
{
x1[zim] = x1[zim]+amprefrac
}
}
cx1 = x1
REFR[,i ] = cx1
}
REFR = sigconv(REFR , nwave)
################################## reflections
REFL = matrix(ncol=Ntrace , nrow=NLEN)
for(i in 1:Ntrace)
{
x1 = rep(0, times=(NLEN))
for(n in 1:klay)
{
zim = round( treflex[n,i]/dt )
if(zim>0 & zim<NLEN & !is.na(zim))
{
x1[zim] = x1[zim]+ampreflex
}
}
cx1 = x1
REFL[,i ] = cx1
}
REFL = sigconv(REFL , nwave)
smograms = REFL + REFR +GRrec +AIRrec
if(PLOT==TRUE) wiggleimage(smograms , dt=(-dt), dx=x)
THEORY = list(trefrac=trefrac, treflex=treflex, tair=tair, velair=velair, mod=m)
dx=x[2]-x[1]
invisible(list( smograms = smograms, dt=dt, x=x, dx=xspace , REFL=REFL, REFR=REFR, GRrec=GRrec , AIRrec=AIRrec , THEORY = THEORY))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/phase_diff.R
\name{phase_diff}
\alias{phase_diff}
\title{phase_diff}
\usage{
phase_diff(x1, x2, lowlim = 0, highlim = 360)
}
\arguments{
\item{x1, x2}{x coordinates to calculate phase difference between}
\item{lowlim, highlim}{maximum and minimum expected x values. Defaults apply to degrees in a circle}
}
\value{
Returns a vector of values (between -0.5 and 0.5) indicating the phase difference between two x coordinates
}
\description{
\code{phase_diff} calculates the phase difference between two x coordinates
}
\examples{
x1 <- seq(0, 360, 360/4)
x2 <- 180
phase_diff(x1, x2)
}
| /man/phase_diff.Rd | permissive | abeith/eyeCleanR | R | false | true | 666 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/phase_diff.R
\name{phase_diff}
\alias{phase_diff}
\title{phase_diff}
\usage{
phase_diff(x1, x2, lowlim = 0, highlim = 360)
}
\arguments{
\item{x1, x2}{x coordinates to calculate phase difference between}
\item{lowlim, highlim}{maximum and minimum expected x values. Defaults apply to degrees in a circle}
}
\value{
Returns a vector of values (between -0.5 and 0.5) indicating the phase difference between two x coordinates
}
\description{
\code{phase_diff} calculates the phase difference between two x coordinates
}
\examples{
x1 <- seq(0, 360, 360/4)
x2 <- 180
phase_diff(x1, x2)
}
|
# Tests for asciicast's record() pipeline (embedded R subprocess).
# Typing speed is zeroed via withr::local_options() so that snapshot
# output is deterministic across runs.

# Recording a bundled example script produces the expected snapshot output.
test_that("record", {
  withr::local_options(asciicast_typing_speed = 0)
  hello <- system.file(package = "asciicast", "examples", "hello.R")
  cast <- record(hello, interactive = FALSE)
  expect_snapshot(cast$output$data)
})
# Errors raised inside the recorded session show up in the captured output.
test_that("errors", {
  withr::local_options(asciicast_typing_speed = 0)
  cast1 <- record(textConnection("foo12313\nbarsdsdfsdf\n"))
  expect_snapshot(cast1$output$data)
})
# A quit() inside the recorded session is handled and still yields output.
test_that("R quits", {
  withr::local_options(asciicast_typing_speed = 0)
  cast <- record(textConnection("quit('no')\n"))
  expect_snapshot(cast$output$data)
})
# A crash of the recorded subprocess is captured; snapshot varies by OS/arch.
test_that("R crashes", {
  # TODO: why does this fail?
  if (!is_embedded()) {
    skip("Fails on non-embedded R")
  }
  withr::local_options(asciicast_typing_speed = 0)
  cast <- record(textConnection("callr:::crash()\n"), interactive = FALSE)
  expect_snapshot(cast$output$data, variant = os_arch())
})
# A syntactically incomplete input line is rejected by default.
test_that("incomplete expression", {
  withr::local_options(asciicast_typing_speed = 0)
  expect_error(
    record(textConnection("1 + (\n")),
    "Incomplete asciicast expression"
  )
})
# ... unless incomplete_error = FALSE opts out of that check.
test_that("incomplete expression allowed", {
  withr::local_options(asciicast_typing_speed = 0)
  expect_silent(
    record(textConnection("1 + (\n"), incomplete_error = FALSE)
  )
})
# A per-line timeout aborts the recording with an informative error.
test_that("timeout", {
  withr::local_options(asciicast_typing_speed = 0)
  expect_error(
    record(textConnection("Sys.sleep(1)\n"), timeout = 0.1),
    "asciicast timeout after line"
  )
})
# echo = FALSE suppresses echoing of the input lines in the output.
test_that("echo = FALSE", {
  withr::local_options(asciicast_typing_speed = 0)
  hello <- system.file(package = "asciicast", "examples", "hello.R")
  cast <- record(hello, interactive = FALSE, echo = FALSE)
  expect_snapshot(cast$output$data)
})
# Higher speed compresses the timeline: 10x speed should finish in well
# under half the baseline duration.
test_that("speed", {
  hello <- system.file(package = "asciicast", "examples", "hello.R")
  cast1 <- record(hello)
  cast2 <- record(hello, speed = 10)
  expect_true(
    utils::tail(cast2$output$time, 1) < utils::tail(cast1$output$time, 1) /2
  )
})
# If the subprocess never connects back (poll is stubbed to time out),
# startup fails with a clear error.
test_that("subprocess fails", {
  mockery::stub(asciicast_start_process, "processx::poll", list("timeout"))
  expect_error(
    asciicast_start_process(),
    "subprocess did not connect back"
  )
})
# A crash during the startup expression is reported as a process exit.
test_that("startup crashes", {
  # TODO: why does this fail?
  if (!is_embedded()) {
    skip("Fails on non-embedded R")
  }
  expect_error(
    asciicast_start_process(
      startup = quote(callr:::crash()),
      interactive = FALSE
    ),
    "asciicast process exited while running"
  )
})
# Oversized input overflows the pipe buffer and is rejected.
test_that("cannot send input, buffer is full", {
  skip_on_os("windows") # TODO
  expect_error(
    record(textConnection(strrep("1 + ", 100000))),
    "Cannot send input, buffer is full"
  )
})
# shift() drops the first element and pads with "" at the end.
test_that("shift", {
  expect_equal(shift(character()), character())
  expect_equal(shift("a"), "")
  expect_equal(shift(letters), c(letters[-1], ""))
})
# empty_wait inserts extra delay at blank input lines; a 5s wait should
# lengthen the recording by clearly more than 3s versus no wait.
test_that("add_empty_wait", {
  withr::local_options(asciicast_typing_speed = 0)
  cast1 <- record(textConnection("1+1\n\n2+2\n"), empty_wait = 0)
  cast2 <- record(textConnection("1+1\n\n2+2\n"), empty_wait = 5)
  expect_true(
    utils::tail(cast1$output$time, 1) < utils::tail(cast2$output$time, 1) - 3
  )
})
# adjust_typing_speed() rescales timestamps (faster typing -> shorter
# timeline) and is a no-op on an empty frame.
test_that("adjust_typing_speed", {
  withr::local_options(asciicast_typing_speed = 0)
  cast1 <- record(textConnection("1+1\n\n2+2\n"), empty_wait = 0)
  data <- cast1$output
  data1 <- adjust_typing_speed(data, 0.05)
  data2 <- adjust_typing_speed(data, 0.5)
  expect_true(
    utils::tail(data1$time, 1) < utils::tail(data2$time, 1) - 1
  )
  empty <- data[integer(), ]
  expect_equal(adjust_typing_speed(empty, 0.05), empty)
})
# find_rem() errors when no embedded R executable can be located
# (get_embedded is stubbed to return "").
test_that("find_rem error", {
  mockery::stub(find_rem, "get_embedded", "")
  expect_error(
    find_rem(),
    "Cannot find embedded R executable"
  )
})
# "#! --" directives force a pause marker into the typed command stream.
test_that("forced pause", {
  cast <- record(c(
    "#! --",
    "1 + 1",
    "#! --",
    "2 + 2"
  ))
  cmds <- grep("^type:", cast$output$data, value=TRUE)
  expect_snapshot(cmds)
})
# A pause directive combined with end_wait = 0 still records cleanly.
test_that("edge case with no wait", {
  cast <- record(c(
    "#! --",
    "1 + 1"
  ), end_wait = 0)
  cmds <- grep("^type:", cast$output$data, value=TRUE)
  expect_snapshot(cmds)
})
| /tests/testthat/test-embed.R | permissive | r-lib/asciicast | R | false | false | 4,062 | r |
test_that("record", {
withr::local_options(asciicast_typing_speed = 0)
hello <- system.file(package = "asciicast", "examples", "hello.R")
cast <- record(hello, interactive = FALSE)
expect_snapshot(cast$output$data)
})
test_that("errors", {
withr::local_options(asciicast_typing_speed = 0)
cast1 <- record(textConnection("foo12313\nbarsdsdfsdf\n"))
expect_snapshot(cast1$output$data)
})
test_that("R quits", {
withr::local_options(asciicast_typing_speed = 0)
cast <- record(textConnection("quit('no')\n"))
expect_snapshot(cast$output$data)
})
test_that("R crashes", {
# TODO: why does this fail?
if (!is_embedded()) {
skip("Fails on non-embedded R")
}
withr::local_options(asciicast_typing_speed = 0)
cast <- record(textConnection("callr:::crash()\n"), interactive = FALSE)
expect_snapshot(cast$output$data, variant = os_arch())
})
test_that("incomplete expression", {
withr::local_options(asciicast_typing_speed = 0)
expect_error(
record(textConnection("1 + (\n")),
"Incomplete asciicast expression"
)
})
test_that("incomplete expression allowed", {
withr::local_options(asciicast_typing_speed = 0)
expect_silent(
record(textConnection("1 + (\n"), incomplete_error = FALSE)
)
})
test_that("timeout", {
withr::local_options(asciicast_typing_speed = 0)
expect_error(
record(textConnection("Sys.sleep(1)\n"), timeout = 0.1),
"asciicast timeout after line"
)
})
test_that("echo = FALSE", {
withr::local_options(asciicast_typing_speed = 0)
hello <- system.file(package = "asciicast", "examples", "hello.R")
cast <- record(hello, interactive = FALSE, echo = FALSE)
expect_snapshot(cast$output$data)
})
test_that("speed", {
hello <- system.file(package = "asciicast", "examples", "hello.R")
cast1 <- record(hello)
cast2 <- record(hello, speed = 10)
expect_true(
utils::tail(cast2$output$time, 1) < utils::tail(cast1$output$time, 1) /2
)
})
test_that("subprocess fails", {
mockery::stub(asciicast_start_process, "processx::poll", list("timeout"))
expect_error(
asciicast_start_process(),
"subprocess did not connect back"
)
})
test_that("startup crashes", {
# TODO: why does this fail?
if (!is_embedded()) {
skip("Fails on non-embedded R")
}
expect_error(
asciicast_start_process(
startup = quote(callr:::crash()),
interactive = FALSE
),
"asciicast process exited while running"
)
})
test_that("cannot send input, buffer is full", {
skip_on_os("windows") # TODO
expect_error(
record(textConnection(strrep("1 + ", 100000))),
"Cannot send input, buffer is full"
)
})
test_that("shift", {
expect_equal(shift(character()), character())
expect_equal(shift("a"), "")
expect_equal(shift(letters), c(letters[-1], ""))
})
test_that("add_empty_wait", {
withr::local_options(asciicast_typing_speed = 0)
cast1 <- record(textConnection("1+1\n\n2+2\n"), empty_wait = 0)
cast2 <- record(textConnection("1+1\n\n2+2\n"), empty_wait = 5)
expect_true(
utils::tail(cast1$output$time, 1) < utils::tail(cast2$output$time, 1) - 3
)
})
test_that("adjust_typing_speed", {
withr::local_options(asciicast_typing_speed = 0)
cast1 <- record(textConnection("1+1\n\n2+2\n"), empty_wait = 0)
data <- cast1$output
data1 <- adjust_typing_speed(data, 0.05)
data2 <- adjust_typing_speed(data, 0.5)
expect_true(
utils::tail(data1$time, 1) < utils::tail(data2$time, 1) - 1
)
empty <- data[integer(), ]
expect_equal(adjust_typing_speed(empty, 0.05), empty)
})
test_that("find_rem error", {
mockery::stub(find_rem, "get_embedded", "")
expect_error(
find_rem(),
"Cannot find embedded R executable"
)
})
test_that("forced pause", {
cast <- record(c(
"#! --",
"1 + 1",
"#! --",
"2 + 2"
))
cmds <- grep("^type:", cast$output$data, value=TRUE)
expect_snapshot(cmds)
})
test_that("edge case with no wait", {
cast <- record(c(
"#! --",
"1 + 1"
), end_wait = 0)
cmds <- grep("^type:", cast$output$data, value=TRUE)
expect_snapshot(cmds)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary.R
\name{summary}
\alias{summary}
\title{Calculate summary statistics for 'mobility.model' class}
\usage{
summary(object, probs, ac_lags, ...)
}
\arguments{
\item{object}{a \code{mobility.model} object (can also accept a \code{\link[coda:mcmc.list]{mcmc.list}} object)}
\item{probs}{numeric vector giving the quantiles to calculate for each parameter (default = \code{c(0.025, 0.5, 0.975)})}
\item{ac_lags}{numeric vector of lags over which to calculate autocorrelation of samples within chains (default = \code{c(2,5,10)})}
\item{...}{further arguments passed to or from other methods}
}
\value{
a dataframe with summary statistics
}
\description{
This is a wrapper function of \code{\link[MCMCvis:MCMCsummary]{MCMCsummary}} that calculates summary statistics for each
parameter in a \code{mobility.model} object. Summary statistics are calculated for all parameters across
each chain, along with convergence diagnostics such as the Gelman-Rubin convergence diagnostic (Rhat) and sample
auto-correlation for each parameter. If the model object contains deviance and penalty parameters, then Deviance Information
Criterion (DIC) is calculated and appended to the summary.
}
\examples{
mod <- mobility(data=mobility_matrices, model='gravity', type='transport', DIC=TRUE)
summary(mod)
}
\seealso{
Other model:
\code{\link{check}()},
\code{\link{compare}()},
\code{\link{fit_jags}()},
\code{\link{fit_prob_travel}()},
\code{\link{mobility}()},
\code{\link{predict}()},
\code{\link{residuals}()}
}
\author{
John Giles
}
\concept{model}
| /man/summary.Rd | no_license | COVID-19-Mobility-Data-Network/mobility | R | false | true | 1,620 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary.R
\name{summary}
\alias{summary}
\title{Calculate summary statistics for 'mobility.model' class}
\usage{
summary(object, probs, ac_lags, ...)
}
\arguments{
\item{object}{a \code{mobility.model} object (can also accept a \code{\link[coda:mcmc.list]{mcmc.list}} object)}
\item{probs}{numeric vector giving the quantiles to calculate for each parameter (default = \code{c(0.025, 0.5, 0.975)})}
\item{ac_lags}{numeric vector of lags over which to calculate autocorrelation of samples within chains (default = \code{c(2,5,10)})}
\item{...}{further arguments passed to or from other methods}
}
\value{
a dataframe with summary statistics
}
\description{
This is a wrapper function of \code{\link[MCMCvis:MCMCsummary]{MCMCsummary}} that calculates summary statistics for each
parameter in a \code{mobility.model} object. Summary statistics are calculated for all parameters across
each chain, along with convergence diagnostics such as the Gelman-Rubin convergence diagnostic (Rhat) and sample
auto-correlation for each parameter. If the model object contains deviance and penalty parameters, then Deviance Information
Criterion (DIC) is calculated and appended to the summary.
}
\examples{
mod <- mobility(data=mobility_matrices, model='gravity', type='transport', DIC=TRUE)
summary(mod)
}
\seealso{
Other model:
\code{\link{check}()},
\code{\link{compare}()},
\code{\link{fit_jags}()},
\code{\link{fit_prob_travel}()},
\code{\link{mobility}()},
\code{\link{predict}()},
\code{\link{residuals}()}
}
\author{
John Giles
}
\concept{model}
|
# Experiment to generate learning curves
# Setup: attach required packages, fix the RNG seed for reproducibility,
# point createdatasets at the local data directory, and define the two
# performance measures recorded at each learning-curve point.
library(methods)
library(RSSL)
library(createdatasets)
library(randomForest)
library(parallel)
set.seed(42)
setdatadir("data")
# Evaluation measures: misclassification error and average test loss.
measures <- list("Error"=measure_error,
                 "Average Loss Test"=measure_losstest)
# Determine how many CPU cores are available for this job.
# SLURM environment variables take precedence (per-task, then per-node);
# outside a batch scheduler, fall back to parallel::detectCores().
# Errors if no source yields a usable core count.
detectBatchCPUs <- function() {
  for (envvar in c("SLURM_CPUS_PER_TASK", "SLURM_JOB_CPUS_PER_NODE")) {
    count <- as.integer(Sys.getenv(envvar))
    if (!is.na(count)) {
      return(count)
    }
  }
  count <- parallel::detectCores()
  if (is.na(count)) {
    stop("Can't detect number of cores.")
  }
  count
}
# Benchmark datasets; some have missing values imputed with
# randomForest::na.roughfix before use.
datasets<-list("Haberman"=createHaberman(),
               "Ionosphere"=createIonosphere(),
               "Parkinsons"=createParkinsons(),
               "Diabetes"=na.roughfix(createDiabetes()),
               "Sonar"=createSonar(),
               "SPECT"=createSPECT(),
               "SPECTF"=createSPECTF(),
               "Transfusion"=createTransfusion(),
               "WDBC"=createWDBC(),
               "Mammography"=na.roughfix(createMammographicMass()),
               "Digit1"=createDigit1(),
               "USPS"=createUSPS(),
               "COIL2"=createCOIL2(),
               "BCI"=createBCI(),
               "g241c"=createG241C(),
               "g241d"=createG241N())
# One model formula per dataset (same names as `datasets`); some formulas
# exclude identifier columns from the predictors.
models <- list("Haberman"=formula(Survival~.),
               "Ionosphere"=formula(Return~.),
               "Parkinsons"=formula(status~ . -subject -recording),
               "Diabetes"=formula(Diabetes~.),
               "Sonar"=formula(Label ~ .),
               "SPECT"=formula(Diagnosis ~ .),
               "SPECTF"=formula(Diagnosis ~ .),
               "Transfusion"=formula(Donated ~ .),
               "WDBC"=formula(Diagnosis ~ . -ID),
               "Mammography"=formula(Severity ~ . -BIRADS),
               "Digit1"=formula(Class ~ .),
               "USPS"=formula(Class ~ .),
               "COIL2"=formula(Class ~ .),
               "BCI"=formula(Class ~ .),
               "g241c"=formula(Class ~ .),
               "g241d"=formula(Class ~ .))
# Classifiers compared in the learning curves. Each takes labeled data
# (X, y) and unlabeled data (X_u, y_u); only "Oracle" uses the true
# labels y_u (as an upper-bound baseline).
classifiers <- list(
  "Supervised" = function(X,y,X_u,y_u) {
    LeastSquaresClassifier(X,y)
  },
  "Soft" = function(X,y,X_u,y_u) {
    EMLeastSquaresClassifier(X,y,X_u, eps = 1e-8, method="block", objective="soft", max_iter=10000)
  },
  "Hard" = function(X,y,X_u,y_u) {
    EMLeastSquaresClassifier(X,y,X_u, eps = 1e-8, method="block", objective="hard", max_iter=10000)
  },
  "Oracle"=function(X,y,X_u,y_u) {
    LeastSquaresClassifier(rbind(X,X_u),unlist(list(y,y_u)),intercept=TRUE,x_center=TRUE,scale=FALSE)
  }
)
# Run the learning-curve experiment over unlabeled-set sizes 2..1024
# (powers of two), 1000 repeats, parallelized over detected cores,
# and save the result for later analysis.
lc <- LearningCurveSSL(models,datasets,
                       classifiers=classifiers,
                       measures=measures,
                       n_l="enough",repeats=1000,verbose=TRUE,
                       pre_scale = TRUE, pre_pca = TRUE,
                       low_level_cores = detectBatchCPUs(),sizes = 2^(1:10))
save(lc,file="R/learningcurves-enough.RData")
| /R/learningcurves-enough.R | no_license | jkrijthe/optimisticssl | R | false | false | 2,966 | r | # Experiment to generate learning curves
library(methods)
library(RSSL)
library(createdatasets)
library(randomForest)
library(parallel)
set.seed(42)
setdatadir("data")
measures <- list("Error"=measure_error,
"Average Loss Test"=measure_losstest)
detectBatchCPUs <- function() {
ncores <- as.integer(Sys.getenv("SLURM_CPUS_PER_TASK"))
if (is.na(ncores)) {
ncores <- as.integer(Sys.getenv("SLURM_JOB_CPUS_PER_NODE"))
}
if (is.na(ncores)) {
ncores <- parallel::detectCores()
}
if (is.na(ncores)) {
stop("Can't detect number of cores.")
}
return(ncores)
}
datasets<-list("Haberman"=createHaberman(),
"Ionosphere"=createIonosphere(),
"Parkinsons"=createParkinsons(),
"Diabetes"=na.roughfix(createDiabetes()),
"Sonar"=createSonar(),
"SPECT"=createSPECT(),
"SPECTF"=createSPECTF(),
"Transfusion"=createTransfusion(),
"WDBC"=createWDBC(),
"Mammography"=na.roughfix(createMammographicMass()),
"Digit1"=createDigit1(),
"USPS"=createUSPS(),
"COIL2"=createCOIL2(),
"BCI"=createBCI(),
"g241c"=createG241C(),
"g241d"=createG241N())
models <- list("Haberman"=formula(Survival~.),
"Ionosphere"=formula(Return~.),
"Parkinsons"=formula(status~ . -subject -recording),
"Diabetes"=formula(Diabetes~.),
"Sonar"=formula(Label ~ .),
"SPECT"=formula(Diagnosis ~ .),
"SPECTF"=formula(Diagnosis ~ .),
"Transfusion"=formula(Donated ~ .),
"WDBC"=formula(Diagnosis ~ . -ID),
"Mammography"=formula(Severity ~ . -BIRADS),
"Digit1"=formula(Class ~ .),
"USPS"=formula(Class ~ .),
"COIL2"=formula(Class ~ .),
"BCI"=formula(Class ~ .),
"g241c"=formula(Class ~ .),
"g241d"=formula(Class ~ .))
classifiers <- list(
"Supervised" = function(X,y,X_u,y_u) {
LeastSquaresClassifier(X,y)
},
"Soft" = function(X,y,X_u,y_u) {
EMLeastSquaresClassifier(X,y,X_u, eps = 1e-8, method="block", objective="soft", max_iter=10000)
},
"Hard" = function(X,y,X_u,y_u) {
EMLeastSquaresClassifier(X,y,X_u, eps = 1e-8, method="block", objective="hard", max_iter=10000)
},
"Oracle"=function(X,y,X_u,y_u) {
LeastSquaresClassifier(rbind(X,X_u),unlist(list(y,y_u)),intercept=TRUE,x_center=TRUE,scale=FALSE)
}
)
lc <- LearningCurveSSL(models,datasets,
classifiers=classifiers,
measures=measures,
n_l="enough",repeats=1000,verbose=TRUE,
pre_scale = TRUE, pre_pca = TRUE,
low_level_cores = detectBatchCPUs(),sizes = 2^(1:10))
save(lc,file="R/learningcurves-enough.RData")
|
#setwd("C:\\Users\\kelvinlim\\Desktop\\CAG")
#library(rJava)
#library(xlsxjars)
#library(openxlsx)
library(plyr)
#library(ggplot2)
library(lubridate)
library(igraph)
library(arules)
#library(datasets)
library(arulesViz)
#data=subset(data, select=c("From","To","Profile"))
#data=read.csv("CAG fake data2.csv")
#Vdata=read.csv("Vdata.csv")
# CHOSEN = "Movie Theatre"#"Ippudo Express"#"ZARA"#"Dnata Lounge"
# pfl = "Chinese, M, >64yrs"
#
#
# goPlot(CHOSEN,pfl)
# Plot an association-rule graph of shop-to-shop transitions for one
# customer profile, centered on a chosen shop.
#
# CHOSEN: shop name to center the graph on (drawn as a rectangle).
# pfl:    customer profile string used to filter the transition data.
#
# Reads "data/Biz Rule1.csv" (From/To/Profile transitions) and
# "data/Shops_kel.csv" (shop attributes: position, colour, average time),
# mines association rules with arules::apriori, expands one level from
# CHOSEN's top consequents, and plots the resulting igraph network.
goPlot<-function(CHOSEN,pfl)
{
data=read.csv("data/Biz Rule1.csv")
Vdata=read.csv("data/Shops_kel.csv")
str(data)
print(head(data))
# Keep only transitions for the requested profile.
data=data[data$Profile==pfl,][,c("From","To")]
# Mine rules with near-zero support/confidence thresholds so that
# essentially every observed From->To pair becomes a rule.
rules.all <-apriori(data,
                    parameter= list(supp=0.00000000000000025, conf=0.00000000000025,minlen=2,target = "rules"))
rules.all <- subset(rules.all, subset = rhs %pin% "To")
## subsetting rules
rules.1 <- subset(rules.all, subset = lhs %pin% CHOSEN)
# if(length(rules.1)>3)
#   rules.1 <- rules.1[1:3,]
# Flatten the rules for CHOSEN into a data frame, stripping the
# "From=..."/"To=..." item prefixes and trailing brace from the labels.
ruledf = data.frame(
  lhs = labels(lhs(rules.1))$elements,
  rhs = labels(rhs(rules.1))$elements,
  rules.1@quality)
ruledf$lhs<-substr(as.character(ruledf$lhs),7,nchar(as.character(ruledf$lhs))-1)
ruledf$rhs<-substr(as.character(ruledf$rhs),5,nchar(as.character(ruledf$rhs))-1)
# Expand one level: re-select rules whose antecedent is CHOSEN or one of
# up to three of its consequents.
l2<-ruledf$rhs
if(length(l2)>=3)
  rules.2<-subset(rules.all, subset = lhs %pin% CHOSEN | lhs %pin% l2[1] | lhs %pin% l2[2] | lhs %pin% l2[3])
if(length(l2)==2)
  rules.2<-subset(rules.all, subset = lhs %pin% CHOSEN | lhs %pin% l2[1] | lhs %pin% l2[2] )
if(length(l2)==1)
  rules.2<-subset(rules.all, subset = lhs %pin% CHOSEN | lhs %pin% l2[1] )
ruledf = data.frame(
  lhs = labels(lhs(rules.2))$elements,
  rhs = labels(rhs(rules.2))$elements,
  rules.2@quality)
ruledf$lhs<-substr(as.character(ruledf$lhs),7,nchar(as.character(ruledf$lhs))-1)
ruledf$rhs<-substr(as.character(ruledf$rhs),5,nchar(as.character(ruledf$rhs))-1)
g<-graph.data.frame(ruledf,directed=TRUE)
# Attach shop attributes to vertices, ordered to match V(g)$name.
Vdata1=Vdata[Vdata$ShopName %in% V(g)$name,]
Vdata1=Vdata1[match(V(g)$name,Vdata1$ShopName),]
V(g)$color=as.character(Vdata1$Colour)
V(g)$pop=as.numeric(Vdata1$AvgTime)
#calculate edge distance
# NOTE: the colnames()[k] renames below rely on merge() placing the
# by-column first and the merged columns next -- keep statement order.
ruledf <- (merge(Vdata1[,c("ShopName","posX","posY")], ruledf, by.x = 'ShopName',by.y="lhs"))
colnames(ruledf)[1]<-"lhs"
colnames(ruledf)[2]<-"lhs.x"
colnames(ruledf)[3]<-"lhs.y"
ruledf <- (merge(Vdata1[,c("ShopName","posX","posY")], ruledf, by.x = 'ShopName',by.y="rhs"))
colnames(ruledf)[1]<-"rhs"
colnames(ruledf)[2]<-"rhs.x"
colnames(ruledf)[3]<-"rhs.y"
# Euclidean distance between the two shops' floor positions.
ruledf$distance=sqrt((ruledf$rhs.x-ruledf$lhs.x)^2+(ruledf$rhs.y-ruledf$lhs.y)^2)
#calculate edge duration
ruledf <- merge(Vdata1[,c("ShopName","AvgTime")], ruledf, by.x = 'ShopName',by.y="lhs")
colnames(ruledf)[1]<-"lhs"
colnames(ruledf)[2]<-"lhs.AvgTime"
ruledf <- merge(Vdata1[,c("ShopName","AvgTime")], ruledf, by.x = 'ShopName',by.y="rhs")
colnames(ruledf)[1]<-"rhs"
colnames(ruledf)[2]<-"rhs.AvgTime"
ruledf$duration=ruledf$lhs.AvgTime+ruledf$rhs.AvgTime
ruledf<-subset(ruledf,select=c( "lhs", "rhs" , "rhs.AvgTime" , "lhs.AvgTime", "rhs.x" ,"rhs.y" ,"lhs.x" , "lhs.y" ,"support", "confidence", "lift" ,"distance" ,"duration" ))
# Rebuild the graph from the enriched edge list and re-attach vertex
# attributes (the vertex set may have changed).
g<-graph.data.frame(ruledf,directed=TRUE)
#ruledf<-ruledf[1:30,]
## Trying this block here too. OK
Vdata1=Vdata[Vdata$ShopName %in% V(g)$name,]
Vdata1=Vdata1[match(V(g)$name,Vdata1$ShopName),]
V(g)$color=as.character(Vdata1$Colour)
V(g)$pop=as.numeric(Vdata1$AvgTime)
# The chosen shop is drawn as a rectangle with a larger label; all
# other shops as circles.
for(i in 1:length(V(g)))
{
  if(V(g)$name[i]==CHOSEN)
    V(g)$pshape[i]="rectangle"
  if(V(g)$name[i]!=CHOSEN)
    V(g)$pshape[i]="circle"
}
for(i in 1:length(V(g)))
{
  if(V(g)$name[i]==CHOSEN)
    V(g)$pweight[i]=2
  if(V(g)$name[i]!=CHOSEN)
    V(g)$pweight[i]=1
}
par(mai=c(0,0,0,0))
# E(g)$weight<-seq(1,15.5,by=0.5)
# V(g)$pop<-seq(1,19,by=0.5)
# Fixed seed so the force-directed layout is reproducible.
set.seed(190)
# Edge width scales with rule support (rescaled to [1, 5]); edge labels
# show the walking distance; vertex size scales with average dwell time
# (rescaled to [8, 25]).
plot(g,
     edge.width=((5-1)*(E(g)$support-min(E(g)$support))/(max(E(g)$support)-min(E(g)$support)))+1,
     edge.arrow.size=0.45,
     edge.color="pink",
     edge.curved = F,
     edge.label=paste0(round(E(g)$distance,0),"m"),
     #edge.label=paste0(round(E(g)$support,4)*100,"%"),
     edge.label.color="black",
     #edge.label.dist = 0.2,
     vertex.label = V(g)$name,
     vertex.label.color = "black",
     vertex.label.dist=0,
     vertex.label.cex=V(g)$pweight,
     vertex.label.font=20,
     vertex.frame.color=V(g)$color,
     vertex.color=V(g)$color,
     vertex.shape = V(g)$pshape,
     #vertex.size = V(g)$pop,
     vertex.size = ((25-8)*(V(g)$pop-min(V(g)$pop))/(max(V(g)$pop)-min(V(g)$pop)))+8
     )
print(V(g)$name)
} #end of function
# tkplot(g,
# #edge.width=c(1,1,1,1),
# edge.arrow.size=c(0.3),
# edge.color="pink",
# edge.curved = T,
# vertex.label = V(g)$name,
# vertex.label.color = "dark blue",
# vertex.label.dist=0,
# vertex.label.cex=0.88,
# #vertex.label.font=20,
# vertex.frame.color='yellow',
# vertex.label.color='black',
# vertex.color=c("yellow","orange","green"),
# vertex.shape = "rectangle",
# vertex.size = 10,
# vertex.size2 = 10 )
# E(g)$support*10
# ((E(g)$support-min(E(g)$support))/(max(E(g)$support)-min(E(g)$support)))/2
#
#
# g <- make_ring(10)
| /codes/CAG2.R | no_license | tohweizhong/CAG-Hackathon-2015 | R | false | false | 5,324 | r | #setwd("C:\\Users\\kelvinlim\\Desktop\\CAG")
#library(rJava)
#library(xlsxjars)
#library(openxlsx)
library(plyr)
#library(ggplot2)
library(lubridate)
library(igraph)
library(arules)
#library(datasets)
library(arulesViz)
#data=subset(data, select=c("From","To","Profile"))
#data=read.csv("CAG fake data2.csv")
#Vdata=read.csv("Vdata.csv")
# CHOSEN = "Movie Theatre"#"Ippudo Express"#"ZARA"#"Dnata Lounge"
# pfl = "Chinese, M, >64yrs"
#
#
# goPlot(CHOSEN,pfl)
# Build and plot a directed "shopper flow" graph around the shop CHOSEN
# for the demographic profile `pfl`.
#
# Reads transition data (From/To shop pairs per profile) and shop metadata
# from CSV, mines association rules with arules::apriori(), expands the
# rule set one hop beyond CHOSEN, decorates edges with physical distance
# and dwell-time duration, and renders the result with igraph::plot().
#
# Side effects: reads data/Biz Rule1.csv and data/Shops_kel.csv, prints
# diagnostics to the console, and draws to the active graphics device.
goPlot<-function(CHOSEN,pfl)
{
data=read.csv("data/Biz Rule1.csv")
Vdata=read.csv("data/Shops_kel.csv")
str(data)
print(head(data))
# Keep only the From/To transition columns for the requested profile.
data=data[data$Profile==pfl,][,c("From","To")]
# Near-zero support/confidence thresholds: effectively mine every rule
# of length >= 2, then filter afterwards.
rules.all <-apriori(data,
parameter= list(supp=0.00000000000000025, conf=0.00000000000025,minlen=2,target = "rules"))
# Keep only rules whose consequent is a destination ("To=") item.
rules.all <- subset(rules.all, subset = rhs %pin% "To")
## subsetting rules
# First hop: rules whose antecedent mentions the chosen shop.
rules.1 <- subset(rules.all, subset = lhs %pin% CHOSEN)
# if(length(rules.1)>3)
# rules.1 <- rules.1[1:3,]
ruledf = data.frame(
lhs = labels(lhs(rules.1))$elements,
rhs = labels(rhs(rules.1))$elements,
rules.1@quality)
# Strip the "{From=" / "{To=" wrappers and the trailing "}" from the
# item labels, leaving bare shop names.
ruledf$lhs<-substr(as.character(ruledf$lhs),7,nchar(as.character(ruledf$lhs))-1)
ruledf$rhs<-substr(as.character(ruledf$rhs),5,nchar(as.character(ruledf$rhs))-1)
l2<-ruledf$rhs
# Second hop: re-query with CHOSEN plus up to three destination shops.
# NOTE(review): if l2 is empty, rules.2 is never assigned and the next
# data.frame() call errors -- confirm this case cannot occur upstream.
if(length(l2)>=3)
rules.2<-subset(rules.all, subset = lhs %pin% CHOSEN | lhs %pin% l2[1] | lhs %pin% l2[2] | lhs %pin% l2[3])
if(length(l2)==2)
rules.2<-subset(rules.all, subset = lhs %pin% CHOSEN | lhs %pin% l2[1] | lhs %pin% l2[2] )
if(length(l2)==1)
rules.2<-subset(rules.all, subset = lhs %pin% CHOSEN | lhs %pin% l2[1] )
ruledf = data.frame(
lhs = labels(lhs(rules.2))$elements,
rhs = labels(rhs(rules.2))$elements,
rules.2@quality)
ruledf$lhs<-substr(as.character(ruledf$lhs),7,nchar(as.character(ruledf$lhs))-1)
ruledf$rhs<-substr(as.character(ruledf$rhs),5,nchar(as.character(ruledf$rhs))-1)
g<-graph.data.frame(ruledf,directed=TRUE)
# Align shop metadata with the graph's vertex order.
Vdata1=Vdata[Vdata$ShopName %in% V(g)$name,]
Vdata1=Vdata1[match(V(g)$name,Vdata1$ShopName),]
V(g)$color=as.character(Vdata1$Colour)
V(g)$pop=as.numeric(Vdata1$AvgTime)
#calculate edge distance
# Attach (x, y) coordinates for both endpoints of every rule, then
# compute the Euclidean distance between the two shops.
ruledf <- (merge(Vdata1[,c("ShopName","posX","posY")], ruledf, by.x = 'ShopName',by.y="lhs"))
colnames(ruledf)[1]<-"lhs"
colnames(ruledf)[2]<-"lhs.x"
colnames(ruledf)[3]<-"lhs.y"
ruledf <- (merge(Vdata1[,c("ShopName","posX","posY")], ruledf, by.x = 'ShopName',by.y="rhs"))
colnames(ruledf)[1]<-"rhs"
colnames(ruledf)[2]<-"rhs.x"
colnames(ruledf)[3]<-"rhs.y"
ruledf$distance=sqrt((ruledf$rhs.x-ruledf$lhs.x)^2+(ruledf$rhs.y-ruledf$lhs.y)^2)
#calculate edge duration
# Duration of an edge = average dwell time at origin + at destination.
ruledf <- merge(Vdata1[,c("ShopName","AvgTime")], ruledf, by.x = 'ShopName',by.y="lhs")
colnames(ruledf)[1]<-"lhs"
colnames(ruledf)[2]<-"lhs.AvgTime"
ruledf <- merge(Vdata1[,c("ShopName","AvgTime")], ruledf, by.x = 'ShopName',by.y="rhs")
colnames(ruledf)[1]<-"rhs"
colnames(ruledf)[2]<-"rhs.AvgTime"
ruledf$duration=ruledf$lhs.AvgTime+ruledf$rhs.AvgTime
ruledf<-subset(ruledf,select=c( "lhs", "rhs" , "rhs.AvgTime" , "lhs.AvgTime", "rhs.x" ,"rhs.y" ,"lhs.x" , "lhs.y" ,"support", "confidence", "lift" ,"distance" ,"duration" ))
# Rebuild the graph now that the edge data frame carries the extra
# distance/duration attributes.
g<-graph.data.frame(ruledf,directed=TRUE)
#ruledf<-ruledf[1:30,]
## Trying this block here too. OK
Vdata1=Vdata[Vdata$ShopName %in% V(g)$name,]
Vdata1=Vdata1[match(V(g)$name,Vdata1$ShopName),]
V(g)$color=as.character(Vdata1$Colour)
V(g)$pop=as.numeric(Vdata1$AvgTime)
# Highlight the chosen shop: rectangle shape ...
for(i in 1:length(V(g)))
{
if(V(g)$name[i]==CHOSEN)
V(g)$pshape[i]="rectangle"
if(V(g)$name[i]!=CHOSEN)
V(g)$pshape[i]="circle"
}
# ... and a doubled label size.
for(i in 1:length(V(g)))
{
if(V(g)$name[i]==CHOSEN)
V(g)$pweight[i]=2
if(V(g)$name[i]!=CHOSEN)
V(g)$pweight[i]=1
}
par(mai=c(0,0,0,0))
# E(g)$weight<-seq(1,15.5,by=0.5)
# V(g)$pop<-seq(1,19,by=0.5)
# Fixed seed so the force-directed layout is reproducible across calls.
set.seed(190)
plot(g,
edge.width=((5-1)*(E(g)$support-min(E(g)$support))/(max(E(g)$support)-min(E(g)$support)))+1,
edge.arrow.size=0.45,
edge.color="pink",
edge.curved = F,
edge.label=paste0(round(E(g)$distance,0),"m"),
#edge.label=paste0(round(E(g)$support,4)*100,"%"),
edge.label.color="black",
#edge.label.dist = 0.2,
vertex.label = V(g)$name,
vertex.label.color = "black",
vertex.label.dist=0,
vertex.label.cex=V(g)$pweight,
vertex.label.font=20,
vertex.frame.color=V(g)$color,
vertex.color=V(g)$color,
vertex.shape = V(g)$pshape,
#vertex.size = V(g)$pop,
vertex.size = ((25-8)*(V(g)$pop-min(V(g)$pop))/(max(V(g)$pop)-min(V(g)$pop)))+8
)
print(V(g)$name)
} #end of function
# tkplot(g,
# #edge.width=c(1,1,1,1),
# edge.arrow.size=c(0.3),
# edge.color="pink",
# edge.curved = T,
# vertex.label = V(g)$name,
# vertex.label.color = "dark blue",
# vertex.label.dist=0,
# vertex.label.cex=0.88,
# #vertex.label.font=20,
# vertex.frame.color='yellow',
# vertex.label.color='black',
# vertex.color=c("yellow","orange","green"),
# vertex.shape = "rectangle",
# vertex.size = 10,
# vertex.size2 = 10 )
# E(g)$support*10
# ((E(g)$support-min(E(g)$support))/(max(E(g)$support)-min(E(g)$support)))/2
#
#
# g <- make_ring(10)
|
#' @title Convert effect size d into OR
#' @name convert_d2or
#'
#' @description Compute effect size \code{OR} from effect size \code{d}.
#'
#' @param d The effect size \code{d}.
#' @param se The standard error of \code{d}. One of \code{se} or \code{v}
#' must be specified.
#' @param v The variance of \code{d}. One of \code{se} or \code{v} must be
#' specified.
#' @param es.type Type of effect size odds ratio that should be returned.
#' May be \code{es.type = "logit"} or \code{es.type = "cox"}
#' (see 'Details').
#' @param info String with information on the transformation. Used for the
#' print-method. Usually, this argument can be ignored
#'
#' @inheritParams esc_beta
#' @inheritParams hedges_g
#'
#' @note Effect size is returned as \code{exp(log_values)} (odds ratio),
#' confidence intervals are also exponentiated. To get the log-values,
#' use \code{\link{convert_d2logit}}.
#' \strong{However}, variance and standard error of this function
#' are returned on the log-scale!
#'
#' @details Conversion from \code{d} to odds ratios can be done with two
#' methods:
#' \describe{
#' \item{\code{es.type = "logit"}}{uses the Hasselblad and Hedges logit method.}
#' \item{\code{es.type = "cox"}}{uses the modified logit method as proposed by Cox.
#' This method performs slightly better for rare or frequent events, i.e.
#' if the success rate is close to 0 or 1.}
#' }
#'
#' @return The effect size \code{es}, the standard error \code{se}, the variance
#' of the effect size \code{var}, the lower and upper confidence limits
#' \code{ci.lo} and \code{ci.hi}, the weight factor \code{w} and the
#' total sample size \code{totaln}.
#'
#' @references Lipsey MW, Wilson DB. 2001. Practical meta-analysis. Thousand Oaks, Calif: Sage Publications
#' \cr \cr
#' Wilson DB. 2016. Formulas Used by the "Practical Meta-Analysis Effect Size Calculator". Unpublished manuscript: George Mason University
#' \cr \cr
#' Cox DR. 1970. Analysis of binary data. New York: Chapman & Hall/CRC
#' \cr \cr
#' Hasselblad V, Hedges LV. 1995. Meta-analysis of screening and diagnostic tests. Psychological Bulletin 117(1): 167–178. \doi{10.1037/0033-2909.117.1.167}
#' \cr \cr
#' Borenstein M, Hedges LV, Higgins JPT, Rothstein HR. 2009. Introduction to Meta-Analysis. Chichester, West Sussex, UK: Wiley
#'
#' @examples
#' # d to odds ratio
#' convert_d2or(0.7, se = 0.5)
#' # odds ratio to d
#' convert_or2d(3.56, se = 0.91)
#'
#' @export
convert_d2or <- function(d, se, v, totaln,
                         es.type = c("logit", "cox"),
                         info = NULL, study = NULL) {
  # Resolve the conversion method; errors on anything other than
  # "logit" (Hasselblad & Hedges) or "cox" (Cox modified logit).
  es.type <- match.arg(es.type)
  # At least one of `se` / `v` is required. Warn (not stop) and return
  # an empty esc result so callers processing many studies can continue.
  # NOTE(review): a scalar `se`/`v` is assumed here; a vector would
  # break the `||` chain -- confirm intended usage.
  if ((missing(se) || is.null(se) || is.na(se)) && (missing(v) || is.null(v) || is.na(v))) {
    warning("Either `se` or `v` must be specified.", call. = FALSE)
    return(esc_generic(es = NA, v = NA, es.type = es.type, grp1n = NA, grp2n = NA, info = NA, study = NA))
  }
  # Prefer `se` when given: the variance is its square.
  if (!missing(se) && !is.null(se) && !is.na(se)) v <- se ^ 2
  # Normalise "no total n" to NULL. `is.null()` is tested before
  # `anyNA()` so an explicit `totaln = NULL` no longer yields a
  # zero-length condition (which `||` rejects in modern R).
  if (missing(totaln) || is.null(totaln) || anyNA(totaln)) totaln <- NULL
  # Default info string for the print method.
  if (is.null(info)) {
    info <- "effect size d to effect size OR"
    if (es.type == "cox") info <- paste0(info, "(Cox)")
  }
  if (es.type == "logit") {
    # Hasselblad & Hedges (1995): log(OR) = (pi / sqrt(3)) * d,
    # Var(log OR) = (pi^2 / 3) * Var(d).
    es <- pi / sqrt(3) * d
    v <- (pi ^ 2) / 3 * v
    measure <- "or"
  } else {
    # Cox (1970): log(OR) = d / 0.6061 = d * 1.65,
    # Var(log OR) = Var(d) / 0.6061^2 = Var(d) / 0.367.
    es <- d * 1.65
    v <- v / .367
    measure <- "cox-or"
  }
  # Point estimate and CI are exponentiated (odds-ratio scale);
  # `se`, `var` and the weight remain on the log scale (see @note).
  structure(
    class = c("esc", "convert_d2or"),
    list(
      es = exp(es),
      se = sqrt(v),
      var = v,
      ci.lo = exp(lower_d(es, v)),
      ci.hi = exp(upper_d(es, v)),
      w = 1 / v,
      totaln = totaln,
      measure = measure,
      info = info,
      study = study
    )
  )
}
| /R/convert_d2or.R | no_license | cran/esc | R | false | false | 4,189 | r | #' @title Convert effect size d into OR
#' @name convert_d2or
#'
#' @description Compute effect size \code{OR} from effect size \code{d}.
#'
#' @param d The effect size \code{d}.
#' @param se The standard error of \code{d}. One of \code{se} or \code{v}
#' must be specified.
#' @param v The variance of \code{d}. One of \code{se} or \code{v} must be
#' specified.
#' @param es.type Type of effect size odds ratio that should be returned.
#' May be \code{es.type = "logit"} or \code{es.type = "cox"}
#' (see 'Details').
#' @param info String with information on the transformation. Used for the
#' print-method. Usually, this argument can be ignored
#'
#' @inheritParams esc_beta
#' @inheritParams hedges_g
#'
#' @note Effect size is returned as \code{exp(log_values)} (odds ratio),
#' confidence intervals are also exponentiated. To get the log-values,
#' use \code{\link{convert_d2logit}}.
#' \strong{However}, variance and standard error of this function
#' are returned on the log-scale!
#'
#' @details Conversion from \code{d} to odds ratios can be done with two
#' methods:
#' \describe{
#' \item{\code{es.type = "logit"}}{uses the Hasselblad and Hedges logit method.}
#' \item{\code{es.type = "cox"}}{uses the modified logit method as proposed by Cox.
#' This method performs slightly better for rare or frequent events, i.e.
#' if the success rate is close to 0 or 1.}
#' }
#'
#' @return The effect size \code{es}, the standard error \code{se}, the variance
#' of the effect size \code{var}, the lower and upper confidence limits
#' \code{ci.lo} and \code{ci.hi}, the weight factor \code{w} and the
#' total sample size \code{totaln}.
#'
#' @references Lipsey MW, Wilson DB. 2001. Practical meta-analysis. Thousand Oaks, Calif: Sage Publications
#' \cr \cr
#' Wilson DB. 2016. Formulas Used by the "Practical Meta-Analysis Effect Size Calculator". Unpublished manuscript: George Mason University
#' \cr \cr
#' Cox DR. 1970. Analysis of binary data. New York: Chapman & Hall/CRC
#' \cr \cr
#' Hasselblad V, Hedges LV. 1995. Meta-analysis of screening and diagnostic tests. Psychological Bulletin 117(1): 167–178. \doi{10.1037/0033-2909.117.1.167}
#' \cr \cr
#' Borenstein M, Hedges LV, Higgins JPT, Rothstein HR. 2009. Introduction to Meta-Analysis. Chichester, West Sussex, UK: Wiley
#'
#' @examples
#' # d to odds ratio
#' convert_d2or(0.7, se = 0.5)
#' # odds ratio to d
#' convert_or2d(3.56, se = 0.91)
#'
#' @export
convert_d2or <- function(d, se, v, totaln,
                         es.type = c("logit", "cox"),
                         info = NULL, study = NULL) {
  # Resolve the conversion method; errors on anything other than
  # "logit" (Hasselblad & Hedges) or "cox" (Cox modified logit).
  es.type <- match.arg(es.type)
  # At least one of `se` / `v` is required. Warn (not stop) and return
  # an empty esc result so callers processing many studies can continue.
  # NOTE(review): a scalar `se`/`v` is assumed here; a vector would
  # break the `||` chain -- confirm intended usage.
  if ((missing(se) || is.null(se) || is.na(se)) && (missing(v) || is.null(v) || is.na(v))) {
    warning("Either `se` or `v` must be specified.", call. = FALSE)
    return(esc_generic(es = NA, v = NA, es.type = es.type, grp1n = NA, grp2n = NA, info = NA, study = NA))
  }
  # Prefer `se` when given: the variance is its square.
  if (!missing(se) && !is.null(se) && !is.na(se)) v <- se ^ 2
  # Normalise "no total n" to NULL. `is.null()` is tested before
  # `anyNA()` so an explicit `totaln = NULL` no longer yields a
  # zero-length condition (which `||` rejects in modern R).
  if (missing(totaln) || is.null(totaln) || anyNA(totaln)) totaln <- NULL
  # Default info string for the print method.
  if (is.null(info)) {
    info <- "effect size d to effect size OR"
    if (es.type == "cox") info <- paste0(info, "(Cox)")
  }
  if (es.type == "logit") {
    # Hasselblad & Hedges (1995): log(OR) = (pi / sqrt(3)) * d,
    # Var(log OR) = (pi^2 / 3) * Var(d).
    es <- pi / sqrt(3) * d
    v <- (pi ^ 2) / 3 * v
    measure <- "or"
  } else {
    # Cox (1970): log(OR) = d / 0.6061 = d * 1.65,
    # Var(log OR) = Var(d) / 0.6061^2 = Var(d) / 0.367.
    es <- d * 1.65
    v <- v / .367
    measure <- "cox-or"
  }
  # Point estimate and CI are exponentiated (odds-ratio scale);
  # `se`, `var` and the weight remain on the log scale (see @note).
  structure(
    class = c("esc", "convert_d2or"),
    list(
      es = exp(es),
      se = sqrt(v),
      var = v,
      ci.lo = exp(lower_d(es, v)),
      ci.hi = exp(upper_d(es, v)),
      w = 1 / v,
      totaln = totaln,
      measure = measure,
      info = info,
      study = study
    )
  )
}
|
library(stringr)
# Collapse every run of whitespace in `x` to a single space, then strip
# leading and trailing whitespace. Vectorised over character vectors.
regularizeString <- function(x) {
  collapsed <- str_replace_all(x, "\\s+", " ")
  str_trim(collapsed)
}
# Apply regularizeString() to every character column of `data`,
# returning the cleaned data frame. Non-character columns are untouched.
regularize <- function(data) {
  # vapply(is.character) is robust where sapply(class) == "character"
  # is not: class() can return more than one value per column (e.g.
  # c("ordered", "factor")), which breaks the equality comparison.
  charcols <- which(vapply(data, is.character, logical(1)))
  for (col in charcols) {
    data[[col]] <- regularizeString(data[[col]])
  }
  data
}
| /regularize.R | no_license | Peder2911/Relationizer | R | false | false | 283 | r |
library(stringr)
# Collapse every run of whitespace in `x` to a single space, then strip
# leading and trailing whitespace. Vectorised over character vectors.
regularizeString <- function(x) {
  collapsed <- str_replace_all(x, "\\s+", " ")
  str_trim(collapsed)
}
# Apply regularizeString() to every character column of `data`,
# returning the cleaned data frame. Non-character columns are untouched.
regularize <- function(data) {
  # vapply(is.character) is robust where sapply(class) == "character"
  # is not: class() can return more than one value per column (e.g.
  # c("ordered", "factor")), which breaks the equality comparison.
  charcols <- which(vapply(data, is.character, logical(1)))
  for (col in charcols) {
    data[[col]] <- regularizeString(data[[col]])
  }
  data
}
|
# # <---- subset data to the pheno pair
# import sample labels
# Sample metadata; assumed to carry one row per sample with an `SP`
# phenotype/species column -- TODO confirm against data/metadata.txt.
samples <- read.table("data/metadata.txt", sep = ",", header = TRUE)
# expression
# Gene-by-sample matrix; voom() below implies these are log2 values.
exprs <- read.table("data/expression_vals.txt", sep=",", header = TRUE)
# hi-c data
# Gene-by-sample Hi-C covariate matrix, aligned row-wise with `exprs`.
hic <- read.table("data/hic_vals.txt", sep=",", header = TRUE)
# Extract parameters
# `methyl_pair` is a historical name retained from an earlier analysis:
# here it holds the Hi-C covariate, not methylation.
exprs_pair <- exprs
methyl_pair <- hic
pheno <- samples$SP
# N: number of samples; ngenes: number of genes.
N <- ncol(exprs_pair)
ngenes <- nrow(exprs_pair)
# <---- Model 1: exprs ~ pheno
# specify pheno coding
design_1 <- model.matrix(~pheno)
# voom() expects counts, so undo the log2 transform first.
exprs_counts <- 2^exprs_pair
exprs_voom <- voom(exprs_counts, design=design_1, normalize.method = "none")
# Per-gene linear model of expression on phenotype (limma).
model_1 <- lmFit(exprs_voom, design_1)
model_1 <- eBayes(model_1)
# <---- Model 3: exprs corrected for methylation ~ pheno
# specify design matrix
# Residualise expression on the Hi-C covariate gene by gene, then fit
# the phenotype model on those residuals.
resid_exprs <- array(0, dim = dim(exprs_pair))
for (index in 1:nrow(exprs_pair)){
resid_exprs[index,] <- lm(t(exprs_pair[index, ]) ~ t(methyl_pair[index, ]))$resid
}
rownames(resid_exprs) <- rownames(exprs_pair)
model_3 <- lmFit(resid_exprs, design_1)
# get effect sizes
# beta1: phenotype effect on raw expression; beta3: phenotype effect on
# covariate-adjusted expression (column 2 = phenotype coefficient).
beta1 <- coef(model_1[,2])
beta3 <- coef(model_3[,2])
# Preallocate per-gene standard errors and the beta1/beta3 covariance.
se_beta1 <- se_beta2 <- se_beta3 <- cov_beta13 <- vector("numeric", ngenes)
# <---- get variances of beta1^S pheno effect
# SE(beta1_g) = sigma_g * sqrt([(X'X)^-1]_22), vectorised over genes.
se_beta1 <- model_1$sigma*sqrt((solve(t(design_1)%*%design_1))[2,2])
# checking the validity of the results
# Sanity check against limma's own covariance matrix (printed only).
head(cbind(se_beta1,model_1$sigma*sqrt(model_1$cov.coefficients[2,2])))
# <---- get variances of beta2 methylation effect
# For each gene g, compute SE(beta2_g) from the per-gene design matrix
# built on that gene's covariate values.
for (g in seq_along(se_beta2)) {
  design_2g <- model.matrix(~ as.numeric(methyl_pair[g, ]))
  sigma_g <- model_1$sigma[g]
  # BUG FIX: the original assigned to se_beta2[index], reusing the stale
  # `index` left over from the earlier residualisation loop, so only one
  # element was ever (repeatedly) written and the rest stayed 0.
  # Index by the current gene g instead.
  se_beta2[g] <- sigma_g * sqrt((solve(t(design_2g) %*% design_2g))[2, 2])
}
# <---- get variances of beta3 pheno effect
# A = (X'X)^-1 X' for the phenotype design; contr.vector picks out the
# phenotype coefficient (row 2).
A <- solve(t(design_1)%*%design_1)%*%t(design_1)
contr.vector <- array(c(0,1), dim=c(2,1))
# compute beta3 by hand
# can't directly apply limma because limma requires
# that sample covariates have the same values across genes
# Var(beta3_g) has three parts: Var(beta1_g), the propagated variance of
# the covariate coefficient, and a cross term.
for (g in 1:length(se_beta3)) {
M_g <- t(methyl_pair[g,])
design_2g <- model.matrix(~ as.numeric(M_g))
A_2g <- solve(t(design_2g)%*%design_2g)%*%t(design_2g)
sigma_g <- model_1$sigma[g]
var_beta2g <- (se_beta2^2)[g]
var_part1 <- (se_beta1[g])^2
var_part2 <- ( A%*%M_g%*%var_beta2g%*%t(M_g)%*%t(A) )[2,2]
var_part3 <- ( 2*(sigma_g^2)*A%*%t(A_2g)%*%contr.vector%*%t(M_g)%*%t(A) )[2,2]
se_beta3[g] <- sqrt(var_part1 + var_part2 + var_part3)
}
# cov(beta1,beta3)
# Covariance of the adjusted and unadjusted phenotype effects, needed
# for the variance of their difference below.
for (g in 1:length(cov_beta13)) {
M_g <- t(methyl_pair[g,])
design_2g <- model.matrix(~ as.numeric(M_g))
A_2g <- solve(t(design_2g)%*%design_2g)%*%t(design_2g)
sigma_g <- model_1$sigma[g]
var_part1 <- (se_beta1[g])^2
cov_beta13[g] <- var_part1 - (sigma_g^2)*((A %*% t(A_2g) %*%contr.vector%*%t(M_g)%*%t(A))[2,2])
}
# SE of (beta1 - beta3) via Var(a-b) = Var(a) + Var(b) - 2 Cov(a,b).
cov_diff_sqrt <- sqrt(se_beta1^2 + se_beta3^2 - 2*cov_beta13)
beta_diff <- beta1-beta3
# putting results in a list
# beta_diff: differences between species effect on expression
# and species effect on expression data after regressing out hi-C
df_reg <- list(beta_diff=beta_diff, cov_diff_sqrt=cov_diff_sqrt)
# applying ash
# df: degrees of freedom = total number of samples - 2
ash_reg <- ash(as.vector(beta_diff), cov_diff_sqrt, df= length(pheno)-2)
# save(df_reg, ash_reg,
#      reg3, reg1,
#      file="output/sobeltest.Rmd/results-reg.rda")
# Diagnostic 2x2 panel; genes significant at s-value < .05 are red.
par(mfrow=c(2,2), mar=c(4,4,4,1))
plot(x=df_reg$beta_diff,y=df_reg$cov_diff_sqrt, pch=16, cex=.6,
xlab="Effect size", ylab="Standard error",
main = "Effect size vs SE",
col=ifelse(ash_reg$result$svalue<.05, "red", "black"))
abline(v=0, lty=2, col="grey20")
plot(x=df_reg$beta_diff, y=ash_reg$result$PosteriorMean, pch=16, cex=.6,
xlab="Effect size", ylab="Posterior mean",
main = "Effect size vs Posterior Mean",
col=ifelse(ash_reg$result$svalue<.05, "red", "black"))
plot(x=df_reg$cov_diff_sqrt, y=ash_reg$result$PosteriorSD, pch=16, cex=.6,
xlab="Standard error", ylab="Posterior standard deviation",
main = "SE vs Posterior SD",
col=ifelse(ash_reg$result$svalue<.05, "red", "black"))
plot(x=df_reg$beta_diff, y=-log10(ash_reg$result$svalue), pch=16, cex=.6,
xlab="Effect size", ylab="-log10(s-value)",
main = "Effect size vs -log10(s-value)",
col=ifelse(ash_reg$result$svalue<.05, "red", "black"))
| /code/covariate-test-ittai.R | no_license | jhsiao999/mediation | R | false | false | 4,249 | r | # # <---- subset data to the pheno pair
# import sample labels
# Sample metadata; assumed to carry one row per sample with an `SP`
# phenotype/species column -- TODO confirm against data/metadata.txt.
samples <- read.table("data/metadata.txt", sep = ",", header = TRUE)
# expression
# Gene-by-sample matrix; voom() below implies these are log2 values.
exprs <- read.table("data/expression_vals.txt", sep=",", header = TRUE)
# hi-c data
# Gene-by-sample Hi-C covariate matrix, aligned row-wise with `exprs`.
hic <- read.table("data/hic_vals.txt", sep=",", header = TRUE)
# Extract parameters
# `methyl_pair` is a historical name retained from an earlier analysis:
# here it holds the Hi-C covariate, not methylation.
exprs_pair <- exprs
methyl_pair <- hic
pheno <- samples$SP
# N: number of samples; ngenes: number of genes.
N <- ncol(exprs_pair)
ngenes <- nrow(exprs_pair)
# <---- Model 1: exprs ~ pheno
# specify pheno coding
design_1 <- model.matrix(~pheno)
# voom() expects counts, so undo the log2 transform first.
exprs_counts <- 2^exprs_pair
exprs_voom <- voom(exprs_counts, design=design_1, normalize.method = "none")
# Per-gene linear model of expression on phenotype (limma).
model_1 <- lmFit(exprs_voom, design_1)
model_1 <- eBayes(model_1)
# <---- Model 3: exprs corrected for methylation ~ pheno
# specify design matrix
# Residualise expression on the Hi-C covariate gene by gene, then fit
# the phenotype model on those residuals.
resid_exprs <- array(0, dim = dim(exprs_pair))
for (index in 1:nrow(exprs_pair)){
resid_exprs[index,] <- lm(t(exprs_pair[index, ]) ~ t(methyl_pair[index, ]))$resid
}
rownames(resid_exprs) <- rownames(exprs_pair)
model_3 <- lmFit(resid_exprs, design_1)
# get effect sizes
# beta1: phenotype effect on raw expression; beta3: phenotype effect on
# covariate-adjusted expression (column 2 = phenotype coefficient).
beta1 <- coef(model_1[,2])
beta3 <- coef(model_3[,2])
# Preallocate per-gene standard errors and the beta1/beta3 covariance.
se_beta1 <- se_beta2 <- se_beta3 <- cov_beta13 <- vector("numeric", ngenes)
# <---- get variances of beta1^S pheno effect
# SE(beta1_g) = sigma_g * sqrt([(X'X)^-1]_22), vectorised over genes.
se_beta1 <- model_1$sigma*sqrt((solve(t(design_1)%*%design_1))[2,2])
# checking the validity of the results
# Sanity check against limma's own covariance matrix (printed only).
head(cbind(se_beta1,model_1$sigma*sqrt(model_1$cov.coefficients[2,2])))
# <---- get variances of beta2 methylation effect
# For each gene g, compute SE(beta2_g) from the per-gene design matrix
# built on that gene's covariate values.
for (g in seq_along(se_beta2)) {
  design_2g <- model.matrix(~ as.numeric(methyl_pair[g, ]))
  sigma_g <- model_1$sigma[g]
  # BUG FIX: the original assigned to se_beta2[index], reusing the stale
  # `index` left over from the earlier residualisation loop, so only one
  # element was ever (repeatedly) written and the rest stayed 0.
  # Index by the current gene g instead.
  se_beta2[g] <- sigma_g * sqrt((solve(t(design_2g) %*% design_2g))[2, 2])
}
# <---- get variances of beta3 pheno effect
# A = (X'X)^-1 X' for the phenotype design; contr.vector picks out the
# phenotype coefficient (row 2).
A <- solve(t(design_1)%*%design_1)%*%t(design_1)
contr.vector <- array(c(0,1), dim=c(2,1))
# compute beta3 by hand
# can't directly apply limma because limma requires
# that sample covariates have the same values across genes
# Var(beta3_g) has three parts: Var(beta1_g), the propagated variance of
# the covariate coefficient, and a cross term.
for (g in 1:length(se_beta3)) {
M_g <- t(methyl_pair[g,])
design_2g <- model.matrix(~ as.numeric(M_g))
A_2g <- solve(t(design_2g)%*%design_2g)%*%t(design_2g)
sigma_g <- model_1$sigma[g]
var_beta2g <- (se_beta2^2)[g]
var_part1 <- (se_beta1[g])^2
var_part2 <- ( A%*%M_g%*%var_beta2g%*%t(M_g)%*%t(A) )[2,2]
var_part3 <- ( 2*(sigma_g^2)*A%*%t(A_2g)%*%contr.vector%*%t(M_g)%*%t(A) )[2,2]
se_beta3[g] <- sqrt(var_part1 + var_part2 + var_part3)
}
# cov(beta1,beta3)
# Covariance of the adjusted and unadjusted phenotype effects, needed
# for the variance of their difference below.
for (g in 1:length(cov_beta13)) {
M_g <- t(methyl_pair[g,])
design_2g <- model.matrix(~ as.numeric(M_g))
A_2g <- solve(t(design_2g)%*%design_2g)%*%t(design_2g)
sigma_g <- model_1$sigma[g]
var_part1 <- (se_beta1[g])^2
cov_beta13[g] <- var_part1 - (sigma_g^2)*((A %*% t(A_2g) %*%contr.vector%*%t(M_g)%*%t(A))[2,2])
}
# SE of (beta1 - beta3) via Var(a-b) = Var(a) + Var(b) - 2 Cov(a,b).
cov_diff_sqrt <- sqrt(se_beta1^2 + se_beta3^2 - 2*cov_beta13)
beta_diff <- beta1-beta3
# putting results in a list
# beta_diff: differences between species effect on expression
# and species effect on expression data after regressing out hi-C
df_reg <- list(beta_diff=beta_diff, cov_diff_sqrt=cov_diff_sqrt)
# applying ash
# df: degrees of freedom = total number of samples - 2
ash_reg <- ash(as.vector(beta_diff), cov_diff_sqrt, df= length(pheno)-2)
# save(df_reg, ash_reg,
#      reg3, reg1,
#      file="output/sobeltest.Rmd/results-reg.rda")
# Diagnostic 2x2 panel; genes significant at s-value < .05 are red.
par(mfrow=c(2,2), mar=c(4,4,4,1))
plot(x=df_reg$beta_diff,y=df_reg$cov_diff_sqrt, pch=16, cex=.6,
xlab="Effect size", ylab="Standard error",
main = "Effect size vs SE",
col=ifelse(ash_reg$result$svalue<.05, "red", "black"))
abline(v=0, lty=2, col="grey20")
plot(x=df_reg$beta_diff, y=ash_reg$result$PosteriorMean, pch=16, cex=.6,
xlab="Effect size", ylab="Posterior mean",
main = "Effect size vs Posterior Mean",
col=ifelse(ash_reg$result$svalue<.05, "red", "black"))
plot(x=df_reg$cov_diff_sqrt, y=ash_reg$result$PosteriorSD, pch=16, cex=.6,
xlab="Standard error", ylab="Posterior standard deviation",
main = "SE vs Posterior SD",
col=ifelse(ash_reg$result$svalue<.05, "red", "black"))
plot(x=df_reg$beta_diff, y=-log10(ash_reg$result$svalue), pch=16, cex=.6,
xlab="Effect size", ylab="-log10(s-value)",
main = "Effect size vs -log10(s-value)",
col=ifelse(ash_reg$result$svalue<.05, "red", "black"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rne_capped.R
\name{get_T_rne_history}
\alias{get_T_rne_history}
\title{Get the intermediate steps in from t = T to t = 1 for
a T-RNE or capped RNE that has been solved with
\code{save.history = TRUE}}
\usage{
get_T_rne_history(g)
}
\description{
Get the intermediate steps in from t = T to t = 1 for
a T-RNE or capped RNE that has been solved with
\code{save.history = TRUE}
}
| /man/get_T_rne_history.Rd | no_license | skranz/RelationalContracts | R | false | true | 455 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rne_capped.R
\name{get_T_rne_history}
\alias{get_T_rne_history}
\title{Get the intermediate steps in from t = T to t = 1 for
a T-RNE or capped RNE that has been solved with
\code{save.history = TRUE}}
\usage{
get_T_rne_history(g)
}
\description{
Get the intermediate steps in from t = T to t = 1 for
a T-RNE or capped RNE that has been solved with
\code{save.history = TRUE}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/class-read_cell_part.R
\name{read_cell_part-class}
\alias{read_cell_part-class}
\title{\code{read_cell_part} class}
\description{
The \code{read_cell_part} class is based on list,
created in order to store (possible) intermediate \code{\link[=read_cells]{read_cells}} output in detailed format.
\strong{Note:} This class is designed mainly for \emph{internal use}.
It is recommended to manipulate objects of this class only for cases where desired output is not coming.
}
\section{Properties of \code{read_cell_part}}{
Objects of class \code{read_cell_part} \strong{may} have following named nodes (the nodes may change based on stage):
\itemize{
\item \code{file_name} : file name which was given to process
\item \code{stage} : stage at which it was last processed.
\item \code{info} : a list containing further information on type and content of the file (provided the file got read)
\item \code{is_empty} : whether the file contains no 'known tabular information'
\item \code{cell_list} : list of \code{\link[=cell_df-class]{cell_df}}
(possibly after \code{Value Attribute Classification} done if stage is higher than \code{make_cells})
\item \code{cell_analysis_list} : list of \code{\link[=cell_analysis-class]{cell_analysis}}
\item \code{final_composition} : final composition (a \code{tibble}) with only main columns
(or all column if \code{compose_main_cols_only = FALSE})
\item \code{final_composition_main} : only appear if \code{compose_main_cols_only = FALSE}.
This holds final composition (a \code{tibble}) with only main columns
}
}
\section{Applicable methods on \code{read_cell_part}}{
\itemize{
\item \code{print}: Prints identifier of the class and the stage at which it is last processed.
}
}
\keyword{internal}
| /man/read_cell_part-class.Rd | permissive | dondealban/tidycells | R | false | true | 1,816 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/class-read_cell_part.R
\name{read_cell_part-class}
\alias{read_cell_part-class}
\title{\code{read_cell_part} class}
\description{
The \code{read_cell_part} class is based on list,
created in order to store (possible) intermediate \code{\link[=read_cells]{read_cells}} output in detailed format.
\strong{Note:} This class is designed mainly for \emph{internal use}.
It is recommended to manipulate objects of this class only for cases where desired output is not coming.
}
\section{Properties of \code{read_cell_part}}{
Objects of class \code{read_cell_part} \strong{may} have following named nodes (the nodes may change based on stage):
\itemize{
\item \code{file_name} : file name which was given to process
\item \code{stage} : stage at which it was last processed.
\item \code{info} : a list containing further information on type and content of the file (provided the file got read)
\item \code{is_empty} : whether the file contains no 'known tabular information'
\item \code{cell_list} : list of \code{\link[=cell_df-class]{cell_df}}
(possibly after \code{Value Attribute Classification} done if stage is higher than \code{make_cells})
\item \code{cell_analysis_list} : list of \code{\link[=cell_analysis-class]{cell_analysis}}
\item \code{final_composition} : final composition (a \code{tibble}) with only main columns
(or all column if \code{compose_main_cols_only = FALSE})
\item \code{final_composition_main} : only appear if \code{compose_main_cols_only = FALSE}.
This holds final composition (a \code{tibble}) with only main columns
}
}
\section{Applicable methods on \code{read_cell_part}}{
\itemize{
\item \code{print}: Prints identifier of the class and the stage at which it is last processed.
}
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trialTime.R
\name{trialTime}
\alias{trialTime}
\title{Extracts trial times from raw .asc files}
\usage{
trialTime(data_list, maxtrial = 999, startFlag = "SYNCTIME",
endFlag = "DISPLAY OFF")
}
\arguments{
\item{data_list}{Input of data files to be processed. This can be specified in three ways:
\enumerate{
\item a directory that contains all the files (it will select all files ending with ".asc",
and order them by participant number, if present).
\item Directory to a txt file that contains all the .asc data file names inside:
e.g., data_list= "C:/My Data/data_list.txt".
In the .txt file, the directory for each .asc data file should appear on a separate row,
e.g.: C:/My Data/subject1.asc \cr
C:/My Data/subject2.asc
\item A directory to a single .asc file: e.g., data_list= "C:/My Data/subject1.asc".
}}
\item{maxtrial}{Maximum number of trials in the experiment}
\item{startFlag}{The flag in the data that indicates the start of trial (default: SYNCTIME in Eyetrack/Eyelink)}
\item{endFlag}{The flag in the data that indicates the end of the trial (default: DISPLAY OFF in Eyetrack)}
}
\description{
Extracts trial times from raw .asc files
}
\author{
Martin R. Vasilev
}
| /man/trialTime.Rd | no_license | martin-vasilev/EMreading | R | false | true | 1,262 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trialTime.R
\name{trialTime}
\alias{trialTime}
\title{Extracts trial times from raw .asc files}
\usage{
trialTime(data_list, maxtrial = 999, startFlag = "SYNCTIME",
endFlag = "DISPLAY OFF")
}
\arguments{
\item{data_list}{Input of data files to be processed. This can be specified in three ways:
\enumerate{
\item a directory that contains all the files (it will select all files ending with ".asc",
and order them by participant number, if present).
\item Directory to a txt file that contains all the .asc data file names inside:
e.g., data_list= "C:/My Data/data_list.txt".
In the .txt file, the directory for each .asc data file should appear on a separate row,
e.g.: C:/My Data/subject1.asc /n
C:/My Data/subject2.asc
\item A directory to a single .asc file: e.g., data_list= "C:/My Data/subject1.asc".
}}
\item{maxtrial}{Maximum number of trials in the experiment}
\item{startFlag}{The flag in the data that indicates the start of trial (default: SYNCTIME in Eyetrack/Eyelink)}
\item{endFlag}{The flag in the data that indicates the end of the trial (default: DISPLAY OFF in Eyetrack)}
}
\description{
Extracts trial times from raw .asc files
}
\author{
Martin R. Vasilev
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/surv.R
\name{surv_test}
\alias{surv_test}
\alias{lr_pval}
\alias{lr_text}
\alias{tt_pval}
\alias{tt_text}
\alias{hr_pval}
\alias{hr_text}
\alias{pw_pval}
\alias{pw_text}
\alias{c_text}
\alias{cc_pval}
\alias{cc_text}
\title{Survival curve tests}
\usage{
lr_pval(object, details = FALSE, data = NULL, ...)
lr_text(formula, data, rho = 0, ..., details = TRUE, pFUN = NULL)
tt_pval(object, details = FALSE, data = NULL, ...)
tt_text(formula, data, ..., details = TRUE, pFUN = NULL)
hr_pval(object, details = FALSE, data = NULL, ...)
hr_text(formula, data, ..., details = TRUE, pFUN = NULL)
pw_pval(object, details = FALSE, data = NULL, ..., method = "none")
pw_text(formula, data, ..., details = TRUE, pFUN = NULL, method = "none")
c_text(
formula,
data,
tau = NULL,
iter = 1000L,
seed = 1L,
digits = 2L,
conf = 0.95,
show_conf = TRUE,
details = FALSE
)
cc_pval(formula1, formula2, data, tau = NULL, iter = 1000L, seed = 1L)
cc_text(
formula1,
formula2,
data,
tau = NULL,
iter = 1000L,
seed = 1L,
digits = 2L,
conf = 0.95,
show_conf = TRUE,
details = TRUE
)
}
\arguments{
\item{object}{a \code{\link{survfit}}, \code{\link{survdiff}}, or
\code{\link{coxph}} object; alternatively a
\code{\link[=Surv]{survival formula}} in which case \code{data} must be
given}
\item{details}{logical; \code{TRUE} returns statistic, degrees of freedom,
and p-value where \code{FALSE} returns only a pvalue}
\item{formula, data, rho, ...}{passed to \code{\link{survdiff}} or
\code{\link{coxph}}}
\item{pFUN}{logical; if \code{TRUE}, p-values are formatted with
\code{\link{pvalr}}; if \code{FALSE}, no formatting is performed;
alternatively, a function can be passed which should take a numeric value
and return a character string (or a value to be coerced) for printing}
\item{method}{for \code{pw_*}, the method used to adjust p-values for
multiple comparisons (default is \code{"none"}); see
\code{\link{p.adjust.methods}}}
\item{tau, iter, seed}{arguments passed to \code{\link[survC1]{Inf.Cval}} or
\code{\link[survC1]{Inf.Cval.Delta}}}
\item{digits, conf, show_conf}{for \code{c_text} and \code{cc_text}, options
to control the text output}
\item{formula1, formula2}{for \code{cc_pva} and \code{cc_text}, the formulas
of the two models to compare}
}
\description{
Internal functions for \code{\link{survdiff}} and \code{\link{survfit}}
objects. Current methods include log-rank (\code{lr_*}) and pairwise
(\code{pw_*}) log-rank tests (by default although the exact test may be
controlled with the \code{rho} parameter passed to \code{survdiff}); a
trend test described by Tarone (\code{tt_*}); and Wald tests of
coefficients in a Cox regression (\code{hr_*}).
\code{*_pval} functions take (\code{survfit} or \code{survdiff}) objects
or formulas and compute test statistics, p-values, etc. and return a
numeric vector or list.
\code{*_text} functions format the test results for plotting and return
an expression or vector of character strings.
Note that \code{pw_pval} and \code{pw_text} do not support formulas
with more than one predictor, e.g., \code{y ~ a + b}. An equivalent
formula is acceptable, e.g., \code{y ~ x} where \code{x} is the
\code{interaction(a, b)} or similar. See \code{\link{survdiff_pairs}}
for more details and examples.
\code{c_text}, \code{cc_pval}, and \code{cc_text} are convenience functions
for \code{\link[survC1]{Inf.Cval}} and \code{\link[survC1]{Inf.Cval.Delta}}
to compute and compare c-statistics on censored data. \code{c_text} shows
c for a single model where \code{cc_text} and \code{cc_pval} compare two
models; see Uno (2011).
}
\examples{
\dontrun{
library('survival')
data('larynx', package = 'KMsurv')
larynx$stage <- factor(larynx$stage)
form <- Surv(time, delta) ~ stage
sf <- survfit(form, larynx)
sd <- survdiff(form, larynx)
kmplot(sf, lr_test = TRUE)
## log-rank
rawr:::lr_pval(sf)
rawr:::lr_pval(sd, TRUE)
rawr:::lr_text(Surv(time, delta) ~ stage, larynx)
## pairwise log-rank
sf$call$formula <- form
rawr:::pw_pval(sf)
rawr:::pw_text(sf)
## tarone trend
rawr:::tt_pval(sf)
rawr:::tt_pval(sd, TRUE)
rawr:::tt_text(Surv(time, delta) ~ stage, larynx)
## compare
chi <- coxph(Surv(time, delta) ~ stage, larynx)$score
list(chi = chi, p.value = pchisq(chi, 1, lower.tail = FALSE))
## hazard ratio/wald p-values
rawr:::hr_pval(sf)
rawr:::hr_pval(sd, TRUE)
rawr:::hr_text(Surv(time, delta) ~ stage, larynx)
## c-statistics for censored data (uno, 2011)
f1 <- Surv(time, delta) ~ stage
f2 <- Surv(time, delta) ~ stage + age
rawr:::c_text(f1, larynx)
rawr:::c_text(f2, larynx, details = TRUE)
rawr:::cc_text(f1, f2, larynx)
rawr:::cc_pval(f1, f2, larynx)
}
}
\references{
Tarone, Robert E. Tests for Trend in Life Table Analysis. \emph{Biometrika}
\strong{62} (Dec 1975), 679-82.
Tarone, see also:
\url{https://stat.ethz.ch/pipermail/r-help/2008-April/160209.html}
Uno, Hajime, et al. On the C-statistics for Evaluating Overall Adequacy of
Risk Prediction Procedures with Censored Survival Data. \emph{Statistics
in Medicine} \strong{30}, no. 10 (May 2011), 1105-17.
}
\seealso{
\code{\link{survdiff_pairs}}
}
| /man/surv_test.Rd | no_license | raredd/rawr | R | false | true | 5,198 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/surv.R
\name{surv_test}
\alias{surv_test}
\alias{lr_pval}
\alias{lr_text}
\alias{tt_pval}
\alias{tt_text}
\alias{hr_pval}
\alias{hr_text}
\alias{pw_pval}
\alias{pw_text}
\alias{c_text}
\alias{cc_pval}
\alias{cc_text}
\title{Survival curve tests}
\usage{
lr_pval(object, details = FALSE, data = NULL, ...)
lr_text(formula, data, rho = 0, ..., details = TRUE, pFUN = NULL)
tt_pval(object, details = FALSE, data = NULL, ...)
tt_text(formula, data, ..., details = TRUE, pFUN = NULL)
hr_pval(object, details = FALSE, data = NULL, ...)
hr_text(formula, data, ..., details = TRUE, pFUN = NULL)
pw_pval(object, details = FALSE, data = NULL, ..., method = "none")
pw_text(formula, data, ..., details = TRUE, pFUN = NULL, method = "none")
c_text(
formula,
data,
tau = NULL,
iter = 1000L,
seed = 1L,
digits = 2L,
conf = 0.95,
show_conf = TRUE,
details = FALSE
)
cc_pval(formula1, formula2, data, tau = NULL, iter = 1000L, seed = 1L)
cc_text(
formula1,
formula2,
data,
tau = NULL,
iter = 1000L,
seed = 1L,
digits = 2L,
conf = 0.95,
show_conf = TRUE,
details = TRUE
)
}
\arguments{
\item{object}{a \code{\link{survfit}}, \code{\link{survdiff}}, or
\code{\link{coxph}} object; alternatively a
\code{\link[=Surv]{survival formula}} in which case \code{data} must be
given}
\item{details}{logical; \code{TRUE} returns statistic, degrees of freedom,
and p-value where \code{FALSE} returns only a pvalue}
\item{formula, data, rho, ...}{passed to \code{\link{survdiff}} or
\code{\link{coxph}}}
\item{pFUN}{logical; if \code{TRUE}, p-values are formatted with
\code{\link{pvalr}}; if \code{FALSE}, no formatting is performed;
alternatively, a function can be passed which should take a numeric value
and return a character string (or a value to be coerced) for printing}
\item{method}{for \code{pw_*}, the method used to adjust p-values for
multiple comparisons (default is \code{"none"}); see
\code{\link{p.adjust.methods}}}
\item{tau, iter, seed}{arguments passed to \code{\link[survC1]{Inf.Cval}} or
\code{\link[survC1]{Inf.Cval.Delta}}}
\item{digits, conf, show_conf}{for \code{c_text} and \code{cc_text}, options
to control the text output}
\item{formula1, formula2}{for \code{cc_pval} and \code{cc_text}, the formulas
of the two models to compare}
}
\description{
Internal functions for \code{\link{survdiff}} and \code{\link{survfit}}
objects. Current methods include log-rank (\code{lr_*}) and pairwise
(\code{pw_*}) log-rank tests (by default although the exact test may be
controlled with the \code{rho} parameter passed to \code{survdiff}); a
trend test described by Tarone (\code{tt_*}); and Wald tests of
coefficients in a Cox regression (\code{hr_*}).
\code{*_pval} functions take (\code{survfit} or \code{survdiff}) objects
or formulas and compute test statistics, p-values, etc. and return a
numeric vector or list.
\code{*_text} functions format the test results for plotting and return
an expression or vector of character strings.
Note that \code{pw_pval} and \code{pw_text} do not support formulas
with more than one predictor, e.g., \code{y ~ a + b}. An equivalent
formula is acceptable, e.g., \code{y ~ x} where \code{x} is the
\code{interaction(a, b)} or similar. See \code{\link{survdiff_pairs}}
for more details and examples.
\code{c_text}, \code{cc_pval}, and \code{cc_text} are convenience functions
for \code{\link[survC1]{Inf.Cval}} and \code{\link[survC1]{Inf.Cval.Delta}}
to compute and compare c-statistics on censored data. \code{c_text} shows
c for a single model where \code{cc_text} and \code{cc_pval} compare two
models; see Uno (2011).
}
\examples{
\dontrun{
library('survival')
data('larynx', package = 'KMsurv')
larynx$stage <- factor(larynx$stage)
form <- Surv(time, delta) ~ stage
sf <- survfit(form, larynx)
sd <- survdiff(form, larynx)
kmplot(sf, lr_test = TRUE)
## log-rank
rawr:::lr_pval(sf)
rawr:::lr_pval(sd, TRUE)
rawr:::lr_text(Surv(time, delta) ~ stage, larynx)
## pairwise log-rank
sf$call$formula <- form
rawr:::pw_pval(sf)
rawr:::pw_text(sf)
## tarone trend
rawr:::tt_pval(sf)
rawr:::tt_pval(sd, TRUE)
rawr:::tt_text(Surv(time, delta) ~ stage, larynx)
## compare
chi <- coxph(Surv(time, delta) ~ stage, larynx)$score
list(chi = chi, p.value = pchisq(chi, 1, lower.tail = FALSE))
## hazard ratio/wald p-values
rawr:::hr_pval(sf)
rawr:::hr_pval(sd, TRUE)
rawr:::hr_text(Surv(time, delta) ~ stage, larynx)
## c-statistics for censored data (uno, 2011)
f1 <- Surv(time, delta) ~ stage
f2 <- Surv(time, delta) ~ stage + age
rawr:::c_text(f1, larynx)
rawr:::c_text(f2, larynx, details = TRUE)
rawr:::cc_text(f1, f2, larynx)
rawr:::cc_pval(f1, f2, larynx)
}
}
\references{
Tarone, Robert E. Tests for Trend in Life Table Analysis. \emph{Biometrika}
\strong{62} (Dec 1975), 679-82.
Tarone, see also:
\url{https://stat.ethz.ch/pipermail/r-help/2008-April/160209.html}
Uno, Hajime, et al. On the C-statistics for Evaluating Overall Adequacy of
Risk Prediction Procedures with Censored Survival Data. \emph{Statistics
in Medicine} \strong{30}, no. 10 (May 2011), 1105-17.
}
\seealso{
\code{\link{survdiff_pairs}}
}
|
# Render one course deck (Rmds/<arquivo>.Rmd) to a Beamer PDF in output_pdf/
# and extract its R code chunks to RScripts/<arquivo>.R.
library(here)
library(rmarkdown)

# Folder holding the .Rmd sources.
pasta <- "Rmds"

# Deck to build -- uncomment exactly one.
# arquivo <- "00_apresenta_curso"
# arquivo <- "01_um_ensaio_aleatorizado"
# arquivo <- "02_estrutura"
# arquivo <- "03_inferencia_causal_em_experimentos_aleatorizados"
# arquivo <- "05_entre_estudos_observacionais_e_experimentos"
arquivo <- "02_OficinaDAG_PPGEpi_SII"
# arquivo <- "02_OficinaDAG_PPGEpi"
# arquivo <- "03_OficinaDAG_PPGEpi"

# Derived input/output file names.
input       <- here(pasta, paste0(arquivo, ".Rmd"))
arquivo_pdf <- paste0(arquivo, ".pdf")
arquivo_r   <- paste0(arquivo, ".R")

# Beamer slide configuration shared by all decks.
formato_beamer <- beamer_presentation(
  theme       = "AnnArbor",
  highlight   = "zenburn",
  colortheme  = "dolphin",
  fonttheme   = "structurebold",
  includes    = list(in_header = here("styles", "mystyle.tex")),
  slide_level = 2,
  keep_tex    = FALSE,
  fig_caption = FALSE
)

# Build the PDF slides.
rmarkdown::render(
  input         = input,
  output_format = formato_beamer,
  output_file   = arquivo_pdf,
  output_dir    = here("output_pdf"),
  encoding      = "UTF-8"
)

# Keep a plain .R copy of the deck's code chunks.
knitr::purl(
  input         = input,
  output        = here("RScripts", arquivo_r),
  documentation = 1,
  encoding      = "UTF-8"
)
| /RScripts/render_beamer.R | no_license | rdosreis/EPI13DINTER | R | false | false | 1,495 | r | library(here)
library(rmarkdown)
# Render one course deck (Rmds/<arquivo>.Rmd) to a Beamer PDF in output_pdf/
# and extract its R code chunks to RScripts/<arquivo>.R.
# ('here' is loaded above.)

# Folder holding the .Rmd sources.
pasta <- "Rmds"

# Deck to build -- uncomment exactly one.
# arquivo <- "00_apresenta_curso"
# arquivo <- "01_um_ensaio_aleatorizado"
# arquivo <- "02_estrutura"
# arquivo <- "03_inferencia_causal_em_experimentos_aleatorizados"
# arquivo <- "05_entre_estudos_observacionais_e_experimentos"
arquivo <- "02_OficinaDAG_PPGEpi_SII"
# arquivo <- "02_OficinaDAG_PPGEpi"
# arquivo <- "03_OficinaDAG_PPGEpi"

# Derived input/output file names.
input       <- here(pasta, paste0(arquivo, ".Rmd"))
arquivo_pdf <- paste0(arquivo, ".pdf")
arquivo_r   <- paste0(arquivo, ".R")

# Beamer slide configuration shared by all decks.
formato_beamer <- beamer_presentation(
  theme       = "AnnArbor",
  highlight   = "zenburn",
  colortheme  = "dolphin",
  fonttheme   = "structurebold",
  includes    = list(in_header = here("styles", "mystyle.tex")),
  slide_level = 2,
  keep_tex    = FALSE,
  fig_caption = FALSE
)

# Build the PDF slides.
rmarkdown::render(
  input         = input,
  output_format = formato_beamer,
  output_file   = arquivo_pdf,
  output_dir    = here("output_pdf"),
  encoding      = "UTF-8"
)

# Keep a plain .R copy of the deck's code chunks.
knitr::purl(
  input         = input,
  output        = here("RScripts", arquivo_r),
  documentation = 1,
  encoding      = "UTF-8"
)
|
# Auto-generated fuzzer input: a degenerate argument list (empty x vector,
# non-finite y) for ggforce's unexported enclose_points() routine.
testlist <- list(id = 0L, x = numeric(0), y = Inf)
# Invoke the C++-backed internal function with the fuzzed arguments.
result <- do.call(ggforce:::enclose_points,testlist)
str(result) | /ggforce/inst/testfiles/enclose_points/libFuzzer_enclose_points/enclose_points_valgrind_files/1610030280-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 115 | r | testlist <- list(id = 0L, x = numeric(0), y = Inf)
# Invoke ggforce's unexported, C++-backed enclose_points() with the fuzzed
# argument list built above.
result <- do.call(ggforce:::enclose_points,testlist)
# Print the structure of whatever the fuzz target returned.
str(result)
# Exploratory comparison of reporting-rate changes: interactive density plots
# of Change.in.Score for observations whose reporting rate rose by more than
# 0.05 on each of two measures.
library(dplyr)

Compare <- read.csv("~/Compare.csv")
Compare

# Summaries of the rows with a > .05 increase in each reporting rate.
# (The stray second argument previously passed to summary() landed in the
# positional 'maxsum' slot and did nothing useful, so it was dropped.)
summary(filter(Compare, Change.in.Res.Pos.Rep.Rate > .05))
summary(filter(Compare, Change.in.TC.Rep.Rate > .05))

A <- subset(Compare, Change.in.Res.Pos.Rep.Rate > .05)
B <- subset(Compare, Change.in.TC.Rep.Rate > .05)

library(plotly)
library(ggplot2)

set.seed(1234)

# Density of score changes for the Res.Pos subset.  attach(A)/attach(B) were
# removed: the plot calls name their data frame explicitly via data=, and
# attach() only pollutes the search path.
qplot(A)
p <- qplot(Change.in.Score, data = A, geom = "density",
           fill = Change.in.Res.Pos.Rep.Rate, alpha = I(.5),
           main = "Distribution of Scores", xlab = "Score",
           ylab = "Density")
p <- ggplotly(p)
p

# Same view for the TC subset.
qplot(B)
p <- qplot(Change.in.Score, data = B, geom = "density",
           fill = Change.in.TC.Rep.Rate, alpha = I(.5),
           main = "Distribution of Scores", xlab = "Score",
           ylab = "Density")
p <- ggplotly(p)
p
| /Compare.R | no_license | Lan131/2017 | R | false | false | 816 | r | library(dplyr)
# Exploratory comparison of reporting-rate changes: interactive density plots
# of Change.in.Score for observations whose reporting rate rose by more than
# 0.05 on each of two measures.  (dplyr is loaded above.)
Compare <- read.csv("~/Compare.csv")
Compare

# Summaries of the rows with a > .05 increase in each reporting rate.
# (The stray second argument previously passed to summary() landed in the
# positional 'maxsum' slot and did nothing useful, so it was dropped.)
summary(filter(Compare, Change.in.Res.Pos.Rep.Rate > .05))
summary(filter(Compare, Change.in.TC.Rep.Rate > .05))

A <- subset(Compare, Change.in.Res.Pos.Rep.Rate > .05)
B <- subset(Compare, Change.in.TC.Rep.Rate > .05)

library(plotly)
library(ggplot2)

set.seed(1234)

# Density of score changes for the Res.Pos subset.  attach(A)/attach(B) were
# removed: the plot calls name their data frame explicitly via data=, and
# attach() only pollutes the search path.
qplot(A)
p <- qplot(Change.in.Score, data = A, geom = "density",
           fill = Change.in.Res.Pos.Rep.Rate, alpha = I(.5),
           main = "Distribution of Scores", xlab = "Score",
           ylab = "Density")
p <- ggplotly(p)
p

# Same view for the TC subset.
qplot(B)
p <- qplot(Change.in.Score, data = B, geom = "density",
           fill = Change.in.TC.Rep.Rate, alpha = I(.5),
           main = "Distribution of Scores", xlab = "Score",
           ylab = "Density")
p <- ggplotly(p)
p
|
# plotting immunogenecity profile for given sequence
# Keunwan Park, Kbio
library(ggplot2)
#library(gridExtra)
#library(RGraphics)
#library(cowplot)
options<-commandArgs(trailingOnly=T)
if(length(options) < 2) stop("Invalid argument number\n\nRscript .r [immunogenecity pred output] [out png file, only prefix!]\n")
if(!is.na(options[1])) input_dat=options[1]
if(!is.na(options[2])) out_png=options[2]
data <- read.table(input_dat,header=T)
#colnames(data) <- c("seq_id","aa","allele","score")
x_lab=vector()
x_idx=vector()
for(i in c(1:nrow(data)) ){
idx <- as.integer( as.numeric(data[i,1]) )
x_lab[ idx ] = paste(data[i,1],data[i,2],sep="")
x_idx[ idx ] = data[i,1]
}
print(x_idx)
score1=as.vector(matrix(0,1,length(x_lab)))
score5=as.vector(matrix(0,1,length(x_lab)))
score10=as.vector(matrix(0,1,length(x_lab)))
for(i in c(1:nrow(data)) ){
idx <- as.integer( as.numeric(data[i,1]) )
if(data[i,"score"] < 1){
score1[idx] = score1[idx] + 1
}
if(data[i,"score"] < 5){
score5[idx] = score5[idx] + 1
}
if(data[i,"score"] < 10){
score10[idx] = score10[idx] + 1
}
}
score1_class=as.vector(matrix(0,1,length(x_lab)))
score5_class=as.vector(matrix(0,1,length(x_lab)))
score10_class=as.vector(matrix(0,1,length(x_lab)))
## ---- CUT OFF for promiscuous binding -----------
Promiscuity_cut = 0.25 # %
## ------------------------------------------------
n_allele = length(levels(data[,"allele"]))
for(i in c(1:length(score1))){
if(score1[i]/n_allele > Promiscuity_cut){
score1_class[i] = "red"
}else{
score1_class[i] = "white"
}
if(score5[i]/n_allele > Promiscuity_cut){
score5_class[i] = "orange"
}else{
score5_class[i] = "white"
}
if(score10[i]/n_allele > Promiscuity_cut){
score10_class[i] = "yellow"
}else{
score10_class[i] = "white"
}
}
df1 <- data.frame( idx=x_idx, seq_id = x_lab, promiscuity = score1_class, value = score1, class="score1");
df5 <- data.frame( idx=x_idx, seq_id = x_lab, promiscuity = score5_class, value = score5, class="score5");
df10 <- data.frame( idx=x_idx, seq_id = x_lab, promiscuity = score10_class, value = score10, class="score10");
df <- rbind(df1,df5,df10)
base_size <- 9
col <- c("red","orange","yellow","white")
p1 <- ggplot(df, aes(class,idx)) +
geom_tile(aes(fill=df$promiscuity),colour="grey50",size=0.5) +
scale_fill_identity("Class", labels =c("Promiscuous1","Promiscuous5","Promiscuous10","X"), breaks = col, guide = "legend") +
#scale_fill_gradient(low = "white", high = "red",guide="legend")
theme_grey(base_size = base_size) +
labs(x = "", y = "", title = ">25%" ) +
coord_fixed(ratio=0.7) +
scale_y_discrete(expand = c(0, 0),limits=c(min(df$idx):max(df$idx)),labels=x_lab) +
#scale_y_discrete(expand = c(0, 0)) +
scale_x_discrete(expand = c(0, 0),labels=c("P1","P5","P10")) +
theme_bw() +
theme(plot.title = element_text(size = 10), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.border = element_rect(fill = NA, colour = "black", size = 1) ) +
#theme(legend.position = "none",axis.ticks = element_blank(), axis.text.x = element_text(size = base_size, angle = 0, hjust = 0, colour = "black"), axis.text.y = element_text(size = base_size*0.8, hjust = 1, colour = "grey50"))
theme(legend.position = "none",axis.ticks = element_blank(), axis.text.x = element_text(size = base_size, angle=300, hjust = 0, colour = "grey50"), axis.text.y = element_text(size = base_size*0.8, hjust = 1, colour = "grey50"))
ratio <- (length(x_lab)/200)
hei= as.integer(35*ratio)
#if(hei > 40){ hei = 39}
ggsave( file = paste(out_png,"_25.png",sep=""), height=hei,limitsize=FALSE )
### CUT 2, this is really stupid
## ---- CUT OFF for promiscuous binding -----------
Promiscuity_cut = 0.35 # %
## ------------------------------------------------
for(i in c(1:length(score1))){
if(score1[i]/n_allele > Promiscuity_cut){
score1_class[i] = "red"
}else{
score1_class[i] = "white"
}
if(score5[i]/n_allele > Promiscuity_cut){
score5_class[i] = "orange"
}else{
score5_class[i] = "white"
}
if(score10[i]/n_allele > Promiscuity_cut){
score10_class[i] = "yellow"
}else{
score10_class[i] = "white"
}
}
df1 <- data.frame( idx=x_idx, seq_id = x_lab, promiscuity = score1_class, value = score1, class="score1");
df5 <- data.frame( idx=x_idx, seq_id = x_lab, promiscuity = score5_class, value = score5, class="score5");
df10 <- data.frame( idx=x_idx, seq_id = x_lab, promiscuity = score10_class, value = score10, class="score10");
df <- rbind(df1,df5,df10)
p2 <- ggplot(df, aes(class,idx)) +
geom_tile(aes(fill=df$promiscuity),colour="grey50",size=0.5) +
scale_fill_identity("Class", labels =c("Promiscuous1","Promiscuous5","Promiscuous10","X"), breaks = col, guide = "legend") +
#scale_fill_gradient(low = "white", high = "red",guide="legend")
theme_grey(base_size = base_size) +
labs(x = "", y = "", title = ">35%" ) +
coord_fixed(ratio=0.7) +
scale_y_discrete(expand = c(0, 0),limits=c(min(df$idx):max(df$idx)),labels=x_lab) +
#scale_y_discrete(expand = c(0, 0)) +
scale_x_discrete(expand = c(0, 0),labels=c("P1","P5","P10")) +
theme_bw() +
theme(plot.title = element_text(size = 10), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.border = element_rect(fill = NA, colour = "black", size = 1) ) +
theme(legend.position = "none",axis.ticks = element_blank(), axis.text.x = element_text(size = base_size, angle = 300, hjust = 0, colour = "grey50"), axis.text.y = element_text(size = base_size*0.8, hjust = 1, colour = "grey50"))
ggsave( file = paste(out_png,"_35.png",sep=""), height=hei,limitsize=FALSE )
### CUT 3, this is really stupid, too
## ---- CUT OFF for promiscuous binding -----------
Promiscuity_cut = 0.5 # %
## ------------------------------------------------
for(i in c(1:length(score1))){
if(score1[i]/n_allele > Promiscuity_cut){
score1_class[i] = "red"
}else{
score1_class[i] = "white"
}
if(score5[i]/n_allele > Promiscuity_cut){
score5_class[i] = "orange"
}else{
score5_class[i] = "white"
}
if(score10[i]/n_allele > Promiscuity_cut){
score10_class[i] = "yellow"
}else{
score10_class[i] = "white"
}
}
df1 <- data.frame( idx=x_idx, seq_id = x_lab, promiscuity = score1_class, value = score1, class="score1");
df5 <- data.frame( idx=x_idx, seq_id = x_lab, promiscuity = score5_class, value = score5, class="score5");
df10 <- data.frame( idx=x_idx, seq_id = x_lab, promiscuity = score10_class, value = score10, class="score10");
df <- rbind(df1,df5,df10)
p3 <- ggplot(df, aes(class,idx)) +
geom_tile(aes(fill=df$promiscuity),colour="grey50",size=0.5) +
scale_fill_identity("Class", labels =c("Promiscuous1","Promiscuous5","Promiscuous10","X"), breaks = col, guide = "legend") +
#scale_fill_gradient(low = "white", high = "red",guide="legend")
theme_grey(base_size = base_size) +
labs(x = "", y = "",title=">50%") +
coord_fixed(ratio=0.7) +
scale_y_discrete(expand = c(0, 0),limits=c(min(df$idx):max(df$idx)),labels=x_lab) +
#scale_y_discrete(expand = c(0, 0)) +
scale_x_discrete(expand = c(0, 0),labels=c("P1","P5","P10")) +
theme_bw() +
theme(plot.title = element_text(size = 10), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.border = element_rect(fill = NA, colour = "black", size = 1) ) +
theme(legend.position = "none", axis.ticks = element_blank(), axis.text.x = element_text(size = base_size, angle = 300, hjust = 0, colour = "grey50"), axis.text.y = element_text(size = base_size*0.8, hjust = 1, colour = "grey50"))
ggsave( file = paste(out_png,"_50.png",sep=""), height=hei ,limitsize=FALSE )
##------------------Multiple Plots----
#grid.arrange(p1, p2, p3, legend, ncol=4, nrow=1, main="Promiscous Activity",widths=c(2.3, 2.3, 2.3,0.8))
#grid.arrange(p1, ncol=1, nrow=1, main="Promiscous Activity")
#save_plot(out_png, plot.mpg,base_aspect_ratio = 1.3)
#pdf(out_png,height=hei)
#grid.newpage()
# Create layout : nrow = 2, ncol = 2
#pushViewport(viewport(layout = grid.layout(1, 3)))
# A helper function to define a region on the layout
#define_region <- function(row, col){
# viewport(layout.pos.row = row, layout.pos.col = col)
#}
# Arrange the plots
#print(p1, vp = define_region(1, 1))
#print(p2, vp = define_region(1, 2))
#print(p3, vp = define_region(1, 3))
#save_plot(out_png, base_aspect_ratio = 1.3)
#ggsave( file = out_png, height=hei )
#dev.off()
| /script/kbio/plot_immune_profile_step2.r | permissive | seok0801/Kepre | R | false | false | 8,388 | r |
# plotting immunogenecity profile for given sequence
# Keunwan Park, Kbio
library(ggplot2)
#library(gridExtra)
#library(RGraphics)
#library(cowplot)
options<-commandArgs(trailingOnly=T)
if(length(options) < 2) stop("Invalid argument number\n\nRscript .r [immunogenecity pred output] [out png file, only prefix!]\n")
if(!is.na(options[1])) input_dat=options[1]
if(!is.na(options[2])) out_png=options[2]
data <- read.table(input_dat,header=T)
#colnames(data) <- c("seq_id","aa","allele","score")
x_lab=vector()
x_idx=vector()
for(i in c(1:nrow(data)) ){
idx <- as.integer( as.numeric(data[i,1]) )
x_lab[ idx ] = paste(data[i,1],data[i,2],sep="")
x_idx[ idx ] = data[i,1]
}
print(x_idx)
score1=as.vector(matrix(0,1,length(x_lab)))
score5=as.vector(matrix(0,1,length(x_lab)))
score10=as.vector(matrix(0,1,length(x_lab)))
for(i in c(1:nrow(data)) ){
idx <- as.integer( as.numeric(data[i,1]) )
if(data[i,"score"] < 1){
score1[idx] = score1[idx] + 1
}
if(data[i,"score"] < 5){
score5[idx] = score5[idx] + 1
}
if(data[i,"score"] < 10){
score10[idx] = score10[idx] + 1
}
}
score1_class=as.vector(matrix(0,1,length(x_lab)))
score5_class=as.vector(matrix(0,1,length(x_lab)))
score10_class=as.vector(matrix(0,1,length(x_lab)))
## ---- CUT OFF for promiscuous binding -----------
Promiscuity_cut = 0.25 # %
## ------------------------------------------------
n_allele = length(levels(data[,"allele"]))
for(i in c(1:length(score1))){
if(score1[i]/n_allele > Promiscuity_cut){
score1_class[i] = "red"
}else{
score1_class[i] = "white"
}
if(score5[i]/n_allele > Promiscuity_cut){
score5_class[i] = "orange"
}else{
score5_class[i] = "white"
}
if(score10[i]/n_allele > Promiscuity_cut){
score10_class[i] = "yellow"
}else{
score10_class[i] = "white"
}
}
df1 <- data.frame( idx=x_idx, seq_id = x_lab, promiscuity = score1_class, value = score1, class="score1");
df5 <- data.frame( idx=x_idx, seq_id = x_lab, promiscuity = score5_class, value = score5, class="score5");
df10 <- data.frame( idx=x_idx, seq_id = x_lab, promiscuity = score10_class, value = score10, class="score10");
df <- rbind(df1,df5,df10)
base_size <- 9
col <- c("red","orange","yellow","white")
p1 <- ggplot(df, aes(class,idx)) +
geom_tile(aes(fill=df$promiscuity),colour="grey50",size=0.5) +
scale_fill_identity("Class", labels =c("Promiscuous1","Promiscuous5","Promiscuous10","X"), breaks = col, guide = "legend") +
#scale_fill_gradient(low = "white", high = "red",guide="legend")
theme_grey(base_size = base_size) +
labs(x = "", y = "", title = ">25%" ) +
coord_fixed(ratio=0.7) +
scale_y_discrete(expand = c(0, 0),limits=c(min(df$idx):max(df$idx)),labels=x_lab) +
#scale_y_discrete(expand = c(0, 0)) +
scale_x_discrete(expand = c(0, 0),labels=c("P1","P5","P10")) +
theme_bw() +
theme(plot.title = element_text(size = 10), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.border = element_rect(fill = NA, colour = "black", size = 1) ) +
#theme(legend.position = "none",axis.ticks = element_blank(), axis.text.x = element_text(size = base_size, angle = 0, hjust = 0, colour = "black"), axis.text.y = element_text(size = base_size*0.8, hjust = 1, colour = "grey50"))
theme(legend.position = "none",axis.ticks = element_blank(), axis.text.x = element_text(size = base_size, angle=300, hjust = 0, colour = "grey50"), axis.text.y = element_text(size = base_size*0.8, hjust = 1, colour = "grey50"))
ratio <- (length(x_lab)/200)
hei= as.integer(35*ratio)
#if(hei > 40){ hei = 39}
ggsave( file = paste(out_png,"_25.png",sep=""), height=hei,limitsize=FALSE )
### CUT 2, this is really stupid
## ---- CUT OFF for promiscuous binding -----------
Promiscuity_cut = 0.35 # %
## ------------------------------------------------
for(i in c(1:length(score1))){
if(score1[i]/n_allele > Promiscuity_cut){
score1_class[i] = "red"
}else{
score1_class[i] = "white"
}
if(score5[i]/n_allele > Promiscuity_cut){
score5_class[i] = "orange"
}else{
score5_class[i] = "white"
}
if(score10[i]/n_allele > Promiscuity_cut){
score10_class[i] = "yellow"
}else{
score10_class[i] = "white"
}
}
df1 <- data.frame( idx=x_idx, seq_id = x_lab, promiscuity = score1_class, value = score1, class="score1");
df5 <- data.frame( idx=x_idx, seq_id = x_lab, promiscuity = score5_class, value = score5, class="score5");
df10 <- data.frame( idx=x_idx, seq_id = x_lab, promiscuity = score10_class, value = score10, class="score10");
df <- rbind(df1,df5,df10)
p2 <- ggplot(df, aes(class,idx)) +
geom_tile(aes(fill=df$promiscuity),colour="grey50",size=0.5) +
scale_fill_identity("Class", labels =c("Promiscuous1","Promiscuous5","Promiscuous10","X"), breaks = col, guide = "legend") +
#scale_fill_gradient(low = "white", high = "red",guide="legend")
theme_grey(base_size = base_size) +
labs(x = "", y = "", title = ">35%" ) +
coord_fixed(ratio=0.7) +
scale_y_discrete(expand = c(0, 0),limits=c(min(df$idx):max(df$idx)),labels=x_lab) +
#scale_y_discrete(expand = c(0, 0)) +
scale_x_discrete(expand = c(0, 0),labels=c("P1","P5","P10")) +
theme_bw() +
theme(plot.title = element_text(size = 10), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.border = element_rect(fill = NA, colour = "black", size = 1) ) +
theme(legend.position = "none",axis.ticks = element_blank(), axis.text.x = element_text(size = base_size, angle = 300, hjust = 0, colour = "grey50"), axis.text.y = element_text(size = base_size*0.8, hjust = 1, colour = "grey50"))
ggsave( file = paste(out_png,"_35.png",sep=""), height=hei,limitsize=FALSE )
### CUT 3, this is really stupid, too
## ---- CUT OFF for promiscuous binding -----------
Promiscuity_cut = 0.5 # %
## ------------------------------------------------
for(i in c(1:length(score1))){
if(score1[i]/n_allele > Promiscuity_cut){
score1_class[i] = "red"
}else{
score1_class[i] = "white"
}
if(score5[i]/n_allele > Promiscuity_cut){
score5_class[i] = "orange"
}else{
score5_class[i] = "white"
}
if(score10[i]/n_allele > Promiscuity_cut){
score10_class[i] = "yellow"
}else{
score10_class[i] = "white"
}
}
df1 <- data.frame( idx=x_idx, seq_id = x_lab, promiscuity = score1_class, value = score1, class="score1");
df5 <- data.frame( idx=x_idx, seq_id = x_lab, promiscuity = score5_class, value = score5, class="score5");
df10 <- data.frame( idx=x_idx, seq_id = x_lab, promiscuity = score10_class, value = score10, class="score10");
df <- rbind(df1,df5,df10)
p3 <- ggplot(df, aes(class,idx)) +
geom_tile(aes(fill=df$promiscuity),colour="grey50",size=0.5) +
scale_fill_identity("Class", labels =c("Promiscuous1","Promiscuous5","Promiscuous10","X"), breaks = col, guide = "legend") +
#scale_fill_gradient(low = "white", high = "red",guide="legend")
theme_grey(base_size = base_size) +
labs(x = "", y = "",title=">50%") +
coord_fixed(ratio=0.7) +
scale_y_discrete(expand = c(0, 0),limits=c(min(df$idx):max(df$idx)),labels=x_lab) +
#scale_y_discrete(expand = c(0, 0)) +
scale_x_discrete(expand = c(0, 0),labels=c("P1","P5","P10")) +
theme_bw() +
theme(plot.title = element_text(size = 10), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.border = element_rect(fill = NA, colour = "black", size = 1) ) +
theme(legend.position = "none", axis.ticks = element_blank(), axis.text.x = element_text(size = base_size, angle = 300, hjust = 0, colour = "grey50"), axis.text.y = element_text(size = base_size*0.8, hjust = 1, colour = "grey50"))
ggsave( file = paste(out_png,"_50.png",sep=""), height=hei ,limitsize=FALSE )
##------------------Multiple Plots----
#grid.arrange(p1, p2, p3, legend, ncol=4, nrow=1, main="Promiscous Activity",widths=c(2.3, 2.3, 2.3,0.8))
#grid.arrange(p1, ncol=1, nrow=1, main="Promiscous Activity")
#save_plot(out_png, plot.mpg,base_aspect_ratio = 1.3)
#pdf(out_png,height=hei)
#grid.newpage()
# Create layout : nrow = 2, ncol = 2
#pushViewport(viewport(layout = grid.layout(1, 3)))
# A helper function to define a region on the layout
#define_region <- function(row, col){
# viewport(layout.pos.row = row, layout.pos.col = col)
#}
# Arrange the plots
#print(p1, vp = define_region(1, 1))
#print(p2, vp = define_region(1, 2))
#print(p3, vp = define_region(1, 3))
#save_plot(out_png, base_aspect_ratio = 1.3)
#ggsave( file = out_png, height=hei )
#dev.off()
|
library(readxl)
library(GenomicRanges)
library(data.table)
library(dplyr)
library(tidyverse)
############################################
### Table 1: Peak SNPs with p < 5e-9, independent loci as identified from conditional analysis
############################################
## File created by Matt, merging all primary and conditional results for 59 variants
multiVarRes<-read.table("~/Research/OneDrive/telomere/gwas/results/hits_summary_with_ancestry_with_joint_20200512.txt", header = TRUE, stringsAsFactors = FALSE, sep="\t") %>% select(-novelty)
## File from me with all primary results (all + population subgroups) and some conditional results, with OASIS annotation
condRes<-read.csv(gzfile("~/Research/OneDrive/telomere/gwas/results/MERGED_allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_p_lt_5e-9_conditionalAllSNPResSub_OASISAnno.csv.gz"), header=TRUE, stringsAsFactors = FALSE)
allRes<-left_join(multiVarRes, condRes)
## File from Matt with results from tests for heterogeneity added
hetRes <- read.csv("~/Research/OneDrive/telomere/gwas/results/hits_summary_with_ancestry_with_joint_20200512_wHetTest.csv", header = TRUE, stringsAsFactors = FALSE)
hetRes <- hetRes %>% select(snpID, Q, Qp, I2, I2CI)
allRes <- left_join(allRes, hetRes)
## File with some locus info, including novelty (do not use column from Matt's results)
moreInfo <- read_excel("~/Research/OneDrive/telomere/gwas/results/forRasikaOASISLookup-withnovelty_v2_20200512.xlsx")
moreInfo <- moreInfo %>% mutate(asterisk = ifelse(is.na(asterisk) , "", asterisk)) %>% select(snpID, asterisk, novelty)
allRes <- left_join(allRes, moreInfo)
allRes <- allRes %>% mutate(annotation = ifelse(Type %in% c("exonic", "intronic") & !(Function == ""), Function, RglmDB)) %>% mutate(annotation = ifelse(annotation == "", NA, annotation), Qp = ifelse(Qp == 1, NA, Qp))
allResForTable <- allRes %>% select(chr, pos, LocusName.Final, rsNum, asterisk, novelty, annotation, MAC, pval_primary, PVE_Primary, pval_joint, pval_joint_white, pval_joint_black, pval_joint_hl, pval_joint_asian, Est_joint, Est_joint_white, Est_joint_black, Est_joint_hl, Est_joint_asian, Qp)
allResForTable <- allResForTable %>% mutate(asterisk = ifelse(pval_primary > 5e-9, "*", asterisk), Est_joint = 1000*Est_joint, Est_joint_white = 1000*Est_joint_white, Est_joint_black = 1000*Est_joint_black, Est_joint_hl = 1000*Est_joint_hl, Est_joint_asian = 1000* Est_joint_asian) %>% select(chr:rsNum, asterisk, novelty:Qp)
allResForTable <- allResForTable %>% arrange(as.numeric(chr), pos)
write.csv(allResForTable, file = "~/Research/OneDrive/telomere/gwas/manuscript/tableCSVs/Table1_20200519.csv", row.names=FALSE, quote = FALSE, na = "-")
write.csv(allRes %>% arrange(as.numeric(chr), pos), file = "~/Research/OneDrive/telomere/gwas/results/MERGED_allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_p_lt_5e-9_conditionalAllSNPResSub_OASISAnno_jointResults_20200512.csv", row.names = FALSE, quote = TRUE)
#allResForNancy <- allRes %>% select(chr, pos, snpID, LocusName.Final, rsNum, ref, alt, Est_joint_pooled = Est_joint, SE_joint_pooled = SE_joint, pval_joint_pooled = pval_joint, Est_joint_white, SE_joint_white, pval_joint_white, Est_joint_black, SE_joint_black, pval_joint_black, Est_joint_hl, SE_joint_hl, pval_joint_hl, Est_joint_asian, SE_joint_asian, pval_joint_asian ) %>% mutate_at(vars(Est_joint_pooled, SE_joint_pooled, Est_joint_white, SE_joint_white, Est_joint_black, SE_joint_black, Est_joint_hl, SE_joint_hl, Est_joint_asian, SE_joint_asian), .funs = funs(. * 1000)) %>% arrange(as.numeric(chr), pos)
#write.csv(allResForNancy, file = "~/Research/OneDrive/telomere/gwas/results/jointResultsForNancy_20200512.csv", row.names = FALSE, quote = FALSE)
#######################################################
### Table S1: Counts by study and ancestry group
#######################################################
load("~/Research/OneDrive/telomere/gwas/results/allResMerge_forAnalysis_031520.rda")
load("~/Research/OneDrive/telomere/gwas/results/White_forAnalysis_031920.rda")
load("~/Research/OneDrive/telomere/gwas/results/Black_forAnalysis_031920.rda")
load("~/Research/OneDrive/telomere/gwas/results/Asian_forAnalysis_031920.rda")
load("~/Research/OneDrive/telomere/gwas/results/HL_forAnalysis_032320.rda")
allSubGroups<-rbind(forAnalysisWhiteAmish %>% mutate(ancestryGroup = "White") %>% select(NWDID, ancestryGroup), rbind(forAnalysisBlack %>% mutate(ancestryGroup = "Black") %>% select(NWDID, ancestryGroup), rbind(forAnalysisHL %>% mutate(ancestryGroup = "Hispanic/Latino") %>% select(NWDID, ancestryGroup), forAnalysisAsian %>% mutate(ancestryGroup = "Asian") %>% select(NWDID, ancestryGroup))))
forAnalysis <- left_join(forAnalysis, allSubGroups)
forAnalysis <- forAnalysis %>% mutate(ancestryGroup = ifelse(is.na(ancestryGroup), "Other", ancestryGroup), ancestryGroup = factor(ancestryGroup, levels=c("White", "Black", "Hispanic/Latino", "Asian", "Other")))
demoTable<-left_join(forAnalysis %>% group_by(Study=study) %>% summarise('Total Count'=n(), 'Male Count (Pct)'=paste0(sum(sex == "M"), " (", round(mean(sex=="M"),2), ")"), 'Mean Age (SD, Range)'=paste0(round(mean(age_at_dna_blood_draw_wgs),0), " (", round(sd(age_at_dna_blood_draw_wgs),1), ", ", round(min(age_at_dna_blood_draw_wgs),0), "-", round(max(age_at_dna_blood_draw_wgs),0), ")")), forAnalysis %>% group_by(Study=study, ancestryGroup) %>% summarise(n=n()) %>% spread(ancestryGroup, n))
demoTable<-left_join(demoTable,forAnalysis %>% group_by(Study=study, seq_center_new) %>% summarise(n=n()) %>% spread(seq_center_new, n) )
## add row at bottom with totals
Total<-c("Total", ungroup(forAnalysis) %>% summarise('Total Count'=n(), 'Male Count (Pct)'=paste0(sum(sex == "M"), " (", round(mean(sex=="M"),2), ")"), 'Mean Age (SD, Range)'=paste0(round(mean(age_at_dna_blood_draw_wgs),0), " (", round(sd(age_at_dna_blood_draw_wgs),1), ", ", round(min(age_at_dna_blood_draw_wgs),0), "-", round(max(age_at_dna_blood_draw_wgs),0), ")")), ungroup(forAnalysis) %>% group_by(ancestryGroup) %>% summarise(n=n()) %>% spread(ancestryGroup, n), ungroup(forAnalysis) %>% group_by(seq_center_new) %>% summarise(n=n()) %>% spread(seq_center_new, n))
demoTable<-rbind(demoTable, unlist(Total)) %>% print(n=Inf)
write.csv(demoTable, "~/Research/OneDrive/telomere/gwas/manuscript/tableCSVs/SupplTable1_Demographics.csv", row.names=FALSE, quote=TRUE, na="-")
## want to pull studies used in previous paper
#load("~/Research/telomere/manuscript/results/samplesForReplication.rda")
#load("~/Research/telomere/manuscript/results/samplesForDiscovery_112918.rda")
#prevStudies <- unique(c(forDiscovery$study, forReplication$study))
#currStudies <- unique(forAnalysis$study)
#all(prevStudies %in% currStudies)
#setdiff(currStudies, prevStudies)
#studyInfo<- read_excel("~/Research/OneDrive/telomere/gwas/inputFiles/Access to study on dbGAP.xlsx")
#all(forAnalysis$study %in% studyInfo$Study)
#ctsByStudy <- forAnalysis %>% count(study) %>% mutate(newStudy = !study %in% prevStudies)
#ctsByStudy <- studyInfo %>% select(study = Study, Topmed_project, topmed_phs) %>% right_join(ctsByStudy)
#write.csv(ctsByStudy, file = "~/Research/OneDrive/telomere/gwas/manuscript/basicStudyInfo.csv", row.names=FALSE, quote= FALSE)
##############################
### Table S2: Conditional results
##############################
## Table S2: conditional analysis results for all sentinel variants.
## Estimates are multiplied by 1000 to match the units used in the other
## supplementary tables; round_Cond labels which conditioning round (or
## "Primary") each sentinel came from.
## NOTE: mutate_at()/vars() is superseded -- use across(); the external
## column-name vector is wrapped in all_of() per current tidyselect rules.
condResTable <- allRes %>%
  select(all_of(c("snpID", "chr", "pos", "LocusName.Final", "rsNum", "round_Cond",
                  "Est_primary", "pval_primary",
                  paste(c("Est_cond", "pval_cond"), rep(1:6, each = 2), sep = "_")))) %>%
  mutate(across(contains("Est_"), ~ .x * 1000)) %>%
  mutate(round_Cond = ifelse(is.na(round_Cond), "Primary", sub("cond", "Cond_", round_Cond)))
## Pairwise LD between sentinels, keyed by the non-primary ("other") SNP.
ldTable <- read.csv("~/Research/OneDrive/telomere/gwas/results/sentinels_pairwise_ld_all.csv", header = TRUE, stringsAsFactors = FALSE)
ldTable <- ldTable %>% dplyr::rename(snpID = other_snp)
condResTable <- condResTable %>% left_join(ldTable)
## Primary-round sentinels have no conditioning partner, so blank their LD
## columns before writing.
condResTable <- condResTable %>%
  mutate(r2 = ifelse(round_Cond == "Primary", NA, r2),
         dprime = ifelse(round_Cond == "Primary", NA, dprime)) %>%
  select(chr:round_Cond, r2, dprime, Est_primary:pval_cond_6)
write.csv(condResTable, "~/Research/OneDrive/telomere/gwas/manuscript/tableCSVs/SupplTable2_ConditionalRes.csv", row.names=FALSE, quote=TRUE, na="")
##############################
### Table S3: SNPs from previous studies
##############################
## code for pulling variants from primary results
#gunzip -c /dcl01/mathias1/data/telomere_mtaub/gwas/results/ALL_GWASResults/allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop.csv.gz | awk 'NR==FNR{a[$1,$2]="foo";next}; a[$2,$3]=="foo" {print}' FS=',' /dcl01/mathias1/data/telomere_mtaub/gwas/results/novel_loci/published_sentinels_unique_chrPos.csv FS=',' - > /dcl01/mathias1/data/telomere_mtaub/gwas/results/novel_loci/allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_publishedSentinels.csv
#gunzip -c /dcl01/mathias1/data/telomere_mtaub/gwas/results/White_GWASResults/White_allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop.csv.gz | awk 'NR==FNR{a[$1,$2]="foo";next}; a[$2,$3]=="foo" {print}' FS=',' /dcl01/mathias1/data/telomere_mtaub/gwas/results/novel_loci/published_sentinels_unique_chrPos.csv FS=',' - > /dcl01/mathias1/data/telomere_mtaub/gwas/results/novel_loci/White_allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_publishedSentinels.csv
#gunzip -c /dcl01/mathias1/data/telomere_mtaub/gwas/results/Black_GWASResults/Black_allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop.csv.gz | awk 'NR==FNR{a[$1,$2]="foo";next}; a[$2,$3]=="foo" {print}' FS=',' /dcl01/mathias1/data/telomere_mtaub/gwas/results/novel_loci/published_sentinels_unique_chrPos.csv FS=',' - > /dcl01/mathias1/data/telomere_mtaub/gwas/results/novel_loci/Black_allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_publishedSentinels.csv
#gunzip -c /dcl01/mathias1/data/telomere_mtaub/gwas/results/HispanicLatino_GWASResults/HL_allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop.csv.gz | awk 'NR==FNR{a[$1,$2]="foo";next}; a[$2,$3]=="foo" {print}' FS=',' /dcl01/mathias1/data/telomere_mtaub/gwas/results/novel_loci/published_sentinels_unique_chrPos.csv FS=',' - > /dcl01/mathias1/data/telomere_mtaub/gwas/results/novel_loci/HL_allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_publishedSentinels.csv
#gunzip -c /dcl01/mathias1/data/telomere_mtaub/gwas/results/Asian_GWASResults/Asian_allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop.csv.gz | awk 'NR==FNR{a[$1,$2]="foo";next}; a[$2,$3]=="foo" {print}' FS=',' /dcl01/mathias1/data/telomere_mtaub/gwas/results/novel_loci/published_sentinels_unique_chrPos.csv FS=',' - > /dcl01/mathias1/data/telomere_mtaub/gwas/results/novel_loci/Asian_allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_publishedSentinels.csv
#gunzip -c /dcl01/mathias1/data/telomere_mtaub/gwas/results/Asian_GWASResults/Asian_allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop.csv.gz | head -1 > /dcl01/mathias1/data/telomere_mtaub/gwas/results/novel_loci/allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_colNames.csv
## Table S3: previously published sentinel SNPs looked up in our TOPMed
## results. The sub() normalizes the Unicode minus sign in the published
## P-value strings to an ASCII hyphen so as.numeric(P) parses below.
prevSentinels <- read_excel("~/Research/OneDrive/telomere/gwas/results/ALL_GWASResults/novel_loci/published_sentinels-betas_PMCID.xlsx") %>% rename(rsNum = SNP) %>% mutate(P = sub("−", "-", P))
prevSentinelsPos <- read.table("~/Research/OneDrive/telomere/gwas/results/ALL_GWASResults/novel_loci/pairwise-sentinel-LD-reqd_files/published_sentinels_positions.txt", header=TRUE, stringsAsFactors = FALSE, sep="\t") %>% rename(rsNum = Variation.ID)
moreInfo <- read_excel("~/Research/OneDrive/telomere/gwas/results/forRasikaOASISLookup-withnovelty_v2_20200512.xlsx")
## Parse chr/pos out of snpID ("chr:pos:ref:alt"; X recoded as 23) and keep
## the per-locus window half-widths. Column names "-Kb"/"+Kb" include literal
## quote characters, hence the backtick-quoted selections.
annotateTable<-moreInfo %>% mutate(chr = gsub("\\:.*", "", snpID), chr = as.numeric(ifelse(chr == "X", 23,chr)), pos = as.numeric(sapply(strsplit(snpID, split=":"), function(x) x[2]))) %>% dplyr::select(SNP=snpID, label=LocusName.Final, chr, pos, negKb = `"-Kb"`, posKb = `"+Kb"`, novelty) %>% filter(!is.na(negKb))
## Genomic windows around each of our sentinels (pos +/- the Kb half-widths).
rangesToPull<-GRanges(seqnames=annotateTable$chr, ranges=IRanges(start=annotateTable$pos - 1000*annotateTable$negKb, end=annotateTable$pos + 1000*annotateTable$posKb))
## Hand-widened window for the 5th range only. NOTE(review): relies on the
## row order of annotateTable -- confirm which locus this is before reuse.
start(ranges(rangesToPull)[5]) <- start(ranges(rangesToPull)[5]) - 101000
end(ranges(rangesToPull)[5]) <- end(ranges(rangesToPull)[5]) + 2950000
#write.csv(prevSentinelsPos %>% mutate(chrPos = paste(Chromosome, Position, sep=":")) %>% filter(!duplicated(chrPos)) %>% select(Chromosome, Position), file = "~/Research/OneDrive/telomere/gwas/results/ALL_GWASResults/novel_loci/pairwise-sentinel-LD-reqd_files/published_sentinels_unique_chrPos.csv", row.names = FALSE, quote = FALSE)
# confirming only 57 unique entries, so will drop any
## Sanity checks (inspected in console): unique rsNums vs unique full rows.
length(unique(prevSentinelsPos$rsNum))
length(unique(apply(prevSentinelsPos, 1, function(x) paste(x, collapse = ":"))))
prevSentinelsPos <- prevSentinelsPos %>% filter(!duplicated(rsNum))
prevSentinels <- left_join(prevSentinels, prevSentinelsPos)
prevSentinels <- prevSentinels %>% mutate(chrPos = paste(Chromosome, Position, sep=":"))
## The awk-extracted result files (see commands above) have no header row;
## grab the column names from a separately saved header-only CSV, and suffix
## the per-analysis statistic columns (5:14) with the group tag.
colNames <- read.csv("~/Research/OneDrive/telomere/gwas/results/ALL_GWASResults/novel_loci/pairwise-sentinel-LD-reqd_files/allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_colNames.csv")
colNames <- names(colNames)
prevAll<-read.csv("~/Research/OneDrive/telomere/gwas/results/ALL_GWASResults/novel_loci/pairwise-sentinel-LD-reqd_files/allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_publishedSentinels.csv", header=FALSE, stringsAsFactors = FALSE)
names(prevAll)<-c(colNames[1:4], paste0(colNames[5:14], ".ALL"), colNames[15:length(colNames)])
## Merge in the same lookups from each ancestry-stratified analysis.
for (ancTag in c("White", "Black", "HL", "Asian")){
prevCurr<-read.csv(paste0("~/Research/OneDrive/telomere/gwas/results/ALL_GWASResults/novel_loci/pairwise-sentinel-LD-reqd_files/", ancTag, "_allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_publishedSentinels.csv"), header=FALSE, stringsAsFactors = FALSE)
names(prevCurr)<-c(colNames[1:4], paste0(colNames[5:14], ".", ancTag), colNames[15:length(colNames)])
prevAll <- left_join(prevAll, prevCurr)
}
prevAll <- prevAll %>% mutate(chrPos = paste(chr, pos, sep=":"))
## Rescale effect estimates by 1000, matching the other tables' units.
prevAll <- prevAll %>% mutate(Est.ALL = 1000*Est.ALL, Est.White = 1000*Est.White, Est.Black = 1000*Est.Black, Est.HL = 1000*Est.HL, Est.Asian = 1000*Est.Asian)
prevSentinels <- left_join(prevSentinels, prevAll)
# there is one with multi alleles, but only one matches ref/alt from Dorajoo
prevSentinels %>% filter(chrPos == "8:73008648")
prevSentinels <- prevSentinels %>% filter(!snpID == "8:73008648:T:A")
## need to check on signs for betas and remove betas where EA is missing
prevSentinels <- prevSentinels %>% mutate(EA = toupper(EA))
prevSentinels %>% filter(EA != alt) %>% select(rsNum, EA, chr, pos, ref, alt, Beta, Est.ALL)
## Flip published betas whose effect allele is our ref (not alt); NA betas
## where EA is unknown; shorten PMCID for display.
prevSentinels <- prevSentinels %>% mutate(Beta = ifelse(EA != alt, -Beta, ifelse(is.na(EA), NA, Beta)), Sign = ifelse(is.na(Beta), "N/A", ifelse(Beta < 0, "-", "+")), PMCID = substr(PMCID, 1, 10))
## Mark which published sentinels fall inside any of our Table 1 locus windows.
prevSentinelsGR <- GRanges(seqnames=prevSentinels$Chromosome, ranges=IRanges(start=prevSentinels$Position, width = 1))
table1PrevOL <- findOverlaps(prevSentinelsGR, rangesToPull)
prevSentinels$inTable1 <- "Yes"
prevSentinels$inTable1[setdiff(1:nrow(prevSentinels), queryHits(table1PrevOL))] <- "No"
# changed to only include trans-ethnic results
prevSentinelsForTable <- prevSentinels %>% select("chr", "pos", "rsNum", "ref", "alt", "GENE", "Author", "Year", "PMCID", "P", "Sign", "inTable1", paste(c("Score.pval", "Est", "freq"), rep(c("ALL"), each = 3), sep=".")) %>% arrange(chr, pos, Author)
## Keep only genome-wide-significant published results (P <= 5e-8).
## NOTE(review): rows whose P is missing/non-numeric become NA here and are
## silently dropped by filter() -- confirm that is intended.
prevSentinelsForTable <- prevSentinelsForTable %>% filter(!as.numeric(P) > 5e-8)
write.csv(prevSentinelsForTable, file = "~/Research/OneDrive/telomere/gwas/manuscript/tableCSVs/SupplTable3_published_sentinels_TOPMedResults.csv", row.names=FALSE, quote = TRUE, na = "-")
## ADD COLUMN: is this in Table 1 or not? Add to the ALL section (and check -- if it is exactly those with p < 5x10-9 or from loci from table 1. Question is really do we identify all these loci?? Or which do we not identify?)
## For replication table summary : correlation of effect sizes for replication of novel loci??
##############################
### Table S4: Annotation of Table 1 variants
##############################
## Table S4: functional annotation columns for all sentinel variants,
## ordered by genomic position.
annoColsToPull <- c("chr", "pos", "LocusName.Final", "rsNum", "ref", "alt", "Type", "Function", "AAchg", "GENE", "RglmDB", "eigenPC", "Dnase", "Reg", "SIFT", "PP2_HDIV","PP2_HVAR", "LRT", "MT", "MA", "FATHMM", "metaSVM", "metaLR", "PhyloP", "SiPhy", "GERP..", "CADD", "ClinVar", "Phast")
## all_of() replaces the deprecated bare external vector in select(); it also
## errors loudly if any expected annotation column is missing from allRes.
write.csv(allRes %>% arrange(as.numeric(chr), pos) %>% select(all_of(annoColsToPull)), file = "~/Research/OneDrive/telomere/gwas/manuscript/tableCSVs/SupplTable4_Annotation.csv", row.names=FALSE, quote = TRUE, na = "")
##############################
### Table S5: Replication of novel findings
##############################
# dorajoo
## Table S5: replication of novel loci in two external studies (Dorajoo, Li).
dorajoo<-fread("~/Research/OneDrive/telomere/gwas/results/ALL_GWASResults/novel_loci/replicated_loci/replication_dependent_files/dorajoo_replication.txt")
colnames(dorajoo)<-c("V1", "chr", "hg37pos", "other_allele", "test_allele", "pvalue", "beta", "se", "p_het")
#dorajoo has a bunch of SNPs where it's rsID:pos:ref:alt
#I need to separate it to just the rsID
library(stringr)
## Split rows by how many ':' the ID contains: 0 = plain rsID, 3 = composite.
dorajoo$count<-str_count(dorajoo$V1, ':')
zero<-subset(dorajoo, dorajoo$count == 0)
colnames(zero)[1]<-"SNP"
three<-subset(dorajoo, dorajoo$count == 3)
## data.table in-place split: adds SNP/pos/ref/alt as columns 11-14.
three[,c("SNP", "pos", "ref", "alt"):=tstrsplit(V1, ":", fixed=T)]
## Keep parsed SNP (col 11) plus the original data columns (2:10), dropping
## V1 and the split pos/ref/alt so the layout matches `zero` for the rbind.
three<-three[,c(11,2:10)]
dorajoo<-rbind(zero, three)
#there are some rsID's which say GSA-rsid# get rid of the prefix
dorajoo$SNP<-sub("GSA-", "", dorajoo$SNP)
dor<-dorajoo[,c(1:8)]
colnames(dor)<-c("rsNum", "dorajoo_chr", "dorajoo_hg37_pos", "dorajoo_NEA", "dorajoo_EA", "dorajoo_pvalue",
"dorajoo_beta", "dorajoo_se")
# li
li<-fread("~/Research/OneDrive/telomere/gwas/results/ALL_GWASResults/novel_loci/replicated_loci/replication_dependent_files/li_replication.txt")
colnames(li)<-c("rsNum", "li_chr", "li_hg37pos", "li_EA", "li_NEA", "li_EAF", "li_BETA", "li_SE", "li_pvalue",
"n", "fdr")
li<-li[,c(1:9)]
## Join both replication studies onto our novel sentinels by rsNum.
repTable <- left_join(allResForTable, dor) %>% left_join(li) %>% filter(novelty == "Novel") %>% select( chr, pos, LocusName.Final, rsNum,Est_joint, li_pvalue, li_BETA, li_SE, dorajoo_pvalue, dorajoo_beta, dorajoo_se)
## Bonferroni denominator = novel sentinels with data in either study.
nTests<-repTable %>% filter(!is.na(li_pvalue) | !is.na(dorajoo_pvalue)) %>% nrow()
## Categorize each sentinel's best replication evidence. NOTE(review): the
## rounded threshold string (e.g. "<0.0026") is hardcoded in later summary
## checks, so the two must stay in sync if nTests changes.
repTable <- repTable %>% mutate(threshold = ifelse(rowSums(cbind(li_pvalue, dorajoo_pvalue) < 0.05/nTests, na.rm = TRUE)>0, paste0("<", round(0.05/nTests,4)), ifelse(rowSums(cbind(li_pvalue, dorajoo_pvalue) < 0.05, na.rm = TRUE)>0, "<0.05", ifelse(rowSums(is.na(cbind(li_pvalue, dorajoo_pvalue)))==2, NA, ">0.05"))))
write.csv(repTable, file = "~/Research/OneDrive/telomere/gwas/manuscript/tableCSVs/SupplTable5_replication_novel_TOPMedResults.csv", row.names=FALSE, quote = TRUE, na = "-")
##############################
### Table S6: Expanded population level results
##############################
## Table S6: expanded population-stratified primary (unconditional) results.
colsForPrimaryTable <- c("chr", "pos", "LocusName.Final", "rsNum", paste0("Est_primary", c("", "_white", "_black", "_hl", "_asian")), paste0("pval_primary", c("", "_white", "_black", "_hl", "_asian")), paste0("freq", c("", ".White", ".Black", ".HispanicLatino", ".Asian")), paste0("PVE", c("_Primary", ".White", ".Black", ".HispanicLatino", ".Asian")))
## funs() is defunct in current dplyr and bare external vectors in select()
## are deprecated: use all_of() + across(). After the select, the columns
## matching "Est_primary" are exactly the five estimate columns listed above;
## they are multiplied by 1000 to match the units in the other tables.
primaryTable <- allRes %>%
  select(all_of(colsForPrimaryTable)) %>%
  mutate(across(starts_with("Est_primary"), ~ .x * 1000)) %>%
  arrange(as.numeric(chr), pos)
write.csv(primaryTable, file = "~/Research/OneDrive/telomere/gwas/manuscript/tableCSVs/SupplTable6_primaryResults.csv", row.names=FALSE, quote = TRUE, na = "-")
##############################
### Table S9: BioVU PheWAS Results
##############################
## Table S9 part 2: BioVU single-variant PheWAS, AA and EA strata.
## NOTE(review): phewasAAOld is read but never used below -- presumably kept
## for comparison with an earlier results run; confirm before deleting.
phewasAAOld <- read_excel("~/Research/OneDrive/telomere/BioVU/AA_PheWAS_combined.xlsx")
phewasAA <- read_excel("~/Research/OneDrive/telomere/BioVU/re/AA_PheWAS_combined.xlsx")
## Per-SNP Bonferroni: nTests = phenotypes with a usable OR for that SNP.
## (direct logical comparison replaces redundant ifelse(cond, TRUE, FALSE))
phewasAA <- phewasAA %>% group_by(snp) %>% mutate(nTests = sum(!is.na(OR)), newBonf = p < 0.05/nTests)
phewasAA <- phewasAA %>% group_by(phenotype) %>% mutate(nSnps = sum(!is.na(OR)))
phewasAASub<-phewasAA %>% filter(!is.na(OR))
# they match
## Cross-check our recomputed Bonferroni flag against the file's own column.
table(phewasAASub$bonferroni, phewasAASub$newBonf)
phewasAASig <- phewasAASub %>% filter(newBonf) %>% mutate(nSamps = paste(n_cases, n_controls, sep = " / "), ancGroup = "AA") %>% select(rsNum = snp, phenotype, group, description, OR, p, nSamps, ancGroup)
## Attach our locus names and (rescaled) joint estimates for context.
phewasBySNP<-phewasAASig %>% ungroup() %>% left_join(allRes %>% select(chr, pos, rsNum, LocusName.Final, ref, alt, Est_joint)) %>% mutate(Est_joint = 1000*Est_joint) %>% arrange(as.numeric(chr), pos, p) %>% select(chr, pos, LocusName.Final, rsNum, ref, alt, Est_joint, phenotype, group, description, OR, p, nSamps, ancGroup)
## Same pipeline for the EA stratum.
phewasEA <- read_excel("~/Research/OneDrive/telomere/BioVU/re/EA_PheWAS_combined.xlsx")
phewasEA <- phewasEA %>% group_by(snp) %>% mutate(nTests = sum(!is.na(OR)), newBonf = p < 0.05/nTests)
phewasEA <- phewasEA %>% group_by(phenotype) %>% mutate(nSnps = sum(!is.na(OR)))
phewasEASub<-phewasEA %>% filter(!is.na(OR))
# they match
table(phewasEASub$bonferroni, phewasEASub$newBonf)
phewasEASig <- phewasEASub %>% filter(newBonf) %>% mutate(nSamps = paste(n_cases, n_controls, sep = " / "), ancGroup = "EA") %>% select(rsNum = snp, phenotype, group, description, OR, p, nSamps, ancGroup)
phewasBySNPEA<-phewasEASig %>% ungroup() %>% left_join(allRes %>% select(chr, pos, rsNum, LocusName.Final, ref, alt, Est_joint)) %>% mutate(Est_joint = 1000*Est_joint) %>% arrange(as.numeric(chr), pos, p) %>% select(chr, pos, LocusName.Final, rsNum, ref, alt, Est_joint, phenotype, group, description, OR, p, nSamps, ancGroup)
#phewasEA <- phewasEA %>% mutate(OR = exp(beta), nSamps = NA, ancGroup = "EA") %>% select(rsNum, phenotype, group, description, OR, p, nSamps, ancGroup)
#phewasEA <- phewasEA %>% left_join(allRes %>% select(chr, pos, rsNum, LocusName.Final, ref, alt, Est_joint)) %>% mutate(Est_joint = 1000*Est_joint) %>% arrange(as.numeric(chr), pos, p) %>% select(chr, pos, LocusName.Final, rsNum, ref, alt, Est_joint, phenotype, group, description, OR, p, nSamps, ancGroup)
phewasBySNPBoth <- rbind(phewasBySNP, phewasBySNPEA) %>% arrange(as.numeric(chr), pos, p)
write.csv(phewasBySNPBoth, file = "~/Research/OneDrive/telomere/gwas/manuscript/tableCSVs/SupplTable9_Part2_PheWASBySNP.csv", row.names=FALSE, quote = TRUE, na = "-")
#phewasEAPRS <- read_delim("~/Research/OneDrive/telomere/BioVU/Telomere_EA_PRS_Phewas.txt", delim = "\t")
## Table S9 part 1: BioVU polygenic-risk-score PheWAS (EA and AA strata).
phewasEAPRS <- read_excel("~/Research/OneDrive/telomere/BioVU/re/EA_PRS_Scaled_PheWAS_Results.xlsx")
phewasEAPRSSub <- phewasEAPRS %>% filter(!is.na(OR))
## Bonferroni over all testable phenotypes for the single PRS predictor.
## (direct logical comparison replaces redundant ifelse(cond, TRUE, FALSE))
phewasEAPRSSub <- phewasEAPRSSub %>% mutate(newBonf = p < 0.05/nrow(phewasEAPRSSub))
# they match
table(phewasEAPRSSub$bonferroni, phewasEAPRSSub$newBonf)
phewasEAPRSSig <- phewasEAPRSSub %>% filter(newBonf) %>% mutate(nSamps = paste(n_cases, n_controls, sep = " / "), ancGroup = "EA") %>% select(phenotype, group, description, OR, p, nSamps, ancGroup) %>% arrange(phenotype)
phewasAAPRS <- read_excel("~/Research/OneDrive/telomere/BioVU/re/AA_PRS_Scaled_PheWAS_Results.xlsx")
phewasAAPRSSub <- phewasAAPRS %>% filter(!is.na(OR))
phewasAAPRSSub <- phewasAAPRSSub %>% mutate(newBonf = p < 0.05/nrow(phewasAAPRSSub))
# they match
table(phewasAAPRSSub$bonferroni, phewasAAPRSSub$newBonf)
phewasAAPRSSig <- phewasAAPRSSub %>% filter(newBonf) %>% mutate(nSamps = paste(n_cases, n_controls, sep = " / "), ancGroup = "AA") %>% select(phenotype, group, description, OR, p, nSamps, ancGroup)
## no significant AA PRS results
write.csv(phewasEAPRSSig, file = "~/Research/OneDrive/telomere/gwas/manuscript/tableCSVs/SupplTable9_Part1_PheWASByPRS.csv", row.names=FALSE, quote = TRUE, na = "-")
##############################
### Table SXX: BioVU LabWAS Results -- LEAVE OUT FOR NOW
##############################
## my bonferroni correction does not match hers here or for EAs
## BioVU LabWAS results (single-variant and PRS), AA and EA strata.
## Per-predictor Bonferroni uses the count of labs with a usable OR.
## (direct logical comparisons replace redundant ifelse(cond, TRUE, FALSE))
labwasAA <- read_csv("~/Research/OneDrive/telomere/BioVU/AA_LabWAS_Results_Combined.csv")
labwasAA <- labwasAA %>% group_by(Predictor) %>% mutate(nTests = sum(!is.na(OR)), newBonf = p < 0.05/nTests)
## Cross-check recomputed flag against the file's own Bonferroni column
## (see the note above: these do NOT fully agree for this data set).
table(labwasAA$Bonferroni, labwasAA$newBonf)
labwasAASig <- labwasAA %>% filter(newBonf) %>% mutate(ancGroup = "AA") %>% select(Predictor, Full_name, Group, N, p, OR, newBonf, ancGroup)
labwasEA <- read_csv("~/Research/OneDrive/telomere/BioVU/EA_LabWAS_Results_Combined.csv")
labwasEA <- labwasEA %>% group_by(Predictor) %>% mutate(nTests = sum(!is.na(OR)), newBonf = p < 0.05/nTests)
table(labwasEA$Bonferroni, labwasEA$newBonf)
labwasEASig <- labwasEA %>% filter(newBonf) %>% mutate(ancGroup = "EA") %>% select(Predictor, Full_name, Group, N, p, OR, newBonf, ancGroup)
## PRS LabWAS: one predictor, so Bonferroni over all rows.
labwasAAPRS <- read_csv("~/Research/OneDrive/telomere/BioVU/Telomere_AA_PRS_LabWAS_Results.csv")
labwasAAPRS <- labwasAAPRS %>% mutate(newBonf = p < 0.05/nrow(labwasAAPRS))
table(labwasAAPRS$Bonferroni, labwasAAPRS$newBonf) # nothing significant for AA PRS
labwasEAPRS <- read_csv("~/Research/OneDrive/telomere/BioVU/Telomere_EA_PRS_LabWAS_Results.csv")
labwasEAPRS <- labwasEAPRS %>% mutate(newBonf = p < 0.05/nrow(labwasEAPRS))
table(labwasEAPRS$Bonferroni, labwasEAPRS$newBonf)
labwasEAPRSSig <- labwasEAPRS %>% filter(newBonf) %>% mutate(ancGroup = "EA") %>% select(Full_name, Group, OR, p, N, ancGroup) %>% arrange(p)
write.csv(labwasEAPRSSig, file = "~/Research/OneDrive/telomere/gwas/manuscript/tableCSVs/SupplTable10_Part1_LabWASByPRS.csv", row.names=FALSE, quote = TRUE, na = "-")
## Per-SNP significant labs, annotated with our locus names and rescaled
## joint estimates.
labwasBySNP<-rbind(labwasEASig %>% ungroup(), labwasAASig %>% ungroup()) %>% rename(rsNum = Predictor) %>% left_join(allRes %>% select(chr, pos, rsNum, LocusName.Final, ref, alt, Est_joint)) %>% mutate(Est_joint = 1000*Est_joint) %>% arrange(as.numeric(chr), pos, p) %>% select(chr, pos, LocusName.Final, rsNum, ref, alt, Est_joint, Full_name, Group, OR, p, N, ancGroup)
write.csv(labwasBySNP, file = "~/Research/OneDrive/telomere/gwas/manuscript/tableCSVs/SupplTable10_Part2_LabWASBySNP.csv", row.names=FALSE, quote = TRUE, na = "-")
##############################
### Table S10: UKBiobank Results
##############################
## these results use a per-SNP bonferroni cutoff since that is what was done for BioVU
## UK Biobank PheWAS results, merged with our locus names / joint estimates
## and written out as a supplementary table (per-SNP Bonferroni, per BioVU).
UKBBRes <- read.csv("~/Research/OneDrive/telomere/gwas/results/UKBB/UKBB_SAIGEResSigBySNP.csv", header= TRUE, stringsAsFactors = FALSE)
locusLookup <- allResForTable %>% select(rsNum, LocusName.Final, Est_joint)
UKBBRes <- UKBBRes %>%
  rename(rsNum = ID) %>%
  left_join(locusLookup) %>%
  mutate(OR = exp(beta))
UKBBResTable <- UKBBRes %>%
  mutate(nSamps = paste(Ncases, Ncontrols, sep = " / ")) %>%
  select(X.CHROM, POS, LocusName.Final, rsNum, REF, ALT, Est_joint,
         PheCode, Group, Description, OR, pval, nSamps) %>%
  arrange(as.numeric(X.CHROM), POS, pval)
write.csv(UKBBResTable, file = "~/Research/OneDrive/telomere/gwas/manuscript/tableCSVs/SupplTable11_UKBBResults.csv", row.names=FALSE, quote = TRUE, na = "-")
##############################
### Additional checks of reported paper results
##############################
# count of significant loci
## Interactive sanity checks for numbers quoted in the manuscript text;
## results are inspected in the console, nothing is saved.
length(unique(allResForTable$LocusName.Final))
# count of novel loci
allResForTable %>% filter(!duplicated(LocusName.Final)) %>% count(novelty) %>% filter(novelty == "Novel")
# count of novel variants
allResForTable %>% count(novelty) %>% filter(novelty == "Novel")
# how many loci replicated
## NOTE(review): the "<0.0026" string must match round(0.05/nTests, 4) from
## the Table S5 construction; update both together if nTests changes.
repTable %>% group_by(LocusName.Final) %>% summarise(isSig = any(threshold %in% c("<0.0026", "<0.05"))) %>% count(isSig)
# count of loci with more than one signal
allResForTable %>% count(LocusName.Final) %>% filter(n > 1)
# count that directly match prior signals
# this is based on things labeled ** which was done by hand -- SHOULD BE DOUBLE CHECKED
allResForTable %>% count(asterisk)
# counts of different sample groups
nrow(forAnalysis)
nrow(forAnalysisWhiteAmish)
nrow(forAnalysisBlack)
nrow(forAnalysisHL)
nrow(forAnalysisAsian)
nrow(forAnalysis) - (nrow(forAnalysisWhiteAmish) + nrow(forAnalysisBlack) + nrow(forAnalysisHL) + nrow(forAnalysisAsian))
# percent male
mean(forAnalysis$sex == "M")
# age ranges
range(forAnalysis$age_at_dna_blood_draw_wgs)
# total number of variants computed as
#[mtaub@compute-060 ALL_GWASResults]$ gunzip -c allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0.csv.gz | wc -l
#162583130
# count of replicated prior loci and their names
# to get 25 prior loci group ("TERC" "LRRC34 (TERC)") ("OBFC1" "STN1 (OBFC1)") ("ZNF208" "ZNF676") ("RTEL1/STMN3" "RTEL1" "RTEL1/ZBTB46")
# 30 - 5 extras = 25
# so there are 16 total of the 25 prior that are replicated by us
unique(prevSentinelsForTable$GENE)
## Known (non-novel) loci: novelty is NA for known loci in allResForTable.
length(allResForTable %>% filter(!duplicated(LocusName.Final), is.na(novelty)) %>% pull(LocusName.Final))
sub("\\_", "\\/", paste(allResForTable %>% filter(!duplicated(LocusName.Final), is.na(novelty)) %>% pull(LocusName.Final), collapse = ", ") )
# this second one includes PRRC2A which we think is actually not necessarily included in the nearby HSPA1A signal (due to low LD between our sentinel and the previously reported SNV) and is missing ZNF257/ZNF676 just due to naming differences
unique(prevSentinelsForTable %>% filter(Score.pval.ALL<5e-9 | GENE %in% allResForTable$LocusName.Final) %>% pull(GENE))
# close to replicated loci; only SENP7 and CTC1 were not previoulsy mentioned
prevSentinelsForTable %>% filter(Score.pval.ALL < 0.05, Score.pval.ALL > 5e-9) %>% select(GENE, rsNum, Score.pval.ALL)
# distance between PRRC2A and HSPA1A
allResForTable %>% filter(LocusName.Final == "HSPA1A") %>% pull(pos) - prevSentinelsForTable %>% filter(GENE == "PRRC2A") %>% pull(pos)
# not replicated
sub("\\_", "\\/", paste(prevSentinelsForTable %>% filter(inTable1 == "No", Score.pval.ALL > 0.05) %>% pull(GENE), collapse = ", "))
# for known loci, how many have multiple independent SNVs?
allResForTable %>% filter(is.na(novelty)) %>% count(LocusName.Final) %>% filter(n > 1)
allResForTable %>% filter(is.na(novelty)) %>% count(asterisk) %>% filter(asterisk == "**")
# number/list of loci with missense coding variants
allResForTable %>% filter(is.na(novelty), annotation == "missense") %>% pull(LocusName.Final)
# number of variants in known loci with RegulomeDB score < 7
allResForTable %>% filter(is.na(novelty)) %>% count(annotation)
# count of variants falling in novel loci
allResForTable %>% filter(novelty == "Novel") %>% nrow()
# replication results
# note: TYMS is getting counted twice
repTable %>% filter(!is.na(li_pvalue) | !is.na(dorajoo_pvalue)) %>% nrow()
repTable %>% filter(threshold == "<0.0026")
paste(repTable %>% filter(threshold == "<0.0026") %>% pull(LocusName.Final), collapse = ", ")
repTable %>% filter(threshold == "<0.05")
paste(repTable %>% filter(threshold == "<0.05") %>% pull(LocusName.Final), collapse = ", ")
# p-value for SAMHD1
allResForTable %>% filter(LocusName.Final == "SAMHD1") %>% pull(pval_joint)
# correlation numbers for replication results
repTable %>% summarise(corTOPMedLi = cor.test(Est_joint, li_BETA)$estimate, corPTOPMedLi = cor.test(Est_joint, li_BETA)$p.value, corTOPMedDorajoo = cor.test(Est_joint, dorajoo_beta)$estimate, corPTOPMedDorajoo = cor.test(Est_joint, dorajoo_beta)$p.value)
## Same correlations restricted to sentinels with nominal replication.
repTable %>% filter(!is.na(threshold), !threshold == ">0.05") %>% summarise(corTOPMedLi = cor.test(Est_joint, li_BETA)$estimate, corPTOPMedLi = cor.test(Est_joint, li_BETA)$p.value, corTOPMedDorajoo = cor.test(Est_joint, dorajoo_beta)$estimate, corPTOPMedDorajoo = cor.test(Est_joint, dorajoo_beta)$p.value)
# min and max effect sizes of common and rare variants
allRes %>% filter(freq >= 0.05 & freq <= 0.95) %>% summarise(minPrimary = min(abs(Est_primary)), maxPrimary = max(abs(Est_primary)))
allRes %>% filter(!(freq >= 0.05 & freq <= 0.95)) %>% summarise(minPrimary = min(abs(Est_primary)), maxPrimary = max(abs(Est_primary)))
# which SNVs have p-values for Cochrane's Q < 0.05
allResForTable %>% filter(Qp < 0.05)
# group-specific p-values for TINF2
allResForTable %>% filter(rsNum == "rs28372734")
allResForTable %>% filter(rsNum == "rs8016076")
## Breakdown of the significant EA PRS PheWAS hits by phenotype group.
phewasEAPRSSig %>% count(group)
phewasEAPRSSig %>% filter(group == "neoplasms") %>% pull(OR)
##############################
### Additional data selections
##############################
## subset JHS, WHI and GeneSTAR
## Additional data selections. load() restores forAnalysis (and possibly
## other objects) from the saved analysis image.
load("~/Research/OneDrive/telomere/gwas/results/allResMerge_forAnalysis_031520.rda")
forAnalysisSubStudies<-forAnalysis %>% filter(study %in% c("JHS", "WHI", "GeneSTAR"))
write.csv(forAnalysisSubStudies, file = "~/Research/OneDrive/telomere/gwas/results/allResMerge_forAnalysis_031520_JHSWHIGeneSTAR.csv", row.names = FALSE, quote=FALSE)
### Table S3: All SNPs with p < 5e-9 in at least one group??
mergedRes<-read.csv(gzfile("~/Research/OneDrive/telomere/gwas/results/MERGED_allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_p_lt_5e-8_OASISAnno.csv.gz"), header=TRUE, stringsAsFactors = FALSE)
allWithLocusGroup<-read.csv("~/Research/OneDrive/telomere/gwas/results/ALL_GWASResults/allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_p_lt_5e-8_withLocusGroups.csv", header = TRUE, stringsAsFactors = FALSE)
mergedRes <- allWithLocusGroup %>% select(locusGroup, newLOCUS, snpID) %>% right_join(mergedRes)
## Group contiguous SNPs into merged loci: a break occurs at a chromosome
## change or a gap > 200kb between consecutive rows.
## NOTE(review): this assumes mergedRes is sorted by chr then pos after the
## right_join -- confirm the join preserves that order.
mergedRes$diffPos <- c(0,diff(mergedRes$pos))
mergedRes$sameChr <- c(TRUE, mergedRes$chr[1:(nrow(mergedRes)-1)] == mergedRes$chr[2:(nrow(mergedRes))] )
contigBreaks<-which(!mergedRes$sameChr | mergedRes$diffPos > 200000)
## Inspect the rows flanking each break (console check only).
mergedRes[unique(sort(c(contigBreaks-1, contigBreaks))),1:5]
## Assign a running merged-locus ID from the break positions.
mergedRes$locusGroupMerged<-rep(1:(length(contigBreaks)+1), times=c(contigBreaks[1]-1, diff(contigBreaks), nrow(mergedRes)-contigBreaks[length(contigBreaks)]+1))
## Merged loci with no named newLOCUS anywhere = signals outside known loci;
## export their per-group frequencies/MACs/p-values for BRAVO checking.
new8OutsideLoci<-mergedRes %>% filter(locusGroupMerged %in% (mergedRes %>% group_by(locusGroupMerged) %>% summarise(allNA = all(is.na(newLOCUS))) %>% filter(allNA==TRUE) %>% pull(locusGroupMerged))) %>% select("snpID", paste(c("freq", "MAC", "Score.pval"), rep(c("Black", "White", "Asian", "HispanicLatino", "Samoan", "Brazilian"), each=3), sep="."))
write.csv(new8OutsideLoci, file="~/Research/OneDrive/telomere/gwas/results/MERGED_allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_p_lt_5e-8_forBRAVOCheck_notInLoci.csv", row.names=FALSE, quote=FALSE)
## check against what Rasika pulled
library(readxl)
## The sheet name's "Anlaysis" typo is literal in the workbook -- do not fix.
rasResult<-read_excel("~/Research/OneDrive/telomere/gwas/results/ALL_GWASResults/OASIS QC/Chromosome_BRAVO_confirm.xlsx", sheet="Stratified Anlaysis Lookups")
colnames(rasResult)<-c("snpID", "snpIDMod", "Outcome", "Path")
rasResultOdd<- rasResult %>% filter(Outcome == "odd") %>% pull(snpID)
## Stratified stats for the SNPs Rasika flagged as "odd" in BRAVO QC.
mergedResOdd<-mergedRes %>% filter(snpID %in% rasResultOdd) %>% select(paste(c("freq", "MAC", "Score.pval"), rep(c("Black", "White", "Asian", "HispanicLatino", "Samoan", "Brazilian"), each=3), sep="."))
## How many odd SNPs reach 5e-8 in each stratified analysis (console checks).
sum(mergedResOdd$Score.pval.Black < 5e-8, na.rm=TRUE)
sum(mergedResOdd$Score.pval.White < 5e-8, na.rm=TRUE)
sum(mergedResOdd$Score.pval.Asian < 5e-8, na.rm=TRUE)
sum(mergedResOdd$Score.pval.HispanicLatino < 5e-8, na.rm=TRUE)
sum(mergedResOdd$Score.pval.Brazilian < 5e-8, na.rm=TRUE)
sum(mergedResOdd$Score.pval.Samoan < 5e-8, na.rm=TRUE)
mergedResOdd %>% select("freq.White", "MAC.White", "Score.pval.White")
sum(mergedResOdd$Score.pval.White < 5e-9, na.rm=TRUE)
## want to pull new positions for Rasika to check
## SNPs significant in a stratified analysis but not the pooled one (5e-8).
new8<-mergedRes %>% filter(Score.pval.ALL > 5e-8) %>% pull(snpID)
write.csv(new8, file="~/Research/OneDrive/telomere/gwas/results/MERGED_allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_p_lt_5e-8_forBRAVOCheck.csv", row.names=FALSE, quote=FALSE)
justPvals<-mergedRes[,grep("pval", colnames(mergedRes))]
nrow(justPvals) - sum(justPvals$Score.pval.ALL < 5e-8) ## 247
## Same selection at the stricter 5e-9 threshold.
sum(rowSums(justPvals < 5e-9, na.rm=TRUE)>0)
idx9<-which(rowSums(justPvals < 5e-9, na.rm=TRUE)>0)
new9<-mergedRes[idx9,] %>% filter(Score.pval.ALL > 5e-9) %>% pull(snpID)
write.csv(new9, file="~/Research/OneDrive/telomere/gwas/results/MERGED_allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_p_lt_5e-9_forBRAVOCheck.csv", row.names=FALSE, quote=FALSE)
justPvals <- justPvals %>% filter(rowSums(justPvals < 5e-9, na.rm=TRUE)>0)
nrow(justPvals) - sum(justPvals$Score.pval.ALL < 5e-9) ## 149
notSigAll <- justPvals %>% filter(justPvals$Score.pval.ALL > 5e-9)
## of ones not sig in pooled analysis, all are unique to one other group
table(rowSums(notSigAll < 5e-9, na.rm=TRUE))
sum(notSigAll$Score.pval.White < 5e-9, na.rm=TRUE) ## 111
sum(notSigAll$Score.pval.Black < 5e-9, na.rm=TRUE) ## 28
sum(notSigAll$Score.pval.HispanicLatino < 5e-9, na.rm=TRUE) ## 7
sum(notSigAll$Score.pval.Asian < 5e-9, na.rm=TRUE) ## 1
sum(notSigAll$Score.pval.Brazilian < 5e-9, na.rm=TRUE) ## 2
sum(notSigAll$Score.pval.Samoan < 5e-9, na.rm=TRUE) ## 0
## Reshape the round-wise conditional-analysis columns (snpID_*/Score.pval_*/
## pos_*) from wide to long, one row per (chr, round).
idsByRound<-condRes %>% select(chr, grep("snpID_", colnames(condRes))) %>% pivot_longer(-chr, names_to="round", values_to ="snpID") %>% mutate(round = sub("snpID_", "", round))
pvalsByRound <- condRes %>% select(chr, grep("Score.pval_", colnames(condRes))) %>% pivot_longer(-chr, names_to="round", values_to ="Score.pval") %>% mutate(round = sub("Score.pval_", "", round))
posByRound<- condRes %>% select(chr, grep("pos_", colnames(condRes))) %>% pivot_longer(-chr, names_to="round", values_to ="pos") %>% mutate(round = sub("pos_", "", round))
allByRound <- idsByRound %>% left_join(pvalsByRound) %>% left_join(posByRound)
## remove NAs and positions with p>5e-9
allByRound <- allByRound %>% filter(!is.na(Score.pval), Score.pval < 5e-9)
### OLD CODE ####
## Superseded pipeline (kept for reference): builds locus groupings, manual
## locus-name fixes, an earlier Table 1, and the files sent for OASIS lookup.
## Peaks not present among the conditional-analysis sentinels.
newPeaks %>% filter(!snpID %in% condRes$snpID)
allWithLocusGroup<-read.csv("~/Research/OneDrive/telomere/gwas/results/ALL_GWASResults/allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_p_lt_5e-8_withLocusGroups.csv", header = TRUE, stringsAsFactors = FALSE)
## get start and stop of locus regions
## Keeps rows where locusGroup changes relative to the previous/next row,
## i.e. the first and last SNV of each locus group (assumes rows are sorted).
allWithLocusGroup %>% filter(c(locusGroup[1:(nrow(allWithLocusGroup)-1)] != locusGroup[2:nrow(allWithLocusGroup)], TRUE) | c(TRUE, locusGroup[2:nrow(allWithLocusGroup)] != locusGroup[1:(nrow(allWithLocusGroup) - 1)])) %>% select(chr, pos)
## Attach locus names (newLOCUS) to the conditional results by chr+pos.
condRes <- allWithLocusGroup %>% select(chr, pos, newLOCUS) %>% right_join(condRes, by=c("chr", "pos"))
condRes %>% select(newLOCUS, snpID, roundNum_Cond)
## Manual locus-name assignments for sentinels missing a newLOCUS after the join.
condRes[condRes$snpID == "4:163126692:T:G", "newLOCUS"]<-"NAF1"
condRes[condRes$snpID == "5:1292843:C:T", "newLOCUS"]<-"TERT"
condRes[condRes$snpID == "10:103915847:C:T", "newLOCUS"]<-"SH3PXD2A_OBFC1_SLK"
condRes[condRes$snpID == "18:676473:C:T", "newLOCUS"]<-"TYMS"
condRes[condRes$snpID == "18:650764:C:T", "newLOCUS"]<-"TYMS"
condRes[condRes$snpID == "22:40023952:C:T", "newLOCUS"]<-"TNRC6B"
## want to incorporate locus names
locusInfo <- read_excel("~/Research/OneDrive/telomere/gwas/results/forRasikaOASISLookup-withnovelty_v2.xlsx")
locusInfo <- locusInfo %>% mutate(novelty = ifelse(is.na(novelty), "Known", "Novel")) %>% select(snpID, newLOCUS = LocusName.Final, novelty)
## Sanity check that every curated snpID appears in the conditional results.
all(locusInfo$snpID %in% condRes$snpID)
## NOTE(review): no-op self-assignment — likely leftover from a removed join
## with locusInfo (which is otherwise unused below); confirm before reuse.
condRes <- condRes
# select min p-value per locus
condRes %>% arrange(as.numeric(chr), newLOCUS, roundNum_Cond)
## Order loci by chromosome and earliest conditioning round of their sentinel.
locusOrder <- cbind(condRes %>% group_by(newLOCUS) %>% filter(roundNum_Cond == min(roundNum_Cond)) %>% select(chr, snpID, newLOCUS, roundNum_Cond) %>% arrange(as.numeric(chr), roundNum_Cond) %>% select(newLOCUS), locusOrder = 1:length(unique(condRes$newLOCUS))) %>% print(n=Inf)
condRes <- condRes %>% left_join(locusOrder)
condRes <- condRes %>% arrange(as.numeric(chr), locusOrder, roundNum_Cond)
## Effect sizes scaled x1000 (Est_*_bp columns) for the manuscript table.
forTable1 <- condRes %>% mutate(Est_Primary_bp = 1000 * Est_Primary, Est_Cond_bp = 1000 * Est_Cond, position=paste0("Chr", chr, ":", pos)) %>% select(LocusName = newLOCUS, position, ref, alt, freq, Score.pval_Primary, Est_Primary_bp, PVE_Primary, Score.pval_Cond, Est_Cond_bp, PVE_Cond, roundOfConditioning = roundNum_Cond)
write.csv(forTable1, file="~/Research/OneDrive/telomere/gwas/results/Table1.csv", row.names=FALSE, quote=TRUE, na="-")
## Per-locus min/max sentinel positions, in display order.
write.csv(condRes %>% group_by(newLOCUS) %>% summarise(chr = unique(chr), minSNPPos = min(pos), maxSNPPos=max(pos), locusOrder = unique(locusOrder)) %>% arrange(locusOrder), file="~/Research/OneDrive/telomere/gwas/results/lociStartStop.csv", row.names=FALSE, quote=TRUE, na="-")
annoInfo<-read.csv(gzfile("~/Research/OneDrive/telomere/gwas/results/MERGED_allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_p_lt_5e-8_OASISAnno.csv.gz"), header = TRUE, stringsAsFactors = FALSE)
condRes <- condRes %>% left_join(annoInfo)
forRasika <- condRes %>% select(LocusName = newLOCUS, snpID, rsNum, Chr38, Pos38, Pos37, freq, Score.pval_Primary, Est_Primary, PVE_Primary, Score.pval_Cond, Est_Cond, PVE_Cond, roundOfConditioning = roundNum_Cond)
write.csv(forRasika, file="~/Research/OneDrive/telomere/gwas/results/forRasikaOASISLookup.csv", row.names=FALSE, quote=TRUE, na="-")
## Read back the hand-annotated lookup and build locus windows:
## start/end = sentinel position (hg38) minus/plus the curated "-Kb"/"+Kb"
## extents (column names literally contain quotes, hence the backticks).
lociRes<- read_excel("~/Research/OneDrive/telomere/gwas/results/forRasikaOASISLookup.xlsx")
locusDef<-lociRes %>% filter(!is.na(`"-Kb"`)) %>% mutate(Pos38 = as.numeric(Pos38), start = Pos38 - `"-Kb"`*1000, end = Pos38 + `"+Kb"`*1000) %>% select(locusFINAL = LocusName.Final, chr = Chr38, start, end)
write.csv(locusDef, file="~/Research/OneDrive/telomere/gwas/results/locusDefinitions.csv", quote=FALSE, row.names=FALSE)
| /9_makeTables.R | permissive | mtaub/TOPMed_Telomere_GWAS | R | false | false | 41,613 | r | library(readxl)
library(GenomicRanges)
library(data.table)
library(dplyr)
library(tidyverse)
############################################
### Table 1: Peak SNPs with p < 5e-9, independent loci as identified from conditional analysis
############################################
## Builds the main manuscript table by merging four inputs on snpID
## (natural joins): joint/conditional summary, annotated primary results,
## heterogeneity tests, and curated locus/novelty info.
## File created by Matt, merging all primary and conditional results for 59 variants
multiVarRes<-read.table("~/Research/OneDrive/telomere/gwas/results/hits_summary_with_ancestry_with_joint_20200512.txt", header = TRUE, stringsAsFactors = FALSE, sep="\t") %>% select(-novelty)
## File from me with all primary results (all + population subgroups) and some conditional results, with OASIS annotation
condRes<-read.csv(gzfile("~/Research/OneDrive/telomere/gwas/results/MERGED_allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_p_lt_5e-9_conditionalAllSNPResSub_OASISAnno.csv.gz"), header=TRUE, stringsAsFactors = FALSE)
allRes<-left_join(multiVarRes, condRes)
## File from Matt with results from tests for heterogeneity added
hetRes <- read.csv("~/Research/OneDrive/telomere/gwas/results/hits_summary_with_ancestry_with_joint_20200512_wHetTest.csv", header = TRUE, stringsAsFactors = FALSE)
## Keep only Cochran's Q / I^2 heterogeneity columns.
hetRes <- hetRes %>% select(snpID, Q, Qp, I2, I2CI)
allRes <- left_join(allRes, hetRes)
## File with some locus info, including novelty (do not use column from Matt's results)
moreInfo <- read_excel("~/Research/OneDrive/telomere/gwas/results/forRasikaOASISLookup-withnovelty_v2_20200512.xlsx")
moreInfo <- moreInfo %>% mutate(asterisk = ifelse(is.na(asterisk) , "", asterisk)) %>% select(snpID, asterisk, novelty)
allRes <- left_join(allRes, moreInfo)
## annotation: use Function for exonic/intronic variants when available,
## otherwise the RegulomeDB score; blank out empty strings and Qp == 1.
allRes <- allRes %>% mutate(annotation = ifelse(Type %in% c("exonic", "intronic") & !(Function == ""), Function, RglmDB)) %>% mutate(annotation = ifelse(annotation == "", NA, annotation), Qp = ifelse(Qp == 1, NA, Qp))
allResForTable <- allRes %>% select(chr, pos, LocusName.Final, rsNum, asterisk, novelty, annotation, MAC, pval_primary, PVE_Primary, pval_joint, pval_joint_white, pval_joint_black, pval_joint_hl, pval_joint_asian, Est_joint, Est_joint_white, Est_joint_black, Est_joint_hl, Est_joint_asian, Qp)
## Mark variants with primary p > 5e-9 with "*" and scale joint effect
## estimates x1000 for display.
allResForTable <- allResForTable %>% mutate(asterisk = ifelse(pval_primary > 5e-9, "*", asterisk), Est_joint = 1000*Est_joint, Est_joint_white = 1000*Est_joint_white, Est_joint_black = 1000*Est_joint_black, Est_joint_hl = 1000*Est_joint_hl, Est_joint_asian = 1000* Est_joint_asian) %>% select(chr:rsNum, asterisk, novelty:Qp)
allResForTable <- allResForTable %>% arrange(as.numeric(chr), pos)
write.csv(allResForTable, file = "~/Research/OneDrive/telomere/gwas/manuscript/tableCSVs/Table1_20200519.csv", row.names=FALSE, quote = FALSE, na = "-")
## Also persist the full merged result set (unscaled) for downstream use.
write.csv(allRes %>% arrange(as.numeric(chr), pos), file = "~/Research/OneDrive/telomere/gwas/results/MERGED_allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_p_lt_5e-9_conditionalAllSNPResSub_OASISAnno_jointResults_20200512.csv", row.names = FALSE, quote = TRUE)
#allResForNancy <- allRes %>% select(chr, pos, snpID, LocusName.Final, rsNum, ref, alt, Est_joint_pooled = Est_joint, SE_joint_pooled = SE_joint, pval_joint_pooled = pval_joint, Est_joint_white, SE_joint_white, pval_joint_white, Est_joint_black, SE_joint_black, pval_joint_black, Est_joint_hl, SE_joint_hl, pval_joint_hl, Est_joint_asian, SE_joint_asian, pval_joint_asian ) %>% mutate_at(vars(Est_joint_pooled, SE_joint_pooled, Est_joint_white, SE_joint_white, Est_joint_black, SE_joint_black, Est_joint_hl, SE_joint_hl, Est_joint_asian, SE_joint_asian), .funs = funs(. * 1000)) %>% arrange(as.numeric(chr), pos)
#write.csv(allResForNancy, file = "~/Research/OneDrive/telomere/gwas/results/jointResultsForNancy_20200512.csv", row.names = FALSE, quote = FALSE)
#######################################################
### Table S1: Counts by study and ancestry group
#######################################################
## Builds the demographics table: per-study totals, sex, age summaries,
## and counts broken down by ancestry group and sequencing center.
load("~/Research/OneDrive/telomere/gwas/results/allResMerge_forAnalysis_031520.rda")
load("~/Research/OneDrive/telomere/gwas/results/White_forAnalysis_031920.rda")
load("~/Research/OneDrive/telomere/gwas/results/Black_forAnalysis_031920.rda")
load("~/Research/OneDrive/telomere/gwas/results/Asian_forAnalysis_031920.rda")
load("~/Research/OneDrive/telomere/gwas/results/HL_forAnalysis_032320.rda")
## Stack the ancestry-specific sample lists into one NWDID -> group lookup.
allSubGroups<-rbind(forAnalysisWhiteAmish %>% mutate(ancestryGroup = "White") %>% select(NWDID, ancestryGroup), rbind(forAnalysisBlack %>% mutate(ancestryGroup = "Black") %>% select(NWDID, ancestryGroup), rbind(forAnalysisHL %>% mutate(ancestryGroup = "Hispanic/Latino") %>% select(NWDID, ancestryGroup), forAnalysisAsian %>% mutate(ancestryGroup = "Asian") %>% select(NWDID, ancestryGroup))))
forAnalysis <- left_join(forAnalysis, allSubGroups)
## Samples in no ancestry subgroup become "Other"; fix factor order for display.
forAnalysis <- forAnalysis %>% mutate(ancestryGroup = ifelse(is.na(ancestryGroup), "Other", ancestryGroup), ancestryGroup = factor(ancestryGroup, levels=c("White", "Black", "Hispanic/Latino", "Asian", "Other")))
## Per-study summary stats joined with per-study ancestry-group counts (wide).
demoTable<-left_join(forAnalysis %>% group_by(Study=study) %>% summarise('Total Count'=n(), 'Male Count (Pct)'=paste0(sum(sex == "M"), " (", round(mean(sex=="M"),2), ")"), 'Mean Age (SD, Range)'=paste0(round(mean(age_at_dna_blood_draw_wgs),0), " (", round(sd(age_at_dna_blood_draw_wgs),1), ", ", round(min(age_at_dna_blood_draw_wgs),0), "-", round(max(age_at_dna_blood_draw_wgs),0), ")")), forAnalysis %>% group_by(Study=study, ancestryGroup) %>% summarise(n=n()) %>% spread(ancestryGroup, n))
demoTable<-left_join(demoTable,forAnalysis %>% group_by(Study=study, seq_center_new) %>% summarise(n=n()) %>% spread(seq_center_new, n) )
## add row at bottom with totals
Total<-c("Total", ungroup(forAnalysis) %>% summarise('Total Count'=n(), 'Male Count (Pct)'=paste0(sum(sex == "M"), " (", round(mean(sex=="M"),2), ")"), 'Mean Age (SD, Range)'=paste0(round(mean(age_at_dna_blood_draw_wgs),0), " (", round(sd(age_at_dna_blood_draw_wgs),1), ", ", round(min(age_at_dna_blood_draw_wgs),0), "-", round(max(age_at_dna_blood_draw_wgs),0), ")")), ungroup(forAnalysis) %>% group_by(ancestryGroup) %>% summarise(n=n()) %>% spread(ancestryGroup, n), ungroup(forAnalysis) %>% group_by(seq_center_new) %>% summarise(n=n()) %>% spread(seq_center_new, n))
demoTable<-rbind(demoTable, unlist(Total)) %>% print(n=Inf)
write.csv(demoTable, "~/Research/OneDrive/telomere/gwas/manuscript/tableCSVs/SupplTable1_Demographics.csv", row.names=FALSE, quote=TRUE, na="-")
## want to pull studies used in previous paper
#load("~/Research/telomere/manuscript/results/samplesForReplication.rda")
#load("~/Research/telomere/manuscript/results/samplesForDiscovery_112918.rda")
#prevStudies <- unique(c(forDiscovery$study, forReplication$study))
#currStudies <- unique(forAnalysis$study)
#all(prevStudies %in% currStudies)
#setdiff(currStudies, prevStudies)
#studyInfo<- read_excel("~/Research/OneDrive/telomere/gwas/inputFiles/Access to study on dbGAP.xlsx")
#all(forAnalysis$study %in% studyInfo$Study)
#ctsByStudy <- forAnalysis %>% count(study) %>% mutate(newStudy = !study %in% prevStudies)
#ctsByStudy <- studyInfo %>% select(study = Study, Topmed_project, topmed_phs) %>% right_join(ctsByStudy)
#write.csv(ctsByStudy, file = "~/Research/OneDrive/telomere/gwas/manuscript/basicStudyInfo.csv", row.names=FALSE, quote= FALSE)
##############################
### Table S2: Conditional results
##############################
## Per-round conditional effect estimates (scaled x1000) plus pairwise LD of
## each sentinel with its locus's primary sentinel; LD is blanked for the
## primary round itself.
condResTable <- allRes %>% select("snpID", "chr", "pos", "LocusName.Final", "rsNum", "round_Cond", "Est_primary", "pval_primary", paste(c("Est_cond", "pval_cond"), rep(1:6, each = 2), sep = "_")) %>% mutate_at(vars(contains("Est_")), ~(.*1000)) %>% mutate(round_Cond = ifelse(is.na(round_Cond), "Primary", sub("cond", "Cond_", round_Cond)))
ldTable<-read.csv("~/Research/OneDrive/telomere/gwas/results/sentinels_pairwise_ld_all.csv", header=TRUE, stringsAsFactors = FALSE)
## dplyr:: prefix avoids a clash with data.table's / other packages' rename().
ldTable <- ldTable %>% dplyr::rename(snpID = other_snp)
condResTable <- condResTable %>% left_join(ldTable)
condResTable <- condResTable %>% mutate(r2 = ifelse(round_Cond == "Primary", NA, r2), dprime = ifelse(round_Cond == "Primary", NA, dprime)) %>% select(chr:round_Cond, r2, dprime, Est_primary:pval_cond_6)
write.csv(condResTable, "~/Research/OneDrive/telomere/gwas/manuscript/tableCSVs/SupplTable2_ConditionalRes.csv", row.names=FALSE, quote=TRUE, na="")
##############################
### Table S3: SNPs from previous studies
##############################
## Looks up previously published sentinel SNVs in the TOPMed pooled and
## ancestry-specific GWAS results (pre-extracted with the awk one-liners below)
## and flags whether each falls inside a Table 1 locus window.
## code for pulling variants from primary results
#gunzip -c /dcl01/mathias1/data/telomere_mtaub/gwas/results/ALL_GWASResults/allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop.csv.gz | awk 'NR==FNR{a[$1,$2]="foo";next}; a[$2,$3]=="foo" {print}' FS=',' /dcl01/mathias1/data/telomere_mtaub/gwas/results/novel_loci/published_sentinels_unique_chrPos.csv FS=',' - > /dcl01/mathias1/data/telomere_mtaub/gwas/results/novel_loci/allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_publishedSentinels.csv
#gunzip -c /dcl01/mathias1/data/telomere_mtaub/gwas/results/White_GWASResults/White_allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop.csv.gz | awk 'NR==FNR{a[$1,$2]="foo";next}; a[$2,$3]=="foo" {print}' FS=',' /dcl01/mathias1/data/telomere_mtaub/gwas/results/novel_loci/published_sentinels_unique_chrPos.csv FS=',' - > /dcl01/mathias1/data/telomere_mtaub/gwas/results/novel_loci/White_allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_publishedSentinels.csv
#gunzip -c /dcl01/mathias1/data/telomere_mtaub/gwas/results/Black_GWASResults/Black_allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop.csv.gz | awk 'NR==FNR{a[$1,$2]="foo";next}; a[$2,$3]=="foo" {print}' FS=',' /dcl01/mathias1/data/telomere_mtaub/gwas/results/novel_loci/published_sentinels_unique_chrPos.csv FS=',' - > /dcl01/mathias1/data/telomere_mtaub/gwas/results/novel_loci/Black_allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_publishedSentinels.csv
#gunzip -c /dcl01/mathias1/data/telomere_mtaub/gwas/results/HispanicLatino_GWASResults/HL_allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop.csv.gz | awk 'NR==FNR{a[$1,$2]="foo";next}; a[$2,$3]=="foo" {print}' FS=',' /dcl01/mathias1/data/telomere_mtaub/gwas/results/novel_loci/published_sentinels_unique_chrPos.csv FS=',' - > /dcl01/mathias1/data/telomere_mtaub/gwas/results/novel_loci/HL_allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_publishedSentinels.csv
#gunzip -c /dcl01/mathias1/data/telomere_mtaub/gwas/results/Asian_GWASResults/Asian_allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop.csv.gz | awk 'NR==FNR{a[$1,$2]="foo";next}; a[$2,$3]=="foo" {print}' FS=',' /dcl01/mathias1/data/telomere_mtaub/gwas/results/novel_loci/published_sentinels_unique_chrPos.csv FS=',' - > /dcl01/mathias1/data/telomere_mtaub/gwas/results/novel_loci/Asian_allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_publishedSentinels.csv
#gunzip -c /dcl01/mathias1/data/telomere_mtaub/gwas/results/Asian_GWASResults/Asian_allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop.csv.gz | head -1 > /dcl01/mathias1/data/telomere_mtaub/gwas/results/novel_loci/allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_colNames.csv
## The sub() below replaces a Unicode minus sign in published p-values with
## an ASCII hyphen so as.numeric() works later.
prevSentinels <- read_excel("~/Research/OneDrive/telomere/gwas/results/ALL_GWASResults/novel_loci/published_sentinels-betas_PMCID.xlsx") %>% rename(rsNum = SNP) %>% mutate(P = sub("−", "-", P))
prevSentinelsPos <- read.table("~/Research/OneDrive/telomere/gwas/results/ALL_GWASResults/novel_loci/pairwise-sentinel-LD-reqd_files/published_sentinels_positions.txt", header=TRUE, stringsAsFactors = FALSE, sep="\t") %>% rename(rsNum = Variation.ID)
moreInfo <- read_excel("~/Research/OneDrive/telomere/gwas/results/forRasikaOASISLookup-withnovelty_v2_20200512.xlsx")
## Parse chr/pos out of snpID ("chr:pos:ref:alt"); X is recoded as 23.
annotateTable<-moreInfo %>% mutate(chr = gsub("\\:.*", "", snpID), chr = as.numeric(ifelse(chr == "X", 23,chr)), pos = as.numeric(sapply(strsplit(snpID, split=":"), function(x) x[2]))) %>% dplyr::select(SNP=snpID, label=LocusName.Final, chr, pos, negKb = `"-Kb"`, posKb = `"+Kb"`, novelty) %>% filter(!is.na(negKb))
rangesToPull<-GRanges(seqnames=annotateTable$chr, ranges=IRanges(start=annotateTable$pos - 1000*annotateTable$negKb, end=annotateTable$pos + 1000*annotateTable$posKb))
## NOTE(review): the 5th locus window is widened by hand (extra 101 kb left /
## 2.95 Mb right) — presumably to capture an extended signal region; confirm
## the index still points at the intended locus if annotateTable changes.
start(ranges(rangesToPull)[5]) <- start(ranges(rangesToPull)[5]) - 101000
end(ranges(rangesToPull)[5]) <- end(ranges(rangesToPull)[5]) + 2950000
#write.csv(prevSentinelsPos %>% mutate(chrPos = paste(Chromosome, Position, sep=":")) %>% filter(!duplicated(chrPos)) %>% select(Chromosome, Position), file = "~/Research/OneDrive/telomere/gwas/results/ALL_GWASResults/novel_loci/pairwise-sentinel-LD-reqd_files/published_sentinels_unique_chrPos.csv", row.names = FALSE, quote = FALSE)
# confirming only 57 unique entries, so will drop any
length(unique(prevSentinelsPos$rsNum))
length(unique(apply(prevSentinelsPos, 1, function(x) paste(x, collapse = ":"))))
prevSentinelsPos <- prevSentinelsPos %>% filter(!duplicated(rsNum))
prevSentinels <- left_join(prevSentinels, prevSentinelsPos)
prevSentinels <- prevSentinels %>% mutate(chrPos = paste(Chromosome, Position, sep=":"))
## Header line only; used to name the header-less awk extracts below. Result
## columns 5-14 get a per-population suffix so the files can be merged.
colNames <- read.csv("~/Research/OneDrive/telomere/gwas/results/ALL_GWASResults/novel_loci/pairwise-sentinel-LD-reqd_files/allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_colNames.csv")
colNames <- names(colNames)
prevAll<-read.csv("~/Research/OneDrive/telomere/gwas/results/ALL_GWASResults/novel_loci/pairwise-sentinel-LD-reqd_files/allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_publishedSentinels.csv", header=FALSE, stringsAsFactors = FALSE)
names(prevAll)<-c(colNames[1:4], paste0(colNames[5:14], ".ALL"), colNames[15:length(colNames)])
for (ancTag in c("White", "Black", "HL", "Asian")){
prevCurr<-read.csv(paste0("~/Research/OneDrive/telomere/gwas/results/ALL_GWASResults/novel_loci/pairwise-sentinel-LD-reqd_files/", ancTag, "_allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_publishedSentinels.csv"), header=FALSE, stringsAsFactors = FALSE)
names(prevCurr)<-c(colNames[1:4], paste0(colNames[5:14], ".", ancTag), colNames[15:length(colNames)])
prevAll <- left_join(prevAll, prevCurr)
}
prevAll <- prevAll %>% mutate(chrPos = paste(chr, pos, sep=":"))
## Scale all effect estimates x1000 (consistent with the other tables).
prevAll <- prevAll %>% mutate(Est.ALL = 1000*Est.ALL, Est.White = 1000*Est.White, Est.Black = 1000*Est.Black, Est.HL = 1000*Est.HL, Est.Asian = 1000*Est.Asian)
prevSentinels <- left_join(prevSentinels, prevAll)
# there is one with multi alleles, but only one matches ref/alt from Dorajoo
prevSentinels %>% filter(chrPos == "8:73008648")
prevSentinels <- prevSentinels %>% filter(!snpID == "8:73008648:T:A")
## need to check on signs for betas and remove betas where EA is missing
prevSentinels <- prevSentinels %>% mutate(EA = toupper(EA))
prevSentinels %>% filter(EA != alt) %>% select(rsNum, EA, chr, pos, ref, alt, Beta, Est.ALL)
## Flip published betas whose effect allele differs from our alt allele so
## all signs are on the same allele scale; Sign is "N/A" when EA is missing.
prevSentinels <- prevSentinels %>% mutate(Beta = ifelse(EA != alt, -Beta, ifelse(is.na(EA), NA, Beta)), Sign = ifelse(is.na(Beta), "N/A", ifelse(Beta < 0, "-", "+")), PMCID = substr(PMCID, 1, 10))
## Mark sentinels that fall inside any Table 1 locus window.
prevSentinelsGR <- GRanges(seqnames=prevSentinels$Chromosome, ranges=IRanges(start=prevSentinels$Position, width = 1))
table1PrevOL <- findOverlaps(prevSentinelsGR, rangesToPull)
prevSentinels$inTable1 <- "Yes"
prevSentinels$inTable1[setdiff(1:nrow(prevSentinels), queryHits(table1PrevOL))] <- "No"
# changed to only include trans-ethnic results
prevSentinelsForTable <- prevSentinels %>% select("chr", "pos", "rsNum", "ref", "alt", "GENE", "Author", "Year", "PMCID", "P", "Sign", "inTable1", paste(c("Score.pval", "Est", "freq"), rep(c("ALL"), each = 3), sep=".")) %>% arrange(chr, pos, Author)
prevSentinelsForTable <- prevSentinelsForTable %>% filter(!as.numeric(P) > 5e-8)
write.csv(prevSentinelsForTable, file = "~/Research/OneDrive/telomere/gwas/manuscript/tableCSVs/SupplTable3_published_sentinels_TOPMedResults.csv", row.names=FALSE, quote = TRUE, na = "-")
## ADD COLUMN: is this in Table 1 or not? Add to the ALL section (and check -- if it is exactly those with p < 5x10-9 or from loci from table 1. Question is really do we identify all these loci?? Or which do we not identify?)
## For replication table summary : correlation of effect sizes for replication of novel loci??
##############################
### Table S4: Annotation of Table 1 variants
##############################
## Functional-annotation columns (OASIS) to export for each sentinel variant.
annoColsToPull <- c("chr", "pos", "LocusName.Final", "rsNum", "ref", "alt", "Type", "Function", "AAchg", "GENE", "RglmDB", "eigenPC", "Dnase", "Reg", "SIFT", "PP2_HDIV","PP2_HVAR", "LRT", "MT", "MA", "FATHMM", "metaSVM", "metaLR", "PhyloP", "SiPhy", "GERP..", "CADD", "ClinVar", "Phast")
## all_of() makes the external character vector an explicit column selection;
## passing the bare vector to select() is ambiguous and deprecated by tidyselect.
write.csv(allRes %>% arrange(as.numeric(chr), pos) %>% select(all_of(annoColsToPull)), file = "~/Research/OneDrive/telomere/gwas/manuscript/tableCSVs/SupplTable4_Annotation.csv", row.names=FALSE, quote = TRUE, na = "")
##############################
### Table S5: Replication of novel findings
##############################
## Look up our novel sentinel variants in two external replication datasets
## (Dorajoo et al. and Li et al.) and classify each by the strictest
## replication threshold it passes.
# dorajoo
dorajoo<-fread("~/Research/OneDrive/telomere/gwas/results/ALL_GWASResults/novel_loci/replicated_loci/replication_dependent_files/dorajoo_replication.txt")
colnames(dorajoo)<-c("V1", "chr", "hg37pos", "other_allele", "test_allele", "pvalue", "beta", "se", "p_het")
#dorajoo has a bunch of SNPs where it's rsID:pos:ref:alt
#I need to separate it to just the rsID
library(stringr)
## Split identifiers by how many ":" they contain: 0 = plain rsID,
## 3 = "rsID:pos:ref:alt" that needs splitting (data.table tstrsplit).
dorajoo$count<-str_count(dorajoo$V1, ':')
zero<-subset(dorajoo, dorajoo$count == 0)
colnames(zero)[1]<-"SNP"
three<-subset(dorajoo, dorajoo$count == 3)
three[,c("SNP", "pos", "ref", "alt"):=tstrsplit(V1, ":", fixed=T)]
## Reorder so SNP comes first, matching the `zero` subset's columns.
three<-three[,c(11,2:10)]
dorajoo<-rbind(zero, three)
#there are some rsID's which say GSA-rsid# get rid of the prefix
dorajoo$SNP<-sub("GSA-", "", dorajoo$SNP)
dor<-dorajoo[,c(1:8)]
colnames(dor)<-c("rsNum", "dorajoo_chr", "dorajoo_hg37_pos", "dorajoo_NEA", "dorajoo_EA", "dorajoo_pvalue",
"dorajoo_beta", "dorajoo_se")
# li
li<-fread("~/Research/OneDrive/telomere/gwas/results/ALL_GWASResults/novel_loci/replicated_loci/replication_dependent_files/li_replication.txt")
colnames(li)<-c("rsNum", "li_chr", "li_hg37pos", "li_EA", "li_NEA", "li_EAF", "li_BETA", "li_SE", "li_pvalue",
"n", "fdr")
li<-li[,c(1:9)]
## Join both replication sets onto our novel variants (by rsNum).
repTable <- left_join(allResForTable, dor) %>% left_join(li) %>% filter(novelty == "Novel") %>% select( chr, pos, LocusName.Final, rsNum,Est_joint, li_pvalue, li_BETA, li_SE, dorajoo_pvalue, dorajoo_beta, dorajoo_se)
## Bonferroni denominator = number of variants present in either dataset.
nTests<-repTable %>% filter(!is.na(li_pvalue) | !is.na(dorajoo_pvalue)) %>% nrow()
## threshold: "<0.05/nTests" label if either study passes Bonferroni,
## else "<0.05" if either passes nominal, else ">0.05"; NA if in neither study.
repTable <- repTable %>% mutate(threshold = ifelse(rowSums(cbind(li_pvalue, dorajoo_pvalue) < 0.05/nTests, na.rm = TRUE)>0, paste0("<", round(0.05/nTests,4)), ifelse(rowSums(cbind(li_pvalue, dorajoo_pvalue) < 0.05, na.rm = TRUE)>0, "<0.05", ifelse(rowSums(is.na(cbind(li_pvalue, dorajoo_pvalue)))==2, NA, ">0.05"))))
write.csv(repTable, file = "~/Research/OneDrive/telomere/gwas/manuscript/tableCSVs/SupplTable5_replication_novel_TOPMedResults.csv", row.names=FALSE, quote = TRUE, na = "-")
##############################
### Table S6: Expanded population level results
##############################
## Pooled and ancestry-specific primary results for the sentinel variants:
## effect estimates, p-values, allele frequencies, and PVE.
colsForPrimaryTable <- c("chr", "pos", "LocusName.Final", "rsNum", paste0("Est_primary", c("", "_white", "_black", "_hl", "_asian")), paste0("pval_primary", c("", "_white", "_black", "_hl", "_asian")), paste0("freq", c("", ".White", ".Black", ".HispanicLatino", ".Asian")), paste0("PVE", c("_Primary", ".White", ".Black", ".HispanicLatino", ".Asian")))
## Scale effect estimates x1000 (consistent with the other manuscript tables).
## across() + lambda replaces the defunct mutate_at()/funs() idiom, which
## errors under dplyr >= 1.0; all_of() makes the external column vector an
## explicit, error-checked selection.
primaryTable <- allRes %>%
  select(all_of(colsForPrimaryTable)) %>%
  mutate(across(c(Est_primary, Est_primary_white, Est_primary_black, Est_primary_hl, Est_primary_asian), ~ .x * 1000)) %>%
  arrange(as.numeric(chr), pos)
write.csv(primaryTable, file = "~/Research/OneDrive/telomere/gwas/manuscript/tableCSVs/SupplTable6_primaryResults.csv", row.names=FALSE, quote = TRUE, na = "-")
##############################
### Table S9: BioVU PheWAS Results
##############################
## Per-SNP and PRS-based PheWAS results from BioVU, for AA and EA samples.
## Significance uses a per-SNP Bonferroni cutoff (0.05 / number of phenotypes
## tested for that SNP), recomputed here and checked against the file's flag.
## NOTE(review): phewasAAOld is read but never used below — confirm it can be dropped.
phewasAAOld <- read_excel("~/Research/OneDrive/telomere/BioVU/AA_PheWAS_combined.xlsx")
phewasAA <- read_excel("~/Research/OneDrive/telomere/BioVU/re/AA_PheWAS_combined.xlsx")
phewasAA <- phewasAA %>% group_by(snp) %>% mutate(nTests = sum(!is.na(OR)), newBonf = ifelse(p < 0.05/nTests, TRUE, FALSE))
phewasAA <- phewasAA %>% group_by(phenotype) %>% mutate(nSnps = sum(!is.na(OR)))
phewasAASub<-phewasAA %>% filter(!is.na(OR))
# they match
table(phewasAASub$bonferroni, phewasAASub$newBonf)
phewasAASig <- phewasAASub %>% filter(newBonf) %>% mutate(nSamps = paste(n_cases, n_controls, sep = " / "), ancGroup = "AA") %>% select(rsNum = snp, phenotype, group, description, OR, p, nSamps, ancGroup)
## Attach locus names and (x1000-scaled) joint effect sizes from allRes.
phewasBySNP<-phewasAASig %>% ungroup() %>% left_join(allRes %>% select(chr, pos, rsNum, LocusName.Final, ref, alt, Est_joint)) %>% mutate(Est_joint = 1000*Est_joint) %>% arrange(as.numeric(chr), pos, p) %>% select(chr, pos, LocusName.Final, rsNum, ref, alt, Est_joint, phenotype, group, description, OR, p, nSamps, ancGroup)
## Same processing for the EA results.
phewasEA <- read_excel("~/Research/OneDrive/telomere/BioVU/re/EA_PheWAS_combined.xlsx")
phewasEA <- phewasEA %>% group_by(snp) %>% mutate(nTests = sum(!is.na(OR)), newBonf = ifelse(p < 0.05/nTests, TRUE, FALSE))
phewasEA <- phewasEA %>% group_by(phenotype) %>% mutate(nSnps = sum(!is.na(OR)))
phewasEASub<-phewasEA %>% filter(!is.na(OR))
# they match
table(phewasEASub$bonferroni, phewasEASub$newBonf)
phewasEASig <- phewasEASub %>% filter(newBonf) %>% mutate(nSamps = paste(n_cases, n_controls, sep = " / "), ancGroup = "EA") %>% select(rsNum = snp, phenotype, group, description, OR, p, nSamps, ancGroup)
phewasBySNPEA<-phewasEASig %>% ungroup() %>% left_join(allRes %>% select(chr, pos, rsNum, LocusName.Final, ref, alt, Est_joint)) %>% mutate(Est_joint = 1000*Est_joint) %>% arrange(as.numeric(chr), pos, p) %>% select(chr, pos, LocusName.Final, rsNum, ref, alt, Est_joint, phenotype, group, description, OR, p, nSamps, ancGroup)
#phewasEA <- phewasEA %>% mutate(OR = exp(beta), nSamps = NA, ancGroup = "EA") %>% select(rsNum, phenotype, group, description, OR, p, nSamps, ancGroup)
#phewasEA <- phewasEA %>% left_join(allRes %>% select(chr, pos, rsNum, LocusName.Final, ref, alt, Est_joint)) %>% mutate(Est_joint = 1000*Est_joint) %>% arrange(as.numeric(chr), pos, p) %>% select(chr, pos, LocusName.Final, rsNum, ref, alt, Est_joint, phenotype, group, description, OR, p, nSamps, ancGroup)
phewasBySNPBoth <- rbind(phewasBySNP, phewasBySNPEA) %>% arrange(as.numeric(chr), pos, p)
write.csv(phewasBySNPBoth, file = "~/Research/OneDrive/telomere/gwas/manuscript/tableCSVs/SupplTable9_Part2_PheWASBySNP.csv", row.names=FALSE, quote = TRUE, na = "-")
#phewasEAPRS <- read_delim("~/Research/OneDrive/telomere/BioVU/Telomere_EA_PRS_Phewas.txt", delim = "\t")
## PRS-based PheWAS: single score, so Bonferroni uses the total test count.
phewasEAPRS <- read_excel("~/Research/OneDrive/telomere/BioVU/re/EA_PRS_Scaled_PheWAS_Results.xlsx")
phewasEAPRSSub <- phewasEAPRS %>% filter(!is.na(OR))
phewasEAPRSSub <- phewasEAPRSSub %>% mutate(newBonf = ifelse(p < 0.05/nrow(phewasEAPRSSub), TRUE, FALSE))
# they match
table(phewasEAPRSSub$bonferroni, phewasEAPRSSub$newBonf)
phewasEAPRSSig <- phewasEAPRSSub %>% filter(newBonf) %>% mutate(nSamps = paste(n_cases, n_controls, sep = " / "), ancGroup = "EA") %>% select(phenotype, group, description, OR, p, nSamps, ancGroup) %>% arrange(phenotype)
phewasAAPRS <- read_excel("~/Research/OneDrive/telomere/BioVU/re/AA_PRS_Scaled_PheWAS_Results.xlsx")
phewasAAPRSSub <- phewasAAPRS %>% filter(!is.na(OR))
phewasAAPRSSub <- phewasAAPRSSub %>% mutate(newBonf = ifelse(p < 0.05/nrow(phewasAAPRSSub), TRUE, FALSE))
# they match
table(phewasAAPRSSub$bonferroni, phewasAAPRSSub$newBonf)
phewasAAPRSSig <- phewasAAPRSSub %>% filter(newBonf) %>% mutate(nSamps = paste(n_cases, n_controls, sep = " / "), ancGroup = "AA") %>% select(phenotype, group, description, OR, p, nSamps, ancGroup)
## no significant AA PRS results
write.csv(phewasEAPRSSig, file = "~/Research/OneDrive/telomere/gwas/manuscript/tableCSVs/SupplTable9_Part1_PheWASByPRS.csv", row.names=FALSE, quote = TRUE, na = "-")
##############################
### Table SXX: BioVU LabWAS Results -- LEAVE OUT FOR NOW
##############################
## LabWAS (lab-value association) results from BioVU, per SNP and per PRS.
## NOTE(review): header says recomputed Bonferroni flags do not match the
## file's flags here — resolve the discrepancy before publishing this table.
## my bonferroni correction does not match hers here or for EAs
labwasAA <- read_csv("~/Research/OneDrive/telomere/BioVU/AA_LabWAS_Results_Combined.csv")
labwasAA <- labwasAA %>% group_by(Predictor) %>% mutate(nTests = sum(!is.na(OR)), newBonf = ifelse(p < 0.05/nTests, TRUE, FALSE))
table(labwasAA$Bonferroni, labwasAA$newBonf)
labwasAASig <- labwasAA %>% filter(newBonf) %>% mutate(ancGroup = "AA") %>% select(Predictor, Full_name, Group, N, p, OR, newBonf, ancGroup)
labwasEA <- read_csv("~/Research/OneDrive/telomere/BioVU/EA_LabWAS_Results_Combined.csv")
labwasEA <- labwasEA %>% group_by(Predictor) %>% mutate(nTests = sum(!is.na(OR)), newBonf = ifelse(p < 0.05/nTests, TRUE, FALSE))
table(labwasEA$Bonferroni, labwasEA$newBonf)
labwasEASig <- labwasEA %>% filter(newBonf) %>% mutate(ancGroup = "EA") %>% select(Predictor, Full_name, Group, N, p, OR, newBonf, ancGroup)
## PRS-based LabWAS: Bonferroni over the full result set.
labwasAAPRS <- read_csv("~/Research/OneDrive/telomere/BioVU/Telomere_AA_PRS_LabWAS_Results.csv")
labwasAAPRS <- labwasAAPRS %>% mutate(newBonf = ifelse(p < 0.05/nrow(labwasAAPRS), TRUE, FALSE))
table(labwasAAPRS$Bonferroni, labwasAAPRS$newBonf) # nothing significant for AA PRS
labwasEAPRS <- read_csv("~/Research/OneDrive/telomere/BioVU/Telomere_EA_PRS_LabWAS_Results.csv")
labwasEAPRS <- labwasEAPRS %>% mutate(newBonf = ifelse(p < 0.05/nrow(labwasEAPRS), TRUE, FALSE))
table(labwasEAPRS$Bonferroni, labwasEAPRS$newBonf)
labwasEAPRSSig <- labwasEAPRS %>% filter(newBonf) %>% mutate(ancGroup = "EA") %>% select(Full_name, Group, OR, p, N, ancGroup) %>% arrange(p)
write.csv(labwasEAPRSSig, file = "~/Research/OneDrive/telomere/gwas/manuscript/tableCSVs/SupplTable10_Part1_LabWASByPRS.csv", row.names=FALSE, quote = TRUE, na = "-")
## Combine AA and EA per-SNP hits; attach locus names and x1000-scaled joint effects.
labwasBySNP<-rbind(labwasEASig %>% ungroup(), labwasAASig %>% ungroup()) %>% rename(rsNum = Predictor) %>% left_join(allRes %>% select(chr, pos, rsNum, LocusName.Final, ref, alt, Est_joint)) %>% mutate(Est_joint = 1000*Est_joint) %>% arrange(as.numeric(chr), pos, p) %>% select(chr, pos, LocusName.Final, rsNum, ref, alt, Est_joint, Full_name, Group, OR, p, N, ancGroup)
write.csv(labwasBySNP, file = "~/Research/OneDrive/telomere/gwas/manuscript/tableCSVs/SupplTable10_Part2_LabWASBySNP.csv", row.names=FALSE, quote = TRUE, na = "-")
##############################
### Table S10: UKBiobank Results
##############################
## UK Biobank SAIGE PheWAS lookups for the sentinel SNVs; ORs derived from
## the reported betas (exp(beta)).
## these results use a per-SNP bonferroni cutoff since that is what was done for BioVU
UKBBRes <- read.csv("~/Research/OneDrive/telomere/gwas/results/UKBB/UKBB_SAIGEResSigBySNP.csv", header= TRUE, stringsAsFactors = FALSE)
UKBBRes <- UKBBRes %>% rename(rsNum = ID) %>% left_join(allResForTable %>% select(rsNum, LocusName.Final, Est_joint)) %>% mutate(OR = exp(beta))
UKBBResTable <- UKBBRes %>% mutate(nSamps = paste(Ncases, Ncontrols, sep = " / ")) %>% select(X.CHROM, POS, LocusName.Final, rsNum, REF, ALT, Est_joint, PheCode, Group, Description, OR, pval, nSamps) %>% arrange(as.numeric(X.CHROM), POS, pval)
write.csv(UKBBResTable, file = "~/Research/OneDrive/telomere/gwas/manuscript/tableCSVs/SupplTable11_UKBBResults.csv", row.names=FALSE, quote = TRUE, na = "-")
##############################
### Additional checks of reported paper results
##############################
## Interactive sanity checks that the counts quoted in the manuscript text
## match the assembled tables (values are printed, not saved).
# count of significant loci
length(unique(allResForTable$LocusName.Final))
# count of novel loci
allResForTable %>% filter(!duplicated(LocusName.Final)) %>% count(novelty) %>% filter(novelty == "Novel")
# count of novel variants
allResForTable %>% count(novelty) %>% filter(novelty == "Novel")
# how many loci replicated: a locus counts as replicated if any of its novel
# SNVs passed either the Bonferroni-adjusted or the nominal (0.05) threshold.
# The Bonferroni label is recomputed from nTests (as when repTable$threshold
# was built) instead of hard-coding "<0.0026", so the check stays correct if
# the number of replication lookups changes.
repTable %>% group_by(LocusName.Final) %>% summarise(isSig = any(threshold %in% c(paste0("<", round(0.05/nTests, 4)), "<0.05"))) %>% count(isSig)
## Further manuscript-text sanity checks (printed, not saved).
# count of loci with more than one signal
allResForTable %>% count(LocusName.Final) %>% filter(n > 1)
# count that directly match prior signals
# this is based on things labeled ** which was done by hand -- SHOULD BE DOUBLE CHECKED
allResForTable %>% count(asterisk)
# counts of different sample groups
nrow(forAnalysis)
nrow(forAnalysisWhiteAmish)
nrow(forAnalysisBlack)
nrow(forAnalysisHL)
nrow(forAnalysisAsian)
## "Other" group size = total minus the four named ancestry groups.
nrow(forAnalysis) - (nrow(forAnalysisWhiteAmish) + nrow(forAnalysisBlack) + nrow(forAnalysisHL) + nrow(forAnalysisAsian))
# percent male
mean(forAnalysis$sex == "M")
# age ranges
range(forAnalysis$age_at_dna_blood_draw_wgs)
# total number of variants computed as
#[mtaub@compute-060 ALL_GWASResults]$ gunzip -c allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0.csv.gz | wc -l
#162583130
# count of replicated prior loci and their names
# to get 25 prior loci group ("TERC" "LRRC34 (TERC)") ("OBFC1" "STN1 (OBFC1)") ("ZNF208" "ZNF676") ("RTEL1/STMN3" "RTEL1" "RTEL1/ZBTB46")
# 30 - 5 extras = 25
# so there are 16 total of the 25 prior that are replicated by us
unique(prevSentinelsForTable$GENE)
## Known loci are those with novelty == NA (novelty column only marks "Novel").
length(allResForTable %>% filter(!duplicated(LocusName.Final), is.na(novelty)) %>% pull(LocusName.Final))
## Replace "_" with "/" in locus names for the manuscript text.
sub("\\_", "\\/", paste(allResForTable %>% filter(!duplicated(LocusName.Final), is.na(novelty)) %>% pull(LocusName.Final), collapse = ", ") )
# this second one includes PRRC2A which we think is actually not necessarily included in the nearby HSPA1A signal (due to low LD between our sentinel and the previously reported SNV) and is missing ZNF257/ZNF676 just due to naming differences
unique(prevSentinelsForTable %>% filter(Score.pval.ALL<5e-9 | GENE %in% allResForTable$LocusName.Final) %>% pull(GENE))
# close to replicated loci; only SENP7 and CTC1 were not previoulsy mentioned
prevSentinelsForTable %>% filter(Score.pval.ALL < 0.05, Score.pval.ALL > 5e-9) %>% select(GENE, rsNum, Score.pval.ALL)
# distance between PRRC2A and HSPA1A
allResForTable %>% filter(LocusName.Final == "HSPA1A") %>% pull(pos) - prevSentinelsForTable %>% filter(GENE == "PRRC2A") %>% pull(pos)
# not replicated
sub("\\_", "\\/", paste(prevSentinelsForTable %>% filter(inTable1 == "No", Score.pval.ALL > 0.05) %>% pull(GENE), collapse = ", "))
# for known loci, how many have multiple independent SNVs?
allResForTable %>% filter(is.na(novelty)) %>% count(LocusName.Final) %>% filter(n > 1)
allResForTable %>% filter(is.na(novelty)) %>% count(asterisk) %>% filter(asterisk == "**")
# number/list of loci with missense coding variants
allResForTable %>% filter(is.na(novelty), annotation == "missense") %>% pull(LocusName.Final)
# number of variants in known loci with RegulomeDB score < 7
allResForTable %>% filter(is.na(novelty)) %>% count(annotation)
# count of variants falling in novel loci
allResForTable %>% filter(novelty == "Novel") %>% nrow()
# replication results
# note: TYMS is getting counted twice
repTable %>% filter(!is.na(li_pvalue) | !is.na(dorajoo_pvalue)) %>% nrow()
repTable %>% filter(threshold == "<0.0026")
paste(repTable %>% filter(threshold == "<0.0026") %>% pull(LocusName.Final), collapse = ", ")
repTable %>% filter(threshold == "<0.05")
paste(repTable %>% filter(threshold == "<0.05") %>% pull(LocusName.Final), collapse = ", ")
# p-value for SAMHD1
allResForTable %>% filter(LocusName.Final == "SAMHD1") %>% pull(pval_joint)
# correlation numbers for replication results
repTable %>% summarise(corTOPMedLi = cor.test(Est_joint, li_BETA)$estimate, corPTOPMedLi = cor.test(Est_joint, li_BETA)$p.value, corTOPMedDorajoo = cor.test(Est_joint, dorajoo_beta)$estimate, corPTOPMedDorajoo = cor.test(Est_joint, dorajoo_beta)$p.value)
repTable %>% filter(!is.na(threshold), !threshold == ">0.05") %>% summarise(corTOPMedLi = cor.test(Est_joint, li_BETA)$estimate, corPTOPMedLi = cor.test(Est_joint, li_BETA)$p.value, corTOPMedDorajoo = cor.test(Est_joint, dorajoo_beta)$estimate, corPTOPMedDorajoo = cor.test(Est_joint, dorajoo_beta)$p.value)
# min and max effect sizes of common and rare variants
allRes %>% filter(freq >= 0.05 & freq <= 0.95) %>% summarise(minPrimary = min(abs(Est_primary)), maxPrimary = max(abs(Est_primary)))
allRes %>% filter(!(freq >= 0.05 & freq <= 0.95)) %>% summarise(minPrimary = min(abs(Est_primary)), maxPrimary = max(abs(Est_primary)))
# which SNVs have p-values for Cochrane's Q < 0.05
allResForTable %>% filter(Qp < 0.05)
# group-specific p-values for TINF2
allResForTable %>% filter(rsNum == "rs28372734")
allResForTable %>% filter(rsNum == "rs8016076")
phewasEAPRSSig %>% count(group)
phewasEAPRSSig %>% filter(group == "neoplasms") %>% pull(OR)
##############################
### Additional data selections
##############################
## subset JHS, WHI and GeneSTAR
load("~/Research/OneDrive/telomere/gwas/results/allResMerge_forAnalysis_031520.rda")
forAnalysisSubStudies<-forAnalysis %>% filter(study %in% c("JHS", "WHI", "GeneSTAR"))
write.csv(forAnalysisSubStudies, file = "~/Research/OneDrive/telomere/gwas/results/allResMerge_forAnalysis_031520_JHSWHIGeneSTAR.csv", row.names = FALSE, quote=FALSE)
### Table S3: All SNPs with p < 5e-9 in at least one group??
mergedRes<-read.csv(gzfile("~/Research/OneDrive/telomere/gwas/results/MERGED_allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_p_lt_5e-8_OASISAnno.csv.gz"), header=TRUE, stringsAsFactors = FALSE)
allWithLocusGroup<-read.csv("~/Research/OneDrive/telomere/gwas/results/ALL_GWASResults/allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_p_lt_5e-8_withLocusGroups.csv", header = TRUE, stringsAsFactors = FALSE)
mergedRes <- allWithLocusGroup %>% select(locusGroup, newLOCUS, snpID) %>% right_join(mergedRes)
# Chain adjacent rows into merged locus groups: a new group starts at a
# chromosome change or a position gap > 200 kb. Assumes mergedRes is sorted
# by chr then pos (inherited from the input file ordering).
mergedRes$diffPos <- c(0,diff(mergedRes$pos))
mergedRes$sameChr <- c(TRUE, mergedRes$chr[1:(nrow(mergedRes)-1)] == mergedRes$chr[2:(nrow(mergedRes))] )
contigBreaks<-which(!mergedRes$sameChr | mergedRes$diffPos > 200000)
# eyeball the rows on either side of each break
mergedRes[unique(sort(c(contigBreaks-1, contigBreaks))),1:5]
# run-length expansion of group ids; the times vector sums to nrow(mergedRes)
mergedRes$locusGroupMerged<-rep(1:(length(contigBreaks)+1), times=c(contigBreaks[1]-1, diff(contigBreaks), nrow(mergedRes)-contigBreaks[length(contigBreaks)]+1))
# groups where every SNV lacks a named locus = signals outside known loci
new8OutsideLoci<-mergedRes %>% filter(locusGroupMerged %in% (mergedRes %>% group_by(locusGroupMerged) %>% summarise(allNA = all(is.na(newLOCUS))) %>% filter(allNA==TRUE) %>% pull(locusGroupMerged))) %>% select("snpID", paste(c("freq", "MAC", "Score.pval"), rep(c("Black", "White", "Asian", "HispanicLatino", "Samoan", "Brazilian"), each=3), sep="."))
write.csv(new8OutsideLoci, file="~/Research/OneDrive/telomere/gwas/results/MERGED_allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_p_lt_5e-8_forBRAVOCheck_notInLoci.csv", row.names=FALSE, quote=FALSE)
## check against what Rasika pulled
library(readxl)
rasResult<-read_excel("~/Research/OneDrive/telomere/gwas/results/ALL_GWASResults/OASIS QC/Chromosome_BRAVO_confirm.xlsx", sheet="Stratified Anlaysis Lookups")
colnames(rasResult)<-c("snpID", "snpIDMod", "Outcome", "Path")
rasResultOdd<- rasResult %>% filter(Outcome == "odd") %>% pull(snpID)
mergedResOdd<-mergedRes %>% filter(snpID %in% rasResultOdd) %>% select(paste(c("freq", "MAC", "Score.pval"), rep(c("Black", "White", "Asian", "HispanicLatino", "Samoan", "Brazilian"), each=3), sep="."))
# how many "odd" SNVs reach genome-wide significance in each group
sum(mergedResOdd$Score.pval.Black < 5e-8, na.rm=TRUE)
sum(mergedResOdd$Score.pval.White < 5e-8, na.rm=TRUE)
sum(mergedResOdd$Score.pval.Asian < 5e-8, na.rm=TRUE)
sum(mergedResOdd$Score.pval.HispanicLatino < 5e-8, na.rm=TRUE)
sum(mergedResOdd$Score.pval.Brazilian < 5e-8, na.rm=TRUE)
sum(mergedResOdd$Score.pval.Samoan < 5e-8, na.rm=TRUE)
mergedResOdd %>% select("freq.White", "MAC.White", "Score.pval.White")
sum(mergedResOdd$Score.pval.White < 5e-9, na.rm=TRUE)
## want to pull new positions for Rasika to check
new8<-mergedRes %>% filter(Score.pval.ALL > 5e-8) %>% pull(snpID)
write.csv(new8, file="~/Research/OneDrive/telomere/gwas/results/MERGED_allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_p_lt_5e-8_forBRAVOCheck.csv", row.names=FALSE, quote=FALSE)
# p-value columns only: count rows significant in some group but not pooled
justPvals<-mergedRes[,grep("pval", colnames(mergedRes))]
nrow(justPvals) - sum(justPvals$Score.pval.ALL < 5e-8) ## 247
sum(rowSums(justPvals < 5e-9, na.rm=TRUE)>0)
idx9<-which(rowSums(justPvals < 5e-9, na.rm=TRUE)>0)
new9<-mergedRes[idx9,] %>% filter(Score.pval.ALL > 5e-9) %>% pull(snpID)
write.csv(new9, file="~/Research/OneDrive/telomere/gwas/results/MERGED_allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_p_lt_5e-9_forBRAVOCheck.csv", row.names=FALSE, quote=FALSE)
justPvals <- justPvals %>% filter(rowSums(justPvals < 5e-9, na.rm=TRUE)>0)
nrow(justPvals) - sum(justPvals$Score.pval.ALL < 5e-9) ## 149
notSigAll <- justPvals %>% filter(justPvals$Score.pval.ALL > 5e-9)
## of ones not sig in pooled analysis, all are unique to one other group
table(rowSums(notSigAll < 5e-9, na.rm=TRUE))
sum(notSigAll$Score.pval.White < 5e-9, na.rm=TRUE) ## 111
sum(notSigAll$Score.pval.Black < 5e-9, na.rm=TRUE) ## 28
sum(notSigAll$Score.pval.HispanicLatino < 5e-9, na.rm=TRUE) ## 7
sum(notSigAll$Score.pval.Asian < 5e-9, na.rm=TRUE) ## 1
sum(notSigAll$Score.pval.Brazilian < 5e-9, na.rm=TRUE) ## 2
sum(notSigAll$Score.pval.Samoan < 5e-9, na.rm=TRUE) ## 0
# Reshape per-round conditional-analysis columns (snpID_*, Score.pval_*,
# pos_*) to long format and drop non-significant / missing rounds.
# NOTE(review): `condRes` is assumed to exist from earlier code not shown
# in this excerpt.
idsByRound<-condRes %>% select(chr, grep("snpID_", colnames(condRes))) %>% pivot_longer(-chr, names_to="round", values_to ="snpID") %>% mutate(round = sub("snpID_", "", round))
pvalsByRound <- condRes %>% select(chr, grep("Score.pval_", colnames(condRes))) %>% pivot_longer(-chr, names_to="round", values_to ="Score.pval") %>% mutate(round = sub("Score.pval_", "", round))
posByRound<- condRes %>% select(chr, grep("pos_", colnames(condRes))) %>% pivot_longer(-chr, names_to="round", values_to ="pos") %>% mutate(round = sub("pos_", "", round))
allByRound <- idsByRound %>% left_join(pvalsByRound) %>% left_join(posByRound)
## remove NAs and positions with p>5e-9
allByRound <- allByRound %>% filter(!is.na(Score.pval), Score.pval < 5e-9)
### OLD CODE ####
# Superseded workflow for building Table 1 / locus definitions; kept for
# provenance. Not expected to run as-is against current objects.
newPeaks %>% filter(!snpID %in% condRes$snpID)
allWithLocusGroup<-read.csv("~/Research/OneDrive/telomere/gwas/results/ALL_GWASResults/allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_p_lt_5e-8_withLocusGroups.csv", header = TRUE, stringsAsFactors = FALSE)
## get start and stop of locus regions
allWithLocusGroup %>% filter(c(locusGroup[1:(nrow(allWithLocusGroup)-1)] != locusGroup[2:nrow(allWithLocusGroup)], TRUE) | c(TRUE, locusGroup[2:nrow(allWithLocusGroup)] != locusGroup[1:(nrow(allWithLocusGroup) - 1)])) %>% select(chr, pos)
condRes <- allWithLocusGroup %>% select(chr, pos, newLOCUS) %>% right_join(condRes, by=c("chr", "pos"))
condRes %>% select(newLOCUS, snpID, roundNum_Cond)
# hand-patch locus names for SNVs the join left unnamed
condRes[condRes$snpID == "4:163126692:T:G", "newLOCUS"]<-"NAF1"
condRes[condRes$snpID == "5:1292843:C:T", "newLOCUS"]<-"TERT"
condRes[condRes$snpID == "10:103915847:C:T", "newLOCUS"]<-"SH3PXD2A_OBFC1_SLK"
condRes[condRes$snpID == "18:676473:C:T", "newLOCUS"]<-"TYMS"
condRes[condRes$snpID == "18:650764:C:T", "newLOCUS"]<-"TYMS"
condRes[condRes$snpID == "22:40023952:C:T", "newLOCUS"]<-"TNRC6B"
## want to incorporate locus names
locusInfo <- read_excel("~/Research/OneDrive/telomere/gwas/results/forRasikaOASISLookup-withnovelty_v2.xlsx")
locusInfo <- locusInfo %>% mutate(novelty = ifelse(is.na(novelty), "Known", "Novel")) %>% select(snpID, newLOCUS = LocusName.Final, novelty)
all(locusInfo$snpID %in% condRes$snpID)
# NOTE(review): self-assignment below is a no-op -- looks like a leftover
# from an edit (possibly a dropped join with locusInfo).
condRes <- condRes
# select min p-value per locus
condRes %>% arrange(as.numeric(chr), newLOCUS, roundNum_Cond)
# order loci by chromosome then first round of conditioning they appear in
locusOrder <- cbind(condRes %>% group_by(newLOCUS) %>% filter(roundNum_Cond == min(roundNum_Cond)) %>% select(chr, snpID, newLOCUS, roundNum_Cond) %>% arrange(as.numeric(chr), roundNum_Cond) %>% select(newLOCUS), locusOrder = 1:length(unique(condRes$newLOCUS))) %>% print(n=Inf)
condRes <- condRes %>% left_join(locusOrder)
condRes <- condRes %>% arrange(as.numeric(chr), locusOrder, roundNum_Cond)
# effect sizes rescaled to bp (x1000) for the manuscript table
forTable1 <- condRes %>% mutate(Est_Primary_bp = 1000 * Est_Primary, Est_Cond_bp = 1000 * Est_Cond, position=paste0("Chr", chr, ":", pos)) %>% select(LocusName = newLOCUS, position, ref, alt, freq, Score.pval_Primary, Est_Primary_bp, PVE_Primary, Score.pval_Cond, Est_Cond_bp, PVE_Cond, roundOfConditioning = roundNum_Cond)
write.csv(forTable1, file="~/Research/OneDrive/telomere/gwas/results/Table1.csv", row.names=FALSE, quote=TRUE, na="-")
write.csv(condRes %>% group_by(newLOCUS) %>% summarise(chr = unique(chr), minSNPPos = min(pos), maxSNPPos=max(pos), locusOrder = unique(locusOrder)) %>% arrange(locusOrder), file="~/Research/OneDrive/telomere/gwas/results/lociStartStop.csv", row.names=FALSE, quote=TRUE, na="-")
annoInfo<-read.csv(gzfile("~/Research/OneDrive/telomere/gwas/results/MERGED_allChrs_telomere_adjagesexstudyseqctrbatchPCs_minDP0_BRAVODepthDrop_p_lt_5e-8_OASISAnno.csv.gz"), header = TRUE, stringsAsFactors = FALSE)
condRes <- condRes %>% left_join(annoInfo)
forRasika <- condRes %>% select(LocusName = newLOCUS, snpID, rsNum, Chr38, Pos38, Pos37, freq, Score.pval_Primary, Est_Primary, PVE_Primary, Score.pval_Cond, Est_Cond, PVE_Cond, roundOfConditioning = roundNum_Cond)
write.csv(forRasika, file="~/Research/OneDrive/telomere/gwas/results/forRasikaOASISLookup.csv", row.names=FALSE, quote=TRUE, na="-")
lociRes<- read_excel("~/Research/OneDrive/telomere/gwas/results/forRasikaOASISLookup.xlsx")
# locus boundaries = sentinel position +/- the hand-entered Kb columns
locusDef<-lociRes %>% filter(!is.na(`"-Kb"`)) %>% mutate(Pos38 = as.numeric(Pos38), start = Pos38 - `"-Kb"`*1000, end = Pos38 + `"+Kb"`*1000) %>% select(locusFINAL = LocusName.Final, chr = Chr38, start, end)
write.csv(locusDef, file="~/Research/OneDrive/telomere/gwas/results/locusDefinitions.csv", quote=FALSE, row.names=FALSE)
|
# Time-to-first-toxicity (cough; "hoest" is Dutch for cough) follow-up plot
# for lung stereotactic radiotherapy patients. Builds per-patient follow-up
# durations and the time of the first recorded cough score, then draws a
# swimmer-style plot (one horizontal line per patient, red dot at first
# cough assessment).
library(ggplot2)
library(dplyr)
## REMOVE LATER, TEMPORARY
cat("\014") #clear console
rm(list=ls()) #clear memory
## REMOVE ABOVE
if(!exists("castor_hoest")){
cat("No castor_hoest object found in memory: running prepare.r script!\n")
source("clean.r")
}
# keep only the id, death/treatment-end dates and cough scores per visit
castor_hoest <- comb[,c("Anoniem.nummer", "datum_overlijden_vlg_gba", "einddatum_cum_dosis", "Week1Hoest", "Week2Hoest", "Week3Hoest", "Datum1mnd", "Mnd1Hoest", "Datum6mnd", "Mnd6Hoest", "Datum12mnd", "Mnd12Hoest", "Datum24mnd", "Mnd24Hoest")]
# coerce everything to character first so empty strings can become NA
castor_hoest[] <- lapply(castor_hoest, function(x) (as.character(x)))
castor_hoest[castor_hoest == ""] <- NA
#castor_hoest$einddatum_cum_dosis <- gsub("/", "-", castor_hoest$einddatum_cum_dosis)
# normalize date separators, then parse; visit dates are d-m-Y, the death
# and end-of-dose dates are Y-m-d
castor_hoest$Datum1mnd <- gsub("/", "-", castor_hoest$Datum1mnd)
castor_hoest$Datum6mnd <- gsub("/", "-", castor_hoest$Datum6mnd)
castor_hoest$Datum12mnd <- gsub("/", "-", castor_hoest$Datum12mnd)
castor_hoest$Datum24mnd <- gsub("/", "-", castor_hoest$Datum24mnd)
castor_hoest$datum_overlijden_vlg_gba <- gsub("/", "-", castor_hoest$datum_overlijden_vlg_gba)
castor_hoest$einddatum_cum_dosis <- gsub("/", "-", castor_hoest$einddatum_cum_dosis)
#castor_hoest$einddatum_cum_dosis <- as.Date(castor_hoest$einddatum_cum_dosis, format = "%d-%m-%Y")
castor_hoest$Datum1mnd <- as.Date(castor_hoest$Datum1mnd, format = "%d-%m-%Y")
castor_hoest$Datum6mnd <- as.Date(castor_hoest$Datum6mnd, format = "%d-%m-%Y")
castor_hoest$Datum12mnd <- as.Date(castor_hoest$Datum12mnd, format = "%d-%m-%Y")
castor_hoest$Datum24mnd <- as.Date(castor_hoest$Datum24mnd, format = "%d-%m-%Y")
castor_hoest$datum_overlijden_vlg_gba <- as.Date(castor_hoest$datum_overlijden_vlg_gba, format = "%Y-%m-%d")
castor_hoest$einddatum_cum_dosis <- as.Date(castor_hoest$einddatum_cum_dosis, format = "%Y-%m-%d")
#castor_hoest$Datum1mnd <- ifelse(castor_hoest$Datum1mnd >= "2018-08-28", 1, castor_hoest$Datum1mnd)
#castor_hoest$Datum6mnd <- ifelse(castor_hoest$Datum6mnd > "2018-08-28", NA, castor_hoest$Datum6mnd)
#castor_hoest$Datum12mnd <- ifelse(castor_hoest$Datum12mnd > "2018-08-28", NA, castor_hoest$Datum12mnd)
#castor_hoest$Datum24mnd <- ifelse(castor_hoest$Datum24mnd > "2018-08-28", NA, castor_hoest$Datum24mnd)
#castor_hoest$Datum1mnd <- ifelse(castor_hoest$Datum1mnd == "2997-01-01", NA, castor_hoest$Datum1mnd)
#castor_hoest$survivalstat <- ifelse(castor_hoest$datum_overlijden_vlg_gba == "2018-10-30", 0, 1)
#castor_hoest$Datum1mnd_stat <- ifelse(castor_hoest$Datum1mnd > "2018-08-28", 0, 1)
#castor_hoest$Datum6mnd_stat <- ifelse(castor_hoest$Datum6mnd > "2018-08-28", 0, 1)
#castor_hoest$Datum12mnd_stat <- ifelse(castor_hoest$Datum12mnd > "2018-08-28", 0, 1)
#castor_hoest$Datum24mnd_stat <- ifelse(castor_hoest$Datum24mnd > "2018-08-28", 0, 1)
# 0 = alive/censored, 1 = died. NOTE(review): "2018-10-30" appears to be a
# placeholder date meaning "not deceased" in the export -- confirm; also NA
# death dates yield NA here rather than 0.
castor_hoest$survivalstat <- ifelse(castor_hoest$datum_overlijden_vlg_gba == "2018-10-30", 0, 1)
# follow-up length in days from end of treatment to death/placeholder date
castor_hoest$diff_in_days <- difftime(castor_hoest$datum_overlijden_vlg_gba, castor_hoest$einddatum_cum_dosis, units = c("days"))
# nominal assessment times (days after treatment end) for the weekly visits
castor_hoest["Datum1Week"] <- 7
castor_hoest["Datum2Week"] <- 14
castor_hoest["Datum3Week"] <- 21
# Convert each visit date to days since treatment end; dates after the
# export cutoff 2018-08-28, or missing dates, fall back to the nominal
# visit day (31/182/365/730).
castor_hoest$Datum1mnd <- ifelse(castor_hoest$Datum1mnd > "2018-08-28" | is.na(castor_hoest$Datum1mnd), 31, difftime(castor_hoest$Datum1mnd, castor_hoest$einddatum_cum_dosis, units = c("days")))
castor_hoest$Datum6mnd <- ifelse(castor_hoest$Datum6mnd > "2018-08-28" | is.na(castor_hoest$Datum6mnd), 182, difftime(castor_hoest$Datum6mnd, castor_hoest$einddatum_cum_dosis, units = c("days")))
castor_hoest$Datum12mnd <- ifelse(castor_hoest$Datum12mnd > "2018-08-28" | is.na(castor_hoest$Datum12mnd), 365, difftime(castor_hoest$Datum12mnd, castor_hoest$einddatum_cum_dosis, units = c("days")))
castor_hoest$Datum24mnd <- ifelse(castor_hoest$Datum24mnd > "2018-08-28" | is.na(castor_hoest$Datum24mnd), 730, difftime(castor_hoest$Datum24mnd, castor_hoest$einddatum_cum_dosis, units = c("days")))
castor_hoest["ID"] <- seq.int(nrow(castor_hoest))
# first_hoest = time of the EARLIEST visit with a non-missing cough score.
# NOTE(review): this takes the first *recorded* assessment, not the first
# assessment with a cough > 0 -- confirm that is the intent.
castor_hoest["first_hoest"] <- NA
castor_hoest["first_hoest"] <- ifelse(!is.na(castor_hoest$Week1Hoest), castor_hoest$Datum1Week, castor_hoest$first_hoest)
castor_hoest["first_hoest"] <- ifelse(is.na(castor_hoest$first_hoest), castor_hoest$Datum2Week, castor_hoest$first_hoest)
castor_hoest["first_hoest"] <- ifelse(is.na(castor_hoest$first_hoest), castor_hoest$Datum3Week, castor_hoest$first_hoest)
castor_hoest["first_hoest"] <- ifelse(is.na(castor_hoest$first_hoest), castor_hoest$Datum1mnd, castor_hoest$first_hoest)
castor_hoest["first_hoest"] <- ifelse(is.na(castor_hoest$first_hoest), castor_hoest$Datum6mnd, castor_hoest$first_hoest)
castor_hoest["first_hoest"] <- ifelse(is.na(castor_hoest$first_hoest), castor_hoest$Datum12mnd, castor_hoest$first_hoest)
castor_hoest["first_hoest"] <- ifelse(is.na(castor_hoest$first_hoest), castor_hoest$Datum24mnd, castor_hoest$first_hoest)
#castor_month1$Mnd1Hoest <- ifelse(castor_month1$Mnd1Hoest == 0 | is.na(castor_month1$Mnd1Hoest), 0, 1)
#apple <- head(castor_month1)
# NOTE(review): `|` here makes the second clause redundant (which() drops
# the NAs it produces) -- `&` (non-missing AND positive) was probably
# intended; confirm.
castor_hoest <- castor_hoest[which(!is.na(castor_hoest$first_hoest) | castor_hoest$first_hoest > 0),]
# patients with at least one non-zero cough score at any visit; NA scores
# give NA comparisons, which which() drops
castor_hoest_on <- castor_hoest[which(castor_hoest$Week1Hoest != 0 | castor_hoest$Week2Hoest != 0 | castor_hoest$Week3Hoest != 0 | castor_hoest$Mnd1Hoest != 0 | castor_hoest$Mnd6Hoest != 0 | castor_hoest$Mnd12Hoest != 0 | castor_hoest$Mnd24Hoest != 0),]
# Swimmer plot: one follow-up line per patient, red dot at first cough
# assessment for the patients with any cough recorded.
ggplot(castor_hoest, aes(x = ID)) +
# Plot line (dotted for censored time) representing time from t1 to t2
geom_linerange(aes(ymin = 0, ymax = diff_in_days)) +
# geom_linerange(aes(ymin = stop_followup, ymax = 182, linetype = "dashed")) +
# Plot points representing event
# The ifelse() function moves censored marker to middle of interval
geom_point(data = castor_hoest_on, aes(y = first_hoest), size = 1, color = "red") +
# Flip coordinates
coord_flip() +
# scale_linetype_manual(name = "Censoring", values = c(1, 2, 2), labels = c("Not censored", "Interval censored", "Interval censored")) +
# Add custom shape scale. Change the values to get different shapes.
# scale_shape_manual(name = "Mnd1Hoest", values = c(18 ,19, 15)) +
# Add custom name to linetype scale,
# otherwise it will default to "as.factor(censored))"
#scale_linetype_manual(name = "Censoring", values = c(1, 2), labels = c("Not censored", "Interval censored")) +
# Add custom shape scale. Change the values to get different shapes.
#scale_shape_manual(name = "Event", values = c(19, 15)) +
# Add main title and axis labels
#opts(title = "Patient follow-up") + xlab("Patient ID") + ylab("Days") +
# I think the bw theme looks better for this graph,
# but leave it out if you prefer the default theme
theme_bw()
#castor_hoestine all subsets
#rbind ofzo | /plot/not_working/time_to_first_toxicity-hoest.R | no_license | Krijnrien/survival_lungstereo | R | false | false | 6,678 | r | library(ggplot2)
# Duplicate record of the time-to-first-cough ("hoest") follow-up script --
# same logic as the copy above; see the notes there. Builds per-patient
# follow-up durations and first recorded cough time, then a swimmer plot.
library(dplyr)
## REMOVE LATER, TEMPORARY
cat("\014") #clear console
rm(list=ls()) #clear memory
## REMOVE ABOVE
if(!exists("castor_hoest")){
cat("No castor_hoest object found in memory: running prepare.r script!\n")
source("clean.r")
}
# id, death/treatment-end dates and cough scores per visit
castor_hoest <- comb[,c("Anoniem.nummer", "datum_overlijden_vlg_gba", "einddatum_cum_dosis", "Week1Hoest", "Week2Hoest", "Week3Hoest", "Datum1mnd", "Mnd1Hoest", "Datum6mnd", "Mnd6Hoest", "Datum12mnd", "Mnd12Hoest", "Datum24mnd", "Mnd24Hoest")]
castor_hoest[] <- lapply(castor_hoest, function(x) (as.character(x)))
castor_hoest[castor_hoest == ""] <- NA
#castor_hoest$einddatum_cum_dosis <- gsub("/", "-", castor_hoest$einddatum_cum_dosis)
# normalize separators then parse; visit dates d-m-Y, death/end dates Y-m-d
castor_hoest$Datum1mnd <- gsub("/", "-", castor_hoest$Datum1mnd)
castor_hoest$Datum6mnd <- gsub("/", "-", castor_hoest$Datum6mnd)
castor_hoest$Datum12mnd <- gsub("/", "-", castor_hoest$Datum12mnd)
castor_hoest$Datum24mnd <- gsub("/", "-", castor_hoest$Datum24mnd)
castor_hoest$datum_overlijden_vlg_gba <- gsub("/", "-", castor_hoest$datum_overlijden_vlg_gba)
castor_hoest$einddatum_cum_dosis <- gsub("/", "-", castor_hoest$einddatum_cum_dosis)
#castor_hoest$einddatum_cum_dosis <- as.Date(castor_hoest$einddatum_cum_dosis, format = "%d-%m-%Y")
castor_hoest$Datum1mnd <- as.Date(castor_hoest$Datum1mnd, format = "%d-%m-%Y")
castor_hoest$Datum6mnd <- as.Date(castor_hoest$Datum6mnd, format = "%d-%m-%Y")
castor_hoest$Datum12mnd <- as.Date(castor_hoest$Datum12mnd, format = "%d-%m-%Y")
castor_hoest$Datum24mnd <- as.Date(castor_hoest$Datum24mnd, format = "%d-%m-%Y")
castor_hoest$datum_overlijden_vlg_gba <- as.Date(castor_hoest$datum_overlijden_vlg_gba, format = "%Y-%m-%d")
castor_hoest$einddatum_cum_dosis <- as.Date(castor_hoest$einddatum_cum_dosis, format = "%Y-%m-%d")
#castor_hoest$Datum1mnd <- ifelse(castor_hoest$Datum1mnd >= "2018-08-28", 1, castor_hoest$Datum1mnd)
#castor_hoest$Datum6mnd <- ifelse(castor_hoest$Datum6mnd > "2018-08-28", NA, castor_hoest$Datum6mnd)
#castor_hoest$Datum12mnd <- ifelse(castor_hoest$Datum12mnd > "2018-08-28", NA, castor_hoest$Datum12mnd)
#castor_hoest$Datum24mnd <- ifelse(castor_hoest$Datum24mnd > "2018-08-28", NA, castor_hoest$Datum24mnd)
#castor_hoest$Datum1mnd <- ifelse(castor_hoest$Datum1mnd == "2997-01-01", NA, castor_hoest$Datum1mnd)
#castor_hoest$survivalstat <- ifelse(castor_hoest$datum_overlijden_vlg_gba == "2018-10-30", 0, 1)
#castor_hoest$Datum1mnd_stat <- ifelse(castor_hoest$Datum1mnd > "2018-08-28", 0, 1)
#castor_hoest$Datum6mnd_stat <- ifelse(castor_hoest$Datum6mnd > "2018-08-28", 0, 1)
#castor_hoest$Datum12mnd_stat <- ifelse(castor_hoest$Datum12mnd > "2018-08-28", 0, 1)
#castor_hoest$Datum24mnd_stat <- ifelse(castor_hoest$Datum24mnd > "2018-08-28", 0, 1)
# 0 = censored, 1 = died; "2018-10-30" appears to be a "not deceased"
# placeholder -- TODO confirm
castor_hoest$survivalstat <- ifelse(castor_hoest$datum_overlijden_vlg_gba == "2018-10-30", 0, 1)
castor_hoest$diff_in_days <- difftime(castor_hoest$datum_overlijden_vlg_gba, castor_hoest$einddatum_cum_dosis, units = c("days"))
# nominal assessment days for the weekly visits
castor_hoest["Datum1Week"] <- 7
castor_hoest["Datum2Week"] <- 14
castor_hoest["Datum3Week"] <- 21
# days since treatment end; dates past cutoff 2018-08-28 or NA fall back to
# the nominal visit day
castor_hoest$Datum1mnd <- ifelse(castor_hoest$Datum1mnd > "2018-08-28" | is.na(castor_hoest$Datum1mnd), 31, difftime(castor_hoest$Datum1mnd, castor_hoest$einddatum_cum_dosis, units = c("days")))
castor_hoest$Datum6mnd <- ifelse(castor_hoest$Datum6mnd > "2018-08-28" | is.na(castor_hoest$Datum6mnd), 182, difftime(castor_hoest$Datum6mnd, castor_hoest$einddatum_cum_dosis, units = c("days")))
castor_hoest$Datum12mnd <- ifelse(castor_hoest$Datum12mnd > "2018-08-28" | is.na(castor_hoest$Datum12mnd), 365, difftime(castor_hoest$Datum12mnd, castor_hoest$einddatum_cum_dosis, units = c("days")))
castor_hoest$Datum24mnd <- ifelse(castor_hoest$Datum24mnd > "2018-08-28" | is.na(castor_hoest$Datum24mnd), 730, difftime(castor_hoest$Datum24mnd, castor_hoest$einddatum_cum_dosis, units = c("days")))
castor_hoest["ID"] <- seq.int(nrow(castor_hoest))
# time of earliest visit with a non-missing cough score (not first cough>0
# -- confirm intent)
castor_hoest["first_hoest"] <- NA
castor_hoest["first_hoest"] <- ifelse(!is.na(castor_hoest$Week1Hoest), castor_hoest$Datum1Week, castor_hoest$first_hoest)
castor_hoest["first_hoest"] <- ifelse(is.na(castor_hoest$first_hoest), castor_hoest$Datum2Week, castor_hoest$first_hoest)
castor_hoest["first_hoest"] <- ifelse(is.na(castor_hoest$first_hoest), castor_hoest$Datum3Week, castor_hoest$first_hoest)
castor_hoest["first_hoest"] <- ifelse(is.na(castor_hoest$first_hoest), castor_hoest$Datum1mnd, castor_hoest$first_hoest)
castor_hoest["first_hoest"] <- ifelse(is.na(castor_hoest$first_hoest), castor_hoest$Datum6mnd, castor_hoest$first_hoest)
castor_hoest["first_hoest"] <- ifelse(is.na(castor_hoest$first_hoest), castor_hoest$Datum12mnd, castor_hoest$first_hoest)
castor_hoest["first_hoest"] <- ifelse(is.na(castor_hoest$first_hoest), castor_hoest$Datum24mnd, castor_hoest$first_hoest)
#castor_month1$Mnd1Hoest <- ifelse(castor_month1$Mnd1Hoest == 0 | is.na(castor_month1$Mnd1Hoest), 0, 1)
#apple <- head(castor_month1)
# NOTE(review): `|` likely intended as `&` (see note on the copy above)
castor_hoest <- castor_hoest[which(!is.na(castor_hoest$first_hoest) | castor_hoest$first_hoest > 0),]
castor_hoest_on <- castor_hoest[which(castor_hoest$Week1Hoest != 0 | castor_hoest$Week2Hoest != 0 | castor_hoest$Week3Hoest != 0 | castor_hoest$Mnd1Hoest != 0 | castor_hoest$Mnd6Hoest != 0 | castor_hoest$Mnd12Hoest != 0 | castor_hoest$Mnd24Hoest != 0),]
# swimmer plot: follow-up line per patient, red dot at first cough assessment
ggplot(castor_hoest, aes(x = ID)) +
# Plot line (dotted for censored time) representing time from t1 to t2
geom_linerange(aes(ymin = 0, ymax = diff_in_days)) +
# geom_linerange(aes(ymin = stop_followup, ymax = 182, linetype = "dashed")) +
# Plot points representing event
# The ifelse() function moves censored marker to middle of interval
geom_point(data = castor_hoest_on, aes(y = first_hoest), size = 1, color = "red") +
# Flip coordinates
coord_flip() +
# scale_linetype_manual(name = "Censoring", values = c(1, 2, 2), labels = c("Not censored", "Interval censored", "Interval censored")) +
# Add custom shape scale. Change the values to get different shapes.
# scale_shape_manual(name = "Mnd1Hoest", values = c(18 ,19, 15)) +
# Add custom name to linetype scale,
# otherwise it will default to "as.factor(censored))"
#scale_linetype_manual(name = "Censoring", values = c(1, 2), labels = c("Not censored", "Interval censored")) +
# Add custom shape scale. Change the values to get different shapes.
#scale_shape_manual(name = "Event", values = c(19, 15)) +
# Add main title and axis labels
#opts(title = "Patient follow-up") + xlab("Patient ID") + ylab("Days") +
# I think the bw theme looks better for this graph,
# but leave it out if you prefer the default theme
theme_bw()
#castor_hoestine all subsets
#rbind ofzo
mich.ment <- function(S, V, MMcurve = TRUE) {
  # Michaelis-Menten plot of reaction velocity against substrate
  # concentration, optionally overlaid with the fitted MM curve
  # V = Vmax * S / (Km + S).
  #
  # Args:
  #   S: numeric vector of substrate concentrations (mM).
  #   V: numeric vector of reaction velocities (nmol/s), same length as S.
  #   MMcurve: if TRUE (default), estimate Vmax and Km from S and V and
  #     draw the fitted curve in blue.
  #
  # Returns: a ggplot object.
  library(ggplot2)
  endata <- data.frame(S, V)
  # Base scatter plot -- built once instead of duplicating it in both
  # branches as the original did.
  enplot <- ggplot(endata, aes(x = S, y = V)) +
    geom_point(color = "black") +
    xlab("Substrate (mM)") +
    ylab("Velocity (nmol/s)") +
    theme_bw() +
    labs(title = "\nMichaelis-Menten Plot\n") +
    theme(plot.title = element_text(hjust = 0.5))
  if (MMcurve) {
    # Eadie-Hofstee linearization: V = Vmax - Km * (V/S), so the intercept
    # is Vmax and Km is minus the slope. BUG FIXES vs the original:
    # (1) fit from the S/V arguments instead of columns 2 and 5 of a global
    #     `enz.data`, which could disagree with the plotted points;
    # (2) `fun_MM <- function(x)y=...` parsed as an assignment to the
    #     function literal (`<-` binds tighter than `=`) and errored at
    #     runtime -- the body is now a plain expression;
    # (3) Km was computed via coef1/(-coef1/coef2), an obfuscated -coef2.
    EHmodel <- lm(V ~ I(V / S))
    Vmax <- EHmodel$coefficients[[1]]
    Km <- -EHmodel$coefficients[[2]]
    fun_MM <- function(x) Vmax * x / (x + Km)
    enplot <- enplot +
      stat_function(fun = fun_MM, color = "blue") +
      xlim(0, max(S))
  }
  enplot
}
#the resulting graph:
# (S and V must exist in the calling environment)
mich.ment(S,V,MMcurve = TRUE)
| /Michaelis Menten FINAL.R | no_license | JiangQunyang/Mypackage | R | false | false | 1,231 | r | mich.ment <-function (S,V,MMcurve=TRUE) {
library(ggplot2)
if(MMcurve==TRUE){
EHmodel <- lm(enz.data[,2] ~ enz.data[,5])
Vmax <- EHmodel$coefficients[[1]]
Km <- EHmodel$coefficients[[1]]/(-EHmodel$coefficients[[1]]/EHmodel$coefficients[[2]])
fun_MM <- function(x)y=Vmax*x/(x+Km)
endata <-data.frame(S,V)
enplot<-ggplot(endata, aes (x = S, y = V))+ geom_point(color="black") +
xlab("Substrate (mM)") +
ylab("Velocity (nmol/s)") +
theme_bw() +
labs(title ="\nMichaelis-Menten Plot\n")+
theme(plot.title = element_text(hjust=0.5))+
stat_function(fun=fun_MM,color="blue")+ xlim(0, max(S))
return(enplot)}
else {EHmodel <- lm(enz.data[,2] ~ enz.data[,5])
Vmax <- EHmodel$coefficients[[1]]
Km <- EHmodel$coefficients[[1]]/(-EHmodel$coefficients[[1]]/EHmodel$coefficients[[2]])
endata <-data.frame(S,V)
enplot<-ggplot(endata, aes (x = S, y = V))+ geom_point(color="black") +
xlab("Substrate (mM)") +
ylab("Velocity (nmol/s)") +
theme_bw() +
labs(title ="\nMichaelis-Menten Plot\n")+
theme(plot.title = element_text(hjust=0.5))
return(enplot)}
}
#the resulting graph:
# (duplicate record; S and V must exist in the calling environment)
mich.ment(S,V,MMcurve = TRUE)
|
# Looker API 3.0 Reference
#
# ### Authorization The Looker API uses Looker **API3** credentials for authorization and access control. Looker admins can create API3 credentials on Looker's **Admin/Users** page. Pass API3 credentials to the **/login** endpoint to obtain a temporary access_token. Include that access_token in the Authorization header of Looker API requests. For details, see [Looker API Authorization](https://looker.com/docs/r/api/authorization) ### Client SDKs The Looker API is a RESTful system that should be usable by any programming language capable of making HTTPS requests. Client SDKs for a variety of programming languages can be generated from the Looker API's Swagger JSON metadata to streamline use of the Looker API in your applications. A client SDK for Ruby is available as an example. For more information, see [Looker API Client SDKs](https://looker.com/docs/r/api/client_sdks) ### Try It Out! The 'api-docs' page served by the Looker instance includes 'Try It Out!' buttons for each API method. After logging in with API3 credentials, you can use the \"Try It Out!\" buttons to call the API directly from the documentation page to interactively explore API features and responses. ### Versioning Future releases of Looker will expand this API release-by-release to securely expose more and more of the core power of Looker to API client applications. API endpoints marked as \"beta\" may receive breaking changes without warning. Stable (non-beta) API endpoints should not receive breaking changes in future releases. For more information, see [Looker API Versioning](https://looker.com/docs/r/api/versioning)
#
# OpenAPI spec version: 3.0.0
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' SqlQueryCreate Class
#'
#' Request body for creating a SQL Runner query via the Looker API: the
#' connection to execute against plus the SQL text itself.
#'
#' @field connection_id Name of the Looker connection the SQL runs on
#'   (length-1 character).
#' @field sql The SQL statement to execute (length-1 character).
#' @field can Operations the current user may perform; set by the server
#'   and treated as opaque here.
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
SqlQueryCreate <- R6::R6Class(
  'SqlQueryCreate',
  public = list(
    `connection_id` = NULL,
    `sql` = NULL,
    `can` = NULL,
    # Populate any subset of the fields; the two string fields are
    # validated as length-1 character vectors.
    initialize = function(`connection_id`, `sql`, `can`){
      if (!missing(`connection_id`)) {
        stopifnot(is.character(`connection_id`), length(`connection_id`) == 1)
        self$`connection_id` <- `connection_id`
      }
      if (!missing(`sql`)) {
        stopifnot(is.character(`sql`), length(`sql`) == 1)
        self$`sql` <- `sql`
      }
      if (!missing(`can`)) {
        self$`can` <- `can`
      }
    },
    # Return a named list of the non-NULL fields (jsonlite-ready).
    toJSON = function() {
      SqlQueryCreateObject <- list()
      if (!is.null(self$`connection_id`)) {
        SqlQueryCreateObject[['connection_id']] <- self$`connection_id`
      }
      if (!is.null(self$`sql`)) {
        SqlQueryCreateObject[['sql']] <- self$`sql`
      }
      if (!is.null(self$`can`)) {
        SqlQueryCreateObject[['can']] <- self$`can`
      }
      SqlQueryCreateObject
    },
    # Populate fields from a JSON string; only keys present are assigned.
    fromJSON = function(SqlQueryCreateJson) {
      SqlQueryCreateObject <- jsonlite::fromJSON(SqlQueryCreateJson)
      if (!is.null(SqlQueryCreateObject$`connection_id`)) {
        self$`connection_id` <- SqlQueryCreateObject$`connection_id`
      }
      if (!is.null(SqlQueryCreateObject$`sql`)) {
        self$`sql` <- SqlQueryCreateObject$`sql`
      }
      if (!is.null(SqlQueryCreateObject$`can`)) {
        self$`can` <- SqlQueryCreateObject$`can`
      }
    },
    # Serialize to a JSON string.
    # BUG FIX: the generated sprintf template interpolated raw values into
    # '"sql": %s', producing unquoted (invalid) JSON for character fields
    # and the literal text "NULL" for unset ones. Delegate to jsonlite,
    # which quotes and escapes correctly; unset fields are omitted,
    # matching toJSON().
    toJSONString = function() {
      as.character(jsonlite::toJSON(self$toJSON(), auto_unbox = TRUE))
    },
    # Overwrite all fields from a JSON string (missing keys become NULL).
    fromJSONString = function(SqlQueryCreateJson) {
      SqlQueryCreateObject <- jsonlite::fromJSON(SqlQueryCreateJson)
      self$`connection_id` <- SqlQueryCreateObject$`connection_id`
      self$`sql` <- SqlQueryCreateObject$`sql`
      self$`can` <- SqlQueryCreateObject$`can`
    }
  )
)
| /R/SqlQueryCreate.r | permissive | grepinsight/lookr | R | false | false | 3,888 | r | # Looker API 3.0 Reference
#
# ### Authorization The Looker API uses Looker **API3** credentials for authorization and access control. Looker admins can create API3 credentials on Looker's **Admin/Users** page. Pass API3 credentials to the **/login** endpoint to obtain a temporary access_token. Include that access_token in the Authorization header of Looker API requests. For details, see [Looker API Authorization](https://looker.com/docs/r/api/authorization) ### Client SDKs The Looker API is a RESTful system that should be usable by any programming language capable of making HTTPS requests. Client SDKs for a variety of programming languages can be generated from the Looker API's Swagger JSON metadata to streamline use of the Looker API in your applications. A client SDK for Ruby is available as an example. For more information, see [Looker API Client SDKs](https://looker.com/docs/r/api/client_sdks) ### Try It Out! The 'api-docs' page served by the Looker instance includes 'Try It Out!' buttons for each API method. After logging in with API3 credentials, you can use the \"Try It Out!\" buttons to call the API directly from the documentation page to interactively explore API features and responses. ### Versioning Future releases of Looker will expand this API release-by-release to securely expose more and more of the core power of Looker to API client applications. API endpoints marked as \"beta\" may receive breaking changes without warning. Stable (non-beta) API endpoints should not receive breaking changes in future releases. For more information, see [Looker API Versioning](https://looker.com/docs/r/api/versioning)
#
# OpenAPI spec version: 3.0.0
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' SqlQueryCreate Class
#'
#' Request body for creating a SQL Runner query via the Looker API: the
#' connection to execute against plus the SQL text itself.
#'
#' @field connection_id Name of the Looker connection the SQL runs on
#'   (length-1 character).
#' @field sql The SQL statement to execute (length-1 character).
#' @field can Operations the current user may perform; set by the server
#'   and treated as opaque here.
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
SqlQueryCreate <- R6::R6Class(
  'SqlQueryCreate',
  public = list(
    `connection_id` = NULL,
    `sql` = NULL,
    `can` = NULL,
    # Populate any subset of the fields; the two string fields are
    # validated as length-1 character vectors.
    initialize = function(`connection_id`, `sql`, `can`){
      if (!missing(`connection_id`)) {
        stopifnot(is.character(`connection_id`), length(`connection_id`) == 1)
        self$`connection_id` <- `connection_id`
      }
      if (!missing(`sql`)) {
        stopifnot(is.character(`sql`), length(`sql`) == 1)
        self$`sql` <- `sql`
      }
      if (!missing(`can`)) {
        self$`can` <- `can`
      }
    },
    # Return a named list of the non-NULL fields (jsonlite-ready).
    toJSON = function() {
      SqlQueryCreateObject <- list()
      if (!is.null(self$`connection_id`)) {
        SqlQueryCreateObject[['connection_id']] <- self$`connection_id`
      }
      if (!is.null(self$`sql`)) {
        SqlQueryCreateObject[['sql']] <- self$`sql`
      }
      if (!is.null(self$`can`)) {
        SqlQueryCreateObject[['can']] <- self$`can`
      }
      SqlQueryCreateObject
    },
    # Populate fields from a JSON string; only keys present are assigned.
    fromJSON = function(SqlQueryCreateJson) {
      SqlQueryCreateObject <- jsonlite::fromJSON(SqlQueryCreateJson)
      if (!is.null(SqlQueryCreateObject$`connection_id`)) {
        self$`connection_id` <- SqlQueryCreateObject$`connection_id`
      }
      if (!is.null(SqlQueryCreateObject$`sql`)) {
        self$`sql` <- SqlQueryCreateObject$`sql`
      }
      if (!is.null(SqlQueryCreateObject$`can`)) {
        self$`can` <- SqlQueryCreateObject$`can`
      }
    },
    # Serialize to a JSON string.
    # BUG FIX: the generated sprintf template interpolated raw values into
    # '"sql": %s', producing unquoted (invalid) JSON for character fields
    # and the literal text "NULL" for unset ones. Delegate to jsonlite,
    # which quotes and escapes correctly; unset fields are omitted,
    # matching toJSON().
    toJSONString = function() {
      as.character(jsonlite::toJSON(self$toJSON(), auto_unbox = TRUE))
    },
    # Overwrite all fields from a JSON string (missing keys become NULL).
    fromJSONString = function(SqlQueryCreateJson) {
      SqlQueryCreateObject <- jsonlite::fromJSON(SqlQueryCreateJson)
      self$`connection_id` <- SqlQueryCreateObject$`connection_id`
      self$`sql` <- SqlQueryCreateObject$`sql`
      self$`can` <- SqlQueryCreateObject$`can`
    }
  )
)
|
# Plot 2 of the Exploratory Data Analysis assignment: line chart of Global
# Active Power over the two days 2007-02-01 and 2007-02-02, written to
# plot2.png (480x480 px). Expects household_power_consumption.txt (";"
# separated, d/m/Y date strings) in the working directory.
# NOTE(review): read.table() loads the full file before subsetting two days;
# consider colClasses / a filtered read if performance matters -- TODO confirm
# file size justifies it.
dataFile <- "./household_power_consumption.txt"
data <- read.table(dataFile, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
# Keep only the two target dates (Date column holds d/m/Y strings).
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
#str(subSetData)
# Combine Date + Time into POSIXlt timestamps for the x-axis.
datetime <- strptime(paste(subSetData$Date, subSetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(subSetData$Global_active_power)
png("plot2.png", width=480, height=480)
plot(datetime, globalActivePower, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
| /Plot2.R | no_license | hitanshupande/ExData_Plotting1 | R | false | false | 513 | r |
# Plot 2 of the Exploratory Data Analysis assignment: line chart of Global
# Active Power over the two days 2007-02-01 and 2007-02-02, written to
# plot2.png (480x480 px). Expects household_power_consumption.txt (";"
# separated, d/m/Y date strings) in the working directory.
# NOTE(review): read.table() loads the full file before subsetting two days;
# consider colClasses / a filtered read if performance matters -- TODO confirm
# file size justifies it.
dataFile <- "./household_power_consumption.txt"
data <- read.table(dataFile, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
# Keep only the two target dates (Date column holds d/m/Y strings).
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
#str(subSetData)
# Combine Date + Time into POSIXlt timestamps for the x-axis.
datetime <- strptime(paste(subSetData$Date, subSetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(subSetData$Global_active_power)
png("plot2.png", width=480, height=480)
plot(datetime, globalActivePower, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
|
#Sequence of shopping carts in-depth analysis with R - Sequence of events
#http://analyzecore.com/2015/01/28/sequence-carts-in-depth-analysis-with-r-events/
# NOTE(review): this script assumes the TraMineR package is attached and that
# `df.seq` (a state-sequence object) and `df.feat` (a data frame with a `sex`
# column) already exist in the workspace -- presumably created by an earlier
# part of the tutorial; verify before running standalone.
df.evseq <- seqecreate(df.seq, tevent='state') # converting state object to event sequence
head(df.evseq)
df.subseq <- seqefsub(df.evseq, pMinSupport=0.01) # searching for frequent event subsequences
plot(df.subseq[1:10], col="cyan", ylab="Frequency", xlab="Subsequences", cex=1.5) # plotting
# In order to do some custom analysis, in addition to the minimum support, TraMineR also allows to control the search of frequent subsequences with time constraints. For instance, we can specify:
#
# maxGap: maximum time gap between two transitions;
# windowSize: maximum window size, that is the maximum time taken by a subsequence;
# ageMin: Minimum age at the beginning of the subsequences;
# ageMax: Maximum age at the beginning of the subsequences;
# ageMaxEnd: Maximum age at the end of the subsequences.
time.constraint <- seqeconstraint(maxGap=10, windowSize=30) # creating variable with conditions
df.subseq.time.constr <- seqefsub(df.evseq, pMinSupport=0.01, constraint=time.constraint) # searching for frequent event subsequences
plot(df.subseq.time.constr[1:10], col="cyan", ylab="Frequency", xlab="Subsequences", cex=1.5) # plotting
discrseq <- seqecmpgroup(df.subseq, group=df.feat$sex) # searching for frequent sequences that are related to gender
head(discrseq)
plot(discrseq[1:10], cex=1.5) # plotting 10 frequent subsequences
plot(discrseq[1:10], ptype="resid", cex=1.5) # plotting 10 residuals
# NOTE(review): `:::` reaches into TraMineR's unexported namespace; this can
# break on package updates -- check whether a public equivalent exists.
rules <- TraMineR:::seqerules(df.subseq) # searching for rules
head(rules)
| /Sequential Mining/Shopping Cart Analysis/Sequence of shopping carts in-depth analysis with R Sequence of events.R | no_license | nandeda89/Machine_Learning_Concepts | R | false | false | 1,650 | r | #Sequence of shopping carts in-depth analysis with R - Sequence of events
#http://analyzecore.com/2015/01/28/sequence-carts-in-depth-analysis-with-r-events/
# NOTE(review): this script assumes the TraMineR package is attached and that
# `df.seq` (a state-sequence object) and `df.feat` (a data frame with a `sex`
# column) already exist in the workspace -- presumably created by an earlier
# part of the tutorial; verify before running standalone.
df.evseq <- seqecreate(df.seq, tevent='state') # converting state object to event sequence
head(df.evseq)
df.subseq <- seqefsub(df.evseq, pMinSupport=0.01) # searching for frequent event subsequences
plot(df.subseq[1:10], col="cyan", ylab="Frequency", xlab="Subsequences", cex=1.5) # plotting
# In order to do some custom analysis, in addition to the minimum support, TraMineR also allows to control the search of frequent subsequences with time constraints. For instance, we can specify:
#
# maxGap: maximum time gap between two transitions;
# windowSize: maximum window size, that is the maximum time taken by a subsequence;
# ageMin: Minimum age at the beginning of the subsequences;
# ageMax: Maximum age at the beginning of the subsequences;
# ageMaxEnd: Maximum age at the end of the subsequences.
time.constraint <- seqeconstraint(maxGap=10, windowSize=30) # creating variable with conditions
df.subseq.time.constr <- seqefsub(df.evseq, pMinSupport=0.01, constraint=time.constraint) # searching for frequent event subsequences
plot(df.subseq.time.constr[1:10], col="cyan", ylab="Frequency", xlab="Subsequences", cex=1.5) # plotting
discrseq <- seqecmpgroup(df.subseq, group=df.feat$sex) # searching for frequent sequences that are related to gender
head(discrseq)
plot(discrseq[1:10], cex=1.5) # plotting 10 frequent subsequences
plot(discrseq[1:10], ptype="resid", cex=1.5) # plotting 10 residuals
# NOTE(review): `:::` reaches into TraMineR's unexported namespace; this can
# break on package updates -- check whether a public equivalent exists.
rules <- TraMineR:::seqerules(df.subseq) # searching for rules
head(rules)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wt_stat.R
\name{wt_cov}
\alias{wt_cov}
\alias{wt_cor}
\title{Compute weighted covariances}
\usage{
wt_cov(x, y = NULL, wt = NULL, as_cor = FALSE,
use = c("everything", "listwise", "pairwise"), unbiased = TRUE,
df_type = c("count", "sum_wts"))
wt_cor(x, y = NULL, wt = NULL, use = "everything")
}
\arguments{
\item{x}{Vector or matrix of x variables.}
\item{y}{Vector or matrix of y variables.}
\item{wt}{Vector of weights}
\item{as_cor}{Logical scalar that determines whether the covariances should be standardized (TRUE) or unstandardized (FALSE).}
\item{use}{Method for handling missing values. "everything" uses all values and does not account for missingness, "listwise" uses only complete cases, and "pairwise" uses pairwise deletion.}
\item{unbiased}{Logical scalar determining whether variance should be unbiased (TRUE) or maximum-likelihood (FALSE).}
\item{df_type}{Character scalar determining whether the degrees of freedom for unbiased estimates should be based on numbers of cases (n - 1; "count"; default) or squared sums of weights (1 - sum(w^2); "sum_wts").}
}
\value{
Scalar, vector, or matrix of covariances.
}
\description{
Compute the weighted covariance among variables in a matrix or between the variables in two separate matrices/vectors.
}
\examples{
wt_cov(x = c(1, 0, 2), y = c(1, 2, 3), wt = c(1, 2, 2), as_cor = FALSE, use = "everything")
wt_cov(x = c(1, 0, 2), y = c(1, 2, 3), wt = c(1, 2, 2), as_cor = TRUE, use = "everything")
wt_cov(x = cbind(c(1, 0, 2), c(1, 2, 3)), wt = c(1, 2, 2), as_cor = FALSE, use = "everything")
wt_cov(x = cbind(c(1, 0, 2), c(1, 2, 3)), wt = c(1, 2, 2), as_cor = TRUE, use = "everything")
wt_cov(x = cbind(c(1, 0, 2, NA), c(1, 2, 3, 3)),
wt = c(1, 2, 2, 1), as_cor = FALSE, use = "listwise")
wt_cov(x = cbind(c(1, 0, 2, NA), c(1, 2, 3, 3)),
wt = c(1, 2, 2, 1), as_cor = TRUE, use = "listwise")
}
| /man/wt_cov.Rd | no_license | yungly/psychmeta | R | false | true | 1,955 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wt_stat.R
\name{wt_cov}
\alias{wt_cov}
\alias{wt_cor}
\title{Compute weighted covariances}
\usage{
wt_cov(x, y = NULL, wt = NULL, as_cor = FALSE,
use = c("everything", "listwise", "pairwise"), unbiased = TRUE,
df_type = c("count", "sum_wts"))
wt_cor(x, y = NULL, wt = NULL, use = "everything")
}
\arguments{
\item{x}{Vector or matrix of x variables.}
\item{y}{Vector or matrix of y variables.}
\item{wt}{Vector of weights}
\item{as_cor}{Logical scalar that determines whether the covariances should be standardized (TRUE) or unstandardized (FALSE).}
\item{use}{Method for handling missing values. "everything" uses all values and does not account for missingness, "listwise" uses only complete cases, and "pairwise" uses pairwise deletion.}
\item{unbiased}{Logical scalar determining whether variance should be unbiased (TRUE) or maximum-likelihood (FALSE).}
\item{df_type}{Character scalar determining whether the degrees of freedom for unbiased estimates should be based on numbers of cases (n - 1; "count"; default) or squared sums of weights (1 - sum(w^2); "sum_wts").}
}
\value{
Scalar, vector, or matrix of covariances.
}
\description{
Compute the weighted covariance among variables in a matrix or between the variables in two separate matrices/vectors.
}
\examples{
wt_cov(x = c(1, 0, 2), y = c(1, 2, 3), wt = c(1, 2, 2), as_cor = FALSE, use = "everything")
wt_cov(x = c(1, 0, 2), y = c(1, 2, 3), wt = c(1, 2, 2), as_cor = TRUE, use = "everything")
wt_cov(x = cbind(c(1, 0, 2), c(1, 2, 3)), wt = c(1, 2, 2), as_cor = FALSE, use = "everything")
wt_cov(x = cbind(c(1, 0, 2), c(1, 2, 3)), wt = c(1, 2, 2), as_cor = TRUE, use = "everything")
wt_cov(x = cbind(c(1, 0, 2, NA), c(1, 2, 3, 3)),
wt = c(1, 2, 2, 1), as_cor = FALSE, use = "listwise")
wt_cov(x = cbind(c(1, 0, 2, NA), c(1, 2, 3, 3)),
wt = c(1, 2, 2, 1), as_cor = TRUE, use = "listwise")
}
|
# ---------------------------------------------------------------------------------------
# WEB DATA COLLECTION WITH R - Project
# Silke Husse
# Winter Semester 2020/21
#
# scraping of bundestag website (bundestag.de)
# ---------------------------------------------------------------------------------------
### preparations ###
# NOTE(review): clearing the workspace and hard-coding setwd() make the
# script non-portable; an RStudio project or here::here() would be safer.
rm(list=ls(all=TRUE))
setwd("/Web Data Collection with R/") # set working directory
source("R code/packages.R") # install and load packages
# set user-agent
# Build a polite, identifying user-agent string and register it globally for
# all subsequent httr requests. "email" is a placeholder -- fill in before use.
# (str_c / %>% are presumably provided by packages loaded in packages.R.)
str_c("email", # insert email here
      "collecting data for study purposes",
      R.version$platform,
      R.version$version.string,
      sep = ", ") %>%
  httr::user_agent() %>%
  httr::set_config()
### data collection ###
url <- "https://www.bundestag.de/abgeordnete/biografien"
#browseURL(url)
url_parsed <- xml2::url_parse(url) # parse url
# check permission
# Fetch and inspect robots.txt for the target host before scraping.
bundestag_robotstxt <- url_parsed$server %>%
  robotstxt::robotstxt()
bundestag_robotstxt$permissions
bundestag_robotstxt$check(url_parsed$path)
bundestag_robotstxt$crawl_delay
# connect to server
# RSelenium remote driver on localhost:4445 -- assumes a Selenium server
# (e.g. a Docker container) is already running there; TODO confirm setup.
remote_driver <- remoteDriver(
  remoteServerAddr = "localhost",
  port = 4445L,
  browserName = "firefox")
remote_driver$open()
#remote_driver$getStatus()
remote_driver$navigate("https://www.bundestag.de/abgeordnete/biografien")
final_data <- list()
# note : currently 709 members à 12 per view -> 60 button clicks
# additional 1 button clicks as 34 members dropped out but are still presented on website
# Page through the member carousel: grab the visible text of each 12-member
# panel, then advance with the "next" arrow button.
for (i in 1:62) {
  # web element containing 12 members
  person_element <- remote_driver$findElement(using = "xpath", "//div[@class = 'slick-track']")
  final_data[[i]] <- person_element$getElementText()[[1]]
  # press button to get next web element
  button_element <- remote_driver$findElement(using = "xpath", "//button[@class = 'slick-next slick-arrow']")
  button_element$sendKeysToElement(list("\uE007")) # encoding for key 'enter'
  Sys.sleep(3) # voluntary crawl delay
}
# save data
save(final_data, file="WebScraping/code/df_bundestag_raw.Rda")
remote_driver$close()
| /code/bundestag.R | no_license | SilkeHusse/WebScraping | R | false | false | 2,051 | r | # ---------------------------------------------------------------------------------------
# WEB DATA COLLECTION WITH R - Project
# Silke Husse
# Winter Semester 2020/21
#
# scraping of bundestag website (bundestag.de)
# ---------------------------------------------------------------------------------------
### preparations ###
# NOTE(review): clearing the workspace and hard-coding setwd() make the
# script non-portable; an RStudio project or here::here() would be safer.
rm(list=ls(all=TRUE))
setwd("/Web Data Collection with R/") # set working directory
source("R code/packages.R") # install and load packages
# set user-agent
# Build a polite, identifying user-agent string and register it globally for
# all subsequent httr requests. "email" is a placeholder -- fill in before use.
# (str_c / %>% are presumably provided by packages loaded in packages.R.)
str_c("email", # insert email here
      "collecting data for study purposes",
      R.version$platform,
      R.version$version.string,
      sep = ", ") %>%
  httr::user_agent() %>%
  httr::set_config()
### data collection ###
url <- "https://www.bundestag.de/abgeordnete/biografien"
#browseURL(url)
url_parsed <- xml2::url_parse(url) # parse url
# check permission
# Fetch and inspect robots.txt for the target host before scraping.
bundestag_robotstxt <- url_parsed$server %>%
  robotstxt::robotstxt()
bundestag_robotstxt$permissions
bundestag_robotstxt$check(url_parsed$path)
bundestag_robotstxt$crawl_delay
# connect to server
# RSelenium remote driver on localhost:4445 -- assumes a Selenium server
# (e.g. a Docker container) is already running there; TODO confirm setup.
remote_driver <- remoteDriver(
  remoteServerAddr = "localhost",
  port = 4445L,
  browserName = "firefox")
remote_driver$open()
#remote_driver$getStatus()
remote_driver$navigate("https://www.bundestag.de/abgeordnete/biografien")
final_data <- list()
# note : currently 709 members à 12 per view -> 60 button clicks
# additional 1 button clicks as 34 members dropped out but are still presented on website
# Page through the member carousel: grab the visible text of each 12-member
# panel, then advance with the "next" arrow button.
for (i in 1:62) {
  # web element containing 12 members
  person_element <- remote_driver$findElement(using = "xpath", "//div[@class = 'slick-track']")
  final_data[[i]] <- person_element$getElementText()[[1]]
  # press button to get next web element
  button_element <- remote_driver$findElement(using = "xpath", "//button[@class = 'slick-next slick-arrow']")
  button_element$sendKeysToElement(list("\uE007")) # encoding for key 'enter'
  Sys.sleep(3) # voluntary crawl delay
}
# save data
save(final_data, file="WebScraping/code/df_bundestag_raw.Rda")
remote_driver$close()
|
context("Errors in optional parameters")
################################################################################
######################## VALUES MUST HAVE CORRECT TYPE #########################
################################################################################
# Every matrixTests function taking "alternative" must reject non-character
# values of any container type with the same error message.
test_that("alternative must be a character", {
  er <- '"alternative" must be a character vector with length 1 or nrow\\(x\\)'
  mat <- matrix(1:12, nrow=3)
  # NULL
  expect_error(row_t_onesample(x=mat, alternative=NULL), er)
  expect_error(row_t_equalvar(x=mat, y=mat, alternative=NULL), er)
  expect_error(row_t_welch(x=mat, y=mat, alternative=NULL), er)
  expect_error(row_t_paired(x=mat, y=mat, alternative=NULL), er)
  expect_error(row_cor_pearson(x=mat, y=mat, alternative=NULL), er)
  # NA
  expect_error(row_t_onesample(x=mat, alternative=NA), er)
  expect_error(row_t_equalvar(x=mat, y=mat, alternative=NA), er)
  expect_error(row_t_welch(x=mat, y=mat, alternative=NA), er)
  # FIX: row_t_paired was missing from this section although every other
  # section of this test (and file) covers it.
  expect_error(row_t_paired(x=mat, y=mat, alternative=NA), er)
  expect_error(row_cor_pearson(x=mat, y=mat, alternative=NA), er)
  # numeric
  expect_error(row_t_onesample(x=mat, alternative=1), er)
  expect_error(row_t_equalvar(x=mat, y=mat, alternative=1), er)
  expect_error(row_t_welch(x=mat, y=mat, alternative=1), er)
  expect_error(row_t_paired(x=mat, y=mat, alternative=1), er)
  expect_error(row_cor_pearson(x=mat, y=mat, alternative=1), er)
  # complex
  expect_error(row_t_onesample(x=mat, alternative=complex(1)), er)
  expect_error(row_t_equalvar(x=mat, y=mat, alternative=complex(1)), er)
  expect_error(row_t_welch(x=mat, y=mat, alternative=complex(1)), er)
  expect_error(row_t_paired(x=mat, y=mat, alternative=complex(1)), er)
  expect_error(row_cor_pearson(x=mat, y=mat, alternative=complex(1)), er)
  # in list
  expect_error(row_t_onesample(x=mat, alternative=list("less")), er)
  expect_error(row_t_equalvar(x=mat, y=mat, alternative=list("less")), er)
  expect_error(row_t_welch(x=mat, y=mat, alternative=list("less")), er)
  expect_error(row_t_paired(x=mat, y=mat, alternative=list("less")), er)
  expect_error(row_cor_pearson(x=mat, y=mat, alternative=list("less")), er)
  # data frame
  expect_error(row_t_onesample(x=mat, alternative=data.frame("less")), er)
  expect_error(row_t_equalvar(x=mat, y=mat, alternative=data.frame("less")), er)
  expect_error(row_t_welch(x=mat, y=mat, alternative=data.frame("less")), er)
  expect_error(row_t_paired(x=mat, y=mat, alternative=data.frame("less")), er)
  expect_error(row_cor_pearson(x=mat, y=mat, alternative=data.frame("less")), er)
})
# "mu" must be numeric (length 1 or nrow(x)); any other type or container
# (NULL, NA, character, complex, list, data.frame) must raise the same error.
test_that("mu must be numeric", {
  er <- '"mu" must be a numeric vector with length 1 or nrow\\(x\\)'
  mat <- matrix(1:12, nrow=3)
  # NULL
  expect_error(row_t_onesample(x=mat, mu=NULL), er)
  expect_error(row_t_equalvar(x=mat, y=mat, mu=NULL), er)
  expect_error(row_t_welch(x=mat, y=mat, mu=NULL), er)
  expect_error(row_t_paired(x=mat, y=mat, mu=NULL), er)
  # NA
  expect_error(row_t_onesample(x=mat, mu=NA), er)
  expect_error(row_t_equalvar(x=mat, y=mat, mu=NA), er)
  expect_error(row_t_welch(x=mat, y=mat, mu=NA), er)
  expect_error(row_t_paired(x=mat, y=mat, mu=NA), er)
  # character
  expect_error(row_t_onesample(x=mat, mu="1"), er)
  expect_error(row_t_equalvar(x=mat, y=mat, mu="1"), er)
  expect_error(row_t_welch(x=mat, y=mat, mu="1"), er)
  expect_error(row_t_paired(x=mat, y=mat, mu="1"), er)
  # complex
  expect_error(row_t_onesample(x=mat, mu=complex(1)), er)
  expect_error(row_t_equalvar(x=mat, y=mat, mu=complex(1)), er)
  expect_error(row_t_welch(x=mat, y=mat, mu=complex(1)), er)
  expect_error(row_t_paired(x=mat, y=mat, mu=complex(1)), er)
  # in list
  expect_error(row_t_onesample(x=mat, mu=list(1)), er)
  expect_error(row_t_equalvar(x=mat, y=mat, mu=list(1)), er)
  expect_error(row_t_welch(x=mat, y=mat, mu=list(1)), er)
  expect_error(row_t_paired(x=mat, y=mat, mu=list(1)), er)
  # data frame
  expect_error(row_t_onesample(x=mat, mu=data.frame(1)), er)
  expect_error(row_t_equalvar(x=mat, y=mat, mu=data.frame(1)), er)
  expect_error(row_t_welch(x=mat, y=mat, mu=data.frame(1)), er)
  expect_error(row_t_paired(x=mat, y=mat, mu=data.frame(1)), er)
})
# "conf.level" must be numeric; same matrix of invalid types as above,
# additionally covering row_cor_pearson (which also takes conf.level).
test_that("conf.level must be numeric", {
  er <- '"conf.level" must be a numeric vector with length 1 or nrow\\(x\\)'
  mat <- matrix(1:12, nrow=3)
  # NULL
  expect_error(row_t_onesample(x=mat, conf.level=NULL), er)
  expect_error(row_t_equalvar(x=mat, y=mat, conf.level=NULL), er)
  expect_error(row_t_welch(x=mat, y=mat, conf.level=NULL), er)
  expect_error(row_t_paired(x=mat, y=mat, conf.level=NULL), er)
  expect_error(row_cor_pearson(x=mat, y=mat, conf.level=NULL), er)
  # NA
  expect_error(row_t_onesample(x=mat, conf.level=NA), er)
  expect_error(row_t_equalvar(x=mat, y=mat, conf.level=NA), er)
  expect_error(row_t_welch(x=mat, y=mat, conf.level=NA), er)
  expect_error(row_t_paired(x=mat, y=mat, conf.level=NA), er)
  expect_error(row_cor_pearson(x=mat, y=mat, conf.level=NA), er)
  # character
  expect_error(row_t_onesample(x=mat, conf.level="1"), er)
  expect_error(row_t_equalvar(x=mat, y=mat, conf.level="1"), er)
  expect_error(row_t_welch(x=mat, y=mat, conf.level="1"), er)
  expect_error(row_t_paired(x=mat, y=mat, conf.level="1"), er)
  expect_error(row_cor_pearson(x=mat, y=mat, conf.level="1"), er)
  # complex
  expect_error(row_t_onesample(x=mat, conf.level=complex(1)), er)
  expect_error(row_t_equalvar(x=mat, y=mat, conf.level=complex(1)), er)
  expect_error(row_t_welch(x=mat, y=mat, conf.level=complex(1)), er)
  expect_error(row_t_paired(x=mat, y=mat, conf.level=complex(1)), er)
  expect_error(row_cor_pearson(x=mat, y=mat, conf.level=complex(1)), er)
  # in list
  expect_error(row_t_onesample(x=mat, conf.level=list(1)), er)
  expect_error(row_t_equalvar(x=mat, y=mat, conf.level=list(1)), er)
  expect_error(row_t_welch(x=mat, y=mat, conf.level=list(1)), er)
  expect_error(row_t_paired(x=mat, y=mat, conf.level=list(1)), er)
  expect_error(row_cor_pearson(x=mat, y=mat, conf.level=list(1)), er)
  # data frame
  expect_error(row_t_onesample(x=mat, conf.level=data.frame(1)), er)
  expect_error(row_t_equalvar(x=mat, y=mat, conf.level=data.frame(1)), er)
  expect_error(row_t_welch(x=mat, y=mat, conf.level=data.frame(1)), er)
  expect_error(row_t_paired(x=mat, y=mat, conf.level=data.frame(1)), er)
  expect_error(row_cor_pearson(x=mat, y=mat, conf.level=data.frame(1)), er)
})
# row_ievora's "cutT" and "cutBfdr" cut-offs must each be a single numeric.
test_that("p-value cutoffs must be numeric", {
  er1 <- '"cutT" must be a numeric vector with length 1'
  er2 <- '"cutBfdr" must be a numeric vector with length 1'
  mat <- matrix(1:12, nrow=3)
  grp <- c(1,1,0,0)
  # NULL
  expect_error(row_ievora(x=mat, g=grp, cutT=NULL), er1)
  expect_error(row_ievora(x=mat, g=grp, cutBfdr=NULL), er2)
  # NA
  expect_error(row_ievora(x=mat, g=grp, cutT=NA), er1)
  expect_error(row_ievora(x=mat, g=grp, cutBfdr=NA), er2)
  # character
  expect_error(row_ievora(x=mat, g=grp, cutT="0.05"), er1)
  expect_error(row_ievora(x=mat, g=grp, cutBfdr="0.05"), er2)
  # complex
  expect_error(row_ievora(x=mat, g=grp, cutT=complex(1)), er1)
  expect_error(row_ievora(x=mat, g=grp, cutBfdr=complex(1)), er2)
  # in list
  expect_error(row_ievora(x=mat, g=grp, cutT=list(1)), er1)
  expect_error(row_ievora(x=mat, g=grp, cutBfdr=list(1)), er2)
  # data frame
  expect_error(row_ievora(x=mat, g=grp, cutT=data.frame(1)), er1)
  expect_error(row_ievora(x=mat, g=grp, cutBfdr=data.frame(1)), er2)
})
################################################################################
##################### VALUES MUST HAVE CORRECT DIMENSIONS ######################
################################################################################
# "alternative" of the wrong length (neither 1 nor nrow(x)) or given as a
# matrix must be rejected. Note mat has 4 rows here (ncol=3), so length-2
# and length-5 vectors are both invalid.
test_that("alternative has correct dimensions", {
  er <- '"alternative" must be a character vector with length 1 or nrow\\(x\\)'
  mat <- matrix(1:12, ncol=3)
  # too short
  expect_error(row_t_onesample(x=mat, alternative=c("less", "greater")), er)
  expect_error(row_t_equalvar(x=mat, y=mat, alternative=c("less", "greater")), er)
  expect_error(row_t_welch(x=mat, y=mat, alternative=c("less", "greater")), er)
  expect_error(row_t_paired(x=mat, y=mat, alternative=c("less", "greater")), er)
  expect_error(row_cor_pearson(x=mat, y=mat, alternative=c("less", "greater")), er)
  # too long
  expect_error(row_t_onesample(x=mat, alternative=rep("less", 5)), er)
  expect_error(row_t_equalvar(x=mat, y=mat, alternative=rep("less", 5)), er)
  expect_error(row_t_welch(x=mat, y=mat, alternative=rep("less", 5)), er)
  expect_error(row_t_paired(x=mat, y=mat, alternative=rep("less", 5)), er)
  expect_error(row_cor_pearson(x=mat, y=mat, alternative=rep("less", 5)), er)
  # matrix format
  alt <- matrix(rep("less", 4), ncol=2)
  expect_error(row_t_onesample(x=mat, alternative=alt), er)
  expect_error(row_t_equalvar(x=mat, y=mat, alternative=alt), er)
  expect_error(row_t_welch(x=mat, y=mat, alternative=alt), er)
  expect_error(row_t_paired(x=mat, y=mat, alternative=alt), er)
  expect_error(row_cor_pearson(x=mat, y=mat, alternative=alt), er)
})
# Same dimension rules for "mu" (row_cor_pearson takes no mu, hence absent).
test_that("mu has correct dimensions", {
  er <- '"mu" must be a numeric vector with length 1 or nrow\\(x\\)'
  mat <- matrix(1:12, ncol=3)
  # too short
  expect_error(row_t_onesample(x=mat, mu=c(1,2)), er)
  expect_error(row_t_equalvar(x=mat, y=mat, mu=c(1,2)), er)
  expect_error(row_t_welch(x=mat, y=mat, mu=c(1,2)), er)
  expect_error(row_t_paired(x=mat, y=mat, mu=c(1,2)), er)
  # too long
  expect_error(row_t_onesample(x=mat, mu=rep(0, 5)), er)
  expect_error(row_t_equalvar(x=mat, y=mat, mu=rep(0, 5)), er)
  expect_error(row_t_welch(x=mat, y=mat, mu=rep(0, 5)), er)
  expect_error(row_t_paired(x=mat, y=mat, mu=rep(0, 5)), er)
  # matrix format
  mus <- matrix(rep(1, 4), ncol=2)
  expect_error(row_t_onesample(x=mat, mu=mus), er)
  expect_error(row_t_equalvar(x=mat, y=mat, mu=mus), er)
  expect_error(row_t_welch(x=mat, y=mat, mu=mus), er)
  expect_error(row_t_paired(x=mat, y=mat, mu=mus), er)
})
# Same dimension rules for "conf.level".
test_that("conf.level has correct dimensions", {
  er <- '"conf.level" must be a numeric vector with length 1 or nrow\\(x\\)'
  mat <- matrix(1:12, ncol=3)
  # too short
  expect_error(row_t_onesample(x=mat, conf.level=c(1,2)), er)
  expect_error(row_t_equalvar(x=mat, y=mat, conf.level=c(1,2)), er)
  expect_error(row_t_welch(x=mat, y=mat, conf.level=c(1,2)), er)
  expect_error(row_t_paired(x=mat, y=mat, conf.level=c(1,2)), er)
  expect_error(row_cor_pearson(x=mat, y=mat, conf.level=c(1,2)), er)
  # too long
  expect_error(row_t_onesample(x=mat, conf.level=rep(0, 5)), er)
  expect_error(row_t_equalvar(x=mat, y=mat, conf.level=rep(0, 5)), er)
  expect_error(row_t_welch(x=mat, y=mat, conf.level=rep(0, 5)), er)
  expect_error(row_t_paired(x=mat, y=mat, conf.level=rep(0, 5)), er)
  expect_error(row_cor_pearson(x=mat, y=mat, conf.level=rep(0, 5)), er)
  # matrix format
  cfs <- matrix(rep(1, 4), ncol=2)
  expect_error(row_t_onesample(x=mat, conf.level=cfs), er)
  expect_error(row_t_equalvar(x=mat, y=mat, conf.level=cfs), er)
  expect_error(row_t_welch(x=mat, y=mat, conf.level=cfs), er)
  expect_error(row_t_paired(x=mat, y=mat, conf.level=cfs), er)
  expect_error(row_cor_pearson(x=mat, y=mat, conf.level=cfs), er)
})
# row_ievora cut-offs must be scalars: vectors and matrices are rejected.
test_that("p-value cutoffs have the right dimensions", {
  er1 <- '"cutT" must be a numeric vector with length 1'
  er2 <- '"cutBfdr" must be a numeric vector with length 1'
  mat <- matrix(1:12, ncol=3)
  grp <- c(0,0,1)
  # too long
  expect_error(row_ievora(x=mat, g=grp, cutT=c(1,2)), er1)
  expect_error(row_ievora(x=mat, g=grp, cutBfdr=c(1,2)), er2)
  # matrix format
  cts <- matrix(rep(1, 4), ncol=2)
  expect_error(row_ievora(x=mat, g=grp, cutT=cts), er1)
  expect_error(row_ievora(x=mat, g=grp, cutBfdr=cts), er2)
})
################################################################################
######################## VALUES MUST BE IN CORRECT SET #########################
################################################################################
# "alternative" values must come from {two.sided, less, greater}; both a
# single bad scalar and a per-row vector containing one bad entry fail.
test_that("alternative is in: less, greater, two-sided)", {
  er <- 'all "alternative" values must be in: two\\.sided, less, greater'
  mat <- matrix(1:12, nrow=3)
  # one value
  expect_error(row_t_onesample(x=mat, alternative="ga"), er)
  expect_error(row_t_equalvar(x=mat, y=mat, alternative="ga"), er)
  expect_error(row_t_welch(x=mat, y=mat, alternative="ga"), er)
  expect_error(row_t_paired(x=mat, y=mat, alternative="ga"), er)
  expect_error(row_cor_pearson(x=mat, y=mat, alternative="ga"), er)
  # for each row and one incorrect
  expect_error(row_t_onesample(x=mat, alternative=c("t","l","c")), er)
  expect_error(row_t_equalvar(x=mat, y=mat, alternative=c("t","l","c")), er)
  expect_error(row_t_welch(x=mat, y=mat, alternative=c("t","l","c")), er)
  expect_error(row_t_paired(x=mat, y=mat, alternative=c("t","l","c")), er)
  expect_error(row_cor_pearson(x=mat, y=mat, alternative=c("t","l","c")), er)
})
# "conf.level" must lie in [0, 1]; values just outside the bounds and the
# non-finite specials (NA, NaN, +/-Inf) must all be rejected.
test_that("conf.level is in: 0-1)", {
  er <- 'all "conf.level" values must be between: 0 and 1'
  mat <- matrix(1:12, nrow=3)
  # slightly below
  expect_error(row_t_onesample(x=mat, conf.level=-0.001), er)
  expect_error(row_t_equalvar(x=mat, y=mat, conf.level=-0.001), er)
  expect_error(row_t_welch(x=mat, y=mat, conf.level=-0.001), er)
  expect_error(row_t_paired(x=mat, y=mat, conf.level=-0.001), er)
  expect_error(row_cor_pearson(x=mat, y=mat, conf.level=-0.001), er)
  # slightly above
  expect_error(row_t_onesample(x=mat, conf.level=1.001), er)
  expect_error(row_t_equalvar(x=mat, y=mat, conf.level=1.001), er)
  expect_error(row_t_welch(x=mat, y=mat, conf.level=1.001), er)
  expect_error(row_t_paired(x=mat, y=mat, conf.level=1.001), er)
  expect_error(row_cor_pearson(x=mat, y=mat, conf.level=1.001), er)
  # special values
  expect_error(row_t_onesample(x=mat, conf.level=NA_integer_), er)
  expect_error(row_t_equalvar(x=mat, y=mat, conf.level=NaN), er)
  expect_error(row_t_welch(x=mat, y=mat, conf.level=Inf), er)
  expect_error(row_t_paired(x=mat, y=mat, conf.level=-Inf), er)
  expect_error(row_cor_pearson(x=mat, y=mat, conf.level=-Inf), er)
})
# "mu" may be any finite number; NA and NaN must be rejected.
test_that("mu is in: -Inf:Inf)", {
  er <- 'all "mu" values must be between: -Inf and Inf'
  mat <- matrix(1:12, nrow=3)
  # NA
  expect_error(row_t_onesample(x=mat, mu=NA_integer_), er)
  expect_error(row_t_equalvar(x=mat, y=mat, mu=NA_integer_), er)
  expect_error(row_t_welch(x=mat, y=mat, mu=NA_integer_), er)
  expect_error(row_t_paired(x=mat, y=mat, mu=NA_integer_), er)
  # NaN
  expect_error(row_t_onesample(x=mat, mu=NaN), er)
  expect_error(row_t_equalvar(x=mat, y=mat, mu=NaN), er)
  expect_error(row_t_welch(x=mat, y=mat, mu=NaN), er)
  expect_error(row_t_paired(x=mat, y=mat, mu=NaN), er)
})
# row_ievora cut-offs must lie in [0, 1]; out-of-range and non-finite
# values must be rejected.
test_that("p-value cut-offs must be in 0-1", {
  er1 <- 'all "cutT" values must be between: 0 and 1'
  er2 <- 'all "cutBfdr" values must be between: 0 and 1'
  mat <- matrix(1:12, nrow=3)
  grp <- c(0,0,1,1)
  # slightly below
  expect_error(row_ievora(x=mat, g=grp, cutT=-0.001), er1)
  expect_error(row_ievora(x=mat, g=grp, cutBfdr=-0.001), er2)
  # slightly above
  expect_error(row_ievora(x=mat, g=grp, cutT=1.001), er1)
  expect_error(row_ievora(x=mat, g=grp, cutBfdr=1.001), er2)
  # special values
  expect_error(row_ievora(x=mat, g=grp, cutT=NA_integer_), er1)
  expect_error(row_ievora(x=mat, g=grp, cutBfdr=NaN), er2)
  expect_error(row_ievora(x=mat, g=grp, cutT=Inf), er1)
  expect_error(row_ievora(x=mat, g=grp, cutBfdr=-Inf), er2)
})
| /tests/testthat/test-parameters-optional-errors.R | no_license | ShenghuSang/matrixTests | R | false | false | 15,298 | r | context("Errors in optional parameters")
################################################################################
######################## VALUES MUST HAVE CORRECT TYPE #########################
################################################################################
# Every matrixTests function taking "alternative" must reject non-character
# values of any container type with the same error message.
test_that("alternative must be a character", {
  er <- '"alternative" must be a character vector with length 1 or nrow\\(x\\)'
  mat <- matrix(1:12, nrow=3)
  # NULL
  expect_error(row_t_onesample(x=mat, alternative=NULL), er)
  expect_error(row_t_equalvar(x=mat, y=mat, alternative=NULL), er)
  expect_error(row_t_welch(x=mat, y=mat, alternative=NULL), er)
  expect_error(row_t_paired(x=mat, y=mat, alternative=NULL), er)
  expect_error(row_cor_pearson(x=mat, y=mat, alternative=NULL), er)
  # NA
  expect_error(row_t_onesample(x=mat, alternative=NA), er)
  expect_error(row_t_equalvar(x=mat, y=mat, alternative=NA), er)
  expect_error(row_t_welch(x=mat, y=mat, alternative=NA), er)
  # FIX: row_t_paired was missing from this section although every other
  # section of this test (and file) covers it.
  expect_error(row_t_paired(x=mat, y=mat, alternative=NA), er)
  expect_error(row_cor_pearson(x=mat, y=mat, alternative=NA), er)
  # numeric
  expect_error(row_t_onesample(x=mat, alternative=1), er)
  expect_error(row_t_equalvar(x=mat, y=mat, alternative=1), er)
  expect_error(row_t_welch(x=mat, y=mat, alternative=1), er)
  expect_error(row_t_paired(x=mat, y=mat, alternative=1), er)
  expect_error(row_cor_pearson(x=mat, y=mat, alternative=1), er)
  # complex
  expect_error(row_t_onesample(x=mat, alternative=complex(1)), er)
  expect_error(row_t_equalvar(x=mat, y=mat, alternative=complex(1)), er)
  expect_error(row_t_welch(x=mat, y=mat, alternative=complex(1)), er)
  expect_error(row_t_paired(x=mat, y=mat, alternative=complex(1)), er)
  expect_error(row_cor_pearson(x=mat, y=mat, alternative=complex(1)), er)
  # in list
  expect_error(row_t_onesample(x=mat, alternative=list("less")), er)
  expect_error(row_t_equalvar(x=mat, y=mat, alternative=list("less")), er)
  expect_error(row_t_welch(x=mat, y=mat, alternative=list("less")), er)
  expect_error(row_t_paired(x=mat, y=mat, alternative=list("less")), er)
  expect_error(row_cor_pearson(x=mat, y=mat, alternative=list("less")), er)
  # data frame
  expect_error(row_t_onesample(x=mat, alternative=data.frame("less")), er)
  expect_error(row_t_equalvar(x=mat, y=mat, alternative=data.frame("less")), er)
  expect_error(row_t_welch(x=mat, y=mat, alternative=data.frame("less")), er)
  expect_error(row_t_paired(x=mat, y=mat, alternative=data.frame("less")), er)
  expect_error(row_cor_pearson(x=mat, y=mat, alternative=data.frame("less")), er)
})
# "mu" must be numeric (length 1 or nrow(x)); any other type or container
# (NULL, NA, character, complex, list, data.frame) must raise the same error.
test_that("mu must be numeric", {
  er <- '"mu" must be a numeric vector with length 1 or nrow\\(x\\)'
  mat <- matrix(1:12, nrow=3)
  # NULL
  expect_error(row_t_onesample(x=mat, mu=NULL), er)
  expect_error(row_t_equalvar(x=mat, y=mat, mu=NULL), er)
  expect_error(row_t_welch(x=mat, y=mat, mu=NULL), er)
  expect_error(row_t_paired(x=mat, y=mat, mu=NULL), er)
  # NA
  expect_error(row_t_onesample(x=mat, mu=NA), er)
  expect_error(row_t_equalvar(x=mat, y=mat, mu=NA), er)
  expect_error(row_t_welch(x=mat, y=mat, mu=NA), er)
  expect_error(row_t_paired(x=mat, y=mat, mu=NA), er)
  # character
  expect_error(row_t_onesample(x=mat, mu="1"), er)
  expect_error(row_t_equalvar(x=mat, y=mat, mu="1"), er)
  expect_error(row_t_welch(x=mat, y=mat, mu="1"), er)
  expect_error(row_t_paired(x=mat, y=mat, mu="1"), er)
  # complex
  expect_error(row_t_onesample(x=mat, mu=complex(1)), er)
  expect_error(row_t_equalvar(x=mat, y=mat, mu=complex(1)), er)
  expect_error(row_t_welch(x=mat, y=mat, mu=complex(1)), er)
  expect_error(row_t_paired(x=mat, y=mat, mu=complex(1)), er)
  # in list
  expect_error(row_t_onesample(x=mat, mu=list(1)), er)
  expect_error(row_t_equalvar(x=mat, y=mat, mu=list(1)), er)
  expect_error(row_t_welch(x=mat, y=mat, mu=list(1)), er)
  expect_error(row_t_paired(x=mat, y=mat, mu=list(1)), er)
  # data frame
  expect_error(row_t_onesample(x=mat, mu=data.frame(1)), er)
  expect_error(row_t_equalvar(x=mat, y=mat, mu=data.frame(1)), er)
  expect_error(row_t_welch(x=mat, y=mat, mu=data.frame(1)), er)
  expect_error(row_t_paired(x=mat, y=mat, mu=data.frame(1)), er)
})
test_that("conf.level must be numeric", {
  er <- '"conf.level" must be a numeric vector with length 1 or nrow\\(x\\)'
  mat <- matrix(1:12, nrow=3)
  # non-numeric conf.level values must be rejected by every test that
  # accepts a confidence level (four t-tests plus the pearson correlation)
  bad_levels <- list(NULL, NA, "1", complex(1), list(1), data.frame(1))
  for (cl in bad_levels) {
    expect_error(row_t_onesample(x=mat, conf.level=cl), er)
    expect_error(row_t_equalvar(x=mat, y=mat, conf.level=cl), er)
    expect_error(row_t_welch(x=mat, y=mat, conf.level=cl), er)
    expect_error(row_t_paired(x=mat, y=mat, conf.level=cl), er)
    expect_error(row_cor_pearson(x=mat, y=mat, conf.level=cl), er)
  }
})
test_that("p-value cutoffs must be numeric", {
  er1 <- '"cutT" must be a numeric vector with length 1'
  er2 <- '"cutBfdr" must be a numeric vector with length 1'
  mat <- matrix(1:12, nrow=3)
  grp <- c(1,1,0,0)
  # both cutoff arguments of row_ievora reject every non-numeric value,
  # each with its own argument-specific error message
  bad_cuts <- list(NULL, NA, "0.05", complex(1), list(1), data.frame(1))
  for (ct in bad_cuts) {
    expect_error(row_ievora(x=mat, g=grp, cutT=ct), er1)
    expect_error(row_ievora(x=mat, g=grp, cutBfdr=ct), er2)
  }
})
################################################################################
##################### VALUES MUST HAVE CORRECT DIMENSIONS ######################
################################################################################
test_that("alternative has correct dimensions", {
  er <- '"alternative" must be a character vector with length 1 or nrow\\(x\\)'
  mat <- matrix(1:12, ncol=3)
  # lengths other than 1 or nrow(x) (too short, too long) and matrix
  # inputs are all invalid shapes for the alternative argument
  bad_alts <- list(c("less", "greater"), rep("less", 5), matrix(rep("less", 4), ncol=2))
  for (alt in bad_alts) {
    expect_error(row_t_onesample(x=mat, alternative=alt), er)
    expect_error(row_t_equalvar(x=mat, y=mat, alternative=alt), er)
    expect_error(row_t_welch(x=mat, y=mat, alternative=alt), er)
    expect_error(row_t_paired(x=mat, y=mat, alternative=alt), er)
    expect_error(row_cor_pearson(x=mat, y=mat, alternative=alt), er)
  }
})
test_that("mu has correct dimensions", {
  er <- '"mu" must be a numeric vector with length 1 or nrow\\(x\\)'
  mat <- matrix(1:12, ncol=3)
  # lengths other than 1 or nrow(x) (too short, too long) and matrix
  # inputs are all invalid shapes for the mu argument
  bad_mus <- list(c(1,2), rep(0, 5), matrix(rep(1, 4), ncol=2))
  for (mu in bad_mus) {
    expect_error(row_t_onesample(x=mat, mu=mu), er)
    expect_error(row_t_equalvar(x=mat, y=mat, mu=mu), er)
    expect_error(row_t_welch(x=mat, y=mat, mu=mu), er)
    expect_error(row_t_paired(x=mat, y=mat, mu=mu), er)
  }
})
test_that("conf.level has correct dimensions", {
  er <- '"conf.level" must be a numeric vector with length 1 or nrow\\(x\\)'
  mat <- matrix(1:12, ncol=3)
  # lengths other than 1 or nrow(x) (too short, too long) and matrix
  # inputs are all invalid shapes for the conf.level argument
  bad_levels <- list(c(1,2), rep(0, 5), matrix(rep(1, 4), ncol=2))
  for (cl in bad_levels) {
    expect_error(row_t_onesample(x=mat, conf.level=cl), er)
    expect_error(row_t_equalvar(x=mat, y=mat, conf.level=cl), er)
    expect_error(row_t_welch(x=mat, y=mat, conf.level=cl), er)
    expect_error(row_t_paired(x=mat, y=mat, conf.level=cl), er)
    expect_error(row_cor_pearson(x=mat, y=mat, conf.level=cl), er)
  }
})
test_that("p-value cutoffs have the right dimensions", {
  er1 <- '"cutT" must be a numeric vector with length 1'
  er2 <- '"cutBfdr" must be a numeric vector with length 1'
  mat <- matrix(1:12, ncol=3)
  grp <- c(0,0,1)
  # anything other than a single scalar (longer vectors, matrices) must
  # be rejected for both cutoff arguments
  bad_cuts <- list(c(1,2), matrix(rep(1, 4), ncol=2))
  for (ct in bad_cuts) {
    expect_error(row_ievora(x=mat, g=grp, cutT=ct), er1)
    expect_error(row_ievora(x=mat, g=grp, cutBfdr=ct), er2)
  }
})
################################################################################
######################## VALUES MUST BE IN CORRECT SET #########################
################################################################################
test_that("alternative is in: less, greater, two-sided)", {
  er <- 'all "alternative" values must be in: two\\.sided, less, greater'
  mat <- matrix(1:12, nrow=3)
  # a single unknown value, and a per-row vector that contains one
  # unknown value among valid abbreviations, must both be rejected
  bad_alts <- list("ga", c("t","l","c"))
  for (alt in bad_alts) {
    expect_error(row_t_onesample(x=mat, alternative=alt), er)
    expect_error(row_t_equalvar(x=mat, y=mat, alternative=alt), er)
    expect_error(row_t_welch(x=mat, y=mat, alternative=alt), er)
    expect_error(row_t_paired(x=mat, y=mat, alternative=alt), er)
    expect_error(row_cor_pearson(x=mat, y=mat, alternative=alt), er)
  }
})
test_that("conf.level is in: 0-1)", {
# "conf.level" must be a probability: values just outside [0, 1] and
# non-finite special values all raise the same range error
er <- 'all "conf.level" values must be between: 0 and 1'
mat <- matrix(1:12, nrow=3)
# slightly below
expect_error(row_t_onesample(x=mat, conf.level=-0.001), er)
expect_error(row_t_equalvar(x=mat, y=mat, conf.level=-0.001), er)
expect_error(row_t_welch(x=mat, y=mat, conf.level=-0.001), er)
expect_error(row_t_paired(x=mat, y=mat, conf.level=-0.001), er)
expect_error(row_cor_pearson(x=mat, y=mat, conf.level=-0.001), er)
# slightly above
expect_error(row_t_onesample(x=mat, conf.level=1.001), er)
expect_error(row_t_equalvar(x=mat, y=mat, conf.level=1.001), er)
expect_error(row_t_welch(x=mat, y=mat, conf.level=1.001), er)
expect_error(row_t_paired(x=mat, y=mat, conf.level=1.001), er)
expect_error(row_cor_pearson(x=mat, y=mat, conf.level=1.001), er)
# special values (NA, NaN and infinities are treated as out of range)
expect_error(row_t_onesample(x=mat, conf.level=NA_integer_), er)
expect_error(row_t_equalvar(x=mat, y=mat, conf.level=NaN), er)
expect_error(row_t_welch(x=mat, y=mat, conf.level=Inf), er)
expect_error(row_t_paired(x=mat, y=mat, conf.level=-Inf), er)
expect_error(row_cor_pearson(x=mat, y=mat, conf.level=-Inf), er)
})
test_that("mu is in: -Inf:Inf)", {
  er <- 'all "mu" values must be between: -Inf and Inf'
  mat <- matrix(1:12, nrow=3)
  # non-finite placeholders (NA, NaN) fall outside the allowed range
  for (mu in list(NA_integer_, NaN)) {
    expect_error(row_t_onesample(x=mat, mu=mu), er)
    expect_error(row_t_equalvar(x=mat, y=mat, mu=mu), er)
    expect_error(row_t_welch(x=mat, y=mat, mu=mu), er)
    expect_error(row_t_paired(x=mat, y=mat, mu=mu), er)
  }
})
test_that("p-value cut-offs must be in 0-1", {
# both row_ievora cut-offs are probabilities; out-of-range and non-finite
# values each raise the cut-off-specific range error
er1 <- 'all "cutT" values must be between: 0 and 1'
er2 <- 'all "cutBfdr" values must be between: 0 and 1'
mat <- matrix(1:12, nrow=3)
grp <- c(0,0,1,1)
# slightly below
expect_error(row_ievora(x=mat, g=grp, cutT=-0.001), er1)
expect_error(row_ievora(x=mat, g=grp, cutBfdr=-0.001), er2)
# slightly above
expect_error(row_ievora(x=mat, g=grp, cutT=1.001), er1)
expect_error(row_ievora(x=mat, g=grp, cutBfdr=1.001), er2)
# special values (NA, NaN and infinities count as out of range)
expect_error(row_ievora(x=mat, g=grp, cutT=NA_integer_), er1)
expect_error(row_ievora(x=mat, g=grp, cutBfdr=NaN), er2)
expect_error(row_ievora(x=mat, g=grp, cutT=Inf), er1)
expect_error(row_ievora(x=mat, g=grp, cutBfdr=-Inf), er2)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/crop_CHM_plots.R
\name{crop_CHM_plots}
\alias{crop_CHM_plots}
\title{Clip CHM Data Based on Neon Plots}
\usage{
crop_CHM_plots(site_name = "TEAK", year = "2018")
}
\arguments{
\item{site_name}{NEON site abbreviation (e.g. "HARV")}

\item{year}{Year of the NEON data to use (e.g. "2018")}
}
\value{
Saved tif files for each plot
}
\description{
\code{crop_CHM_plots} overlays the polygons of the NEON plots with the derived CHM image
}
| /man/crop_CHM_plots.Rd | no_license | weecology/TreeSegmentation | R | false | true | 456 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/crop_CHM_plots.R
\name{crop_CHM_plots}
\alias{crop_CHM_plots}
\title{Clip CHM Data Based on Neon Plots}
\usage{
crop_CHM_plots(site_name = "TEAK", year = "2018")
}
\arguments{
\item{site_name}{NEON site abbreviation (e.g. "HARV")}

\item{year}{Year of the NEON data to use (e.g. "2018")}
}
\value{
Saved tif files for each plot
}
\description{
\code{crop_CHM_plots} overlays the polygons of the NEON plots with the derived CHM image
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datashield.admin.R
\name{dsadmin.perm_delete}
\alias{dsadmin.perm_delete}
\title{Delete a DataSHIELD permission}
\usage{
dsadmin.perm_delete(opal, subject, type = "user")
}
\arguments{
\item{opal}{Opal connection object.}
\item{subject}{A vector of subject identifiers: user names or group names (depending on the type).}
\item{type}{The type of subject: user (default) or group.}
}
\description{
Delete a permission that was applied to the DataSHIELD service. Silently returns when there is no such permission.
}
\examples{
\dontrun{
o <- opal.login('administrator','password', url='https://opal-demo.obiba.org')
dsadmin.perm_add(o, c('andrei', 'valentina'), 'user', 'use')
dsadmin.perm(o)
dsadmin.perm_delete(o, c('andrei', 'valentina'), 'user')
opal.logout(o)
}
}
| /man/dsadmin.perm_delete.Rd | no_license | obiba/opalr | R | false | true | 847 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datashield.admin.R
\name{dsadmin.perm_delete}
\alias{dsadmin.perm_delete}
\title{Delete a DataSHIELD permission}
\usage{
dsadmin.perm_delete(opal, subject, type = "user")
}
\arguments{
\item{opal}{Opal connection object.}
\item{subject}{A vector of subject identifiers: user names or group names (depending on the type).}
\item{type}{The type of subject: user (default) or group.}
}
\description{
Delete a permission that was applied to the DataSHIELD service. Silently returns when there is no such permission.
}
\examples{
\dontrun{
o <- opal.login('administrator','password', url='https://opal-demo.obiba.org')
dsadmin.perm_add(o, c('andrei', 'valentina'), 'user', 'use')
dsadmin.perm(o)
dsadmin.perm_delete(o, c('andrei', 'valentina'), 'user')
opal.logout(o)
}
}
|
#Use this to transpose with 1 key and multiple values. See "Situational Pitching to Contact.R" for example
# multi_spread(df, key, value): like tidyr::spread(), but carries several
# value columns at once. The melted column name and the key are united into
# a single header column ("temp") before spreading, so each output column is
# named from the key value plus the original value-column name.
# NOTE(review): `value` is captured with enquo() and re-spliced through
# quos(), so the caller supplies the value columns with tidyselect syntax —
# confirm against the usage in "Situational Pitching to Contact.R".
multi_spread <- function(df, key, value) {
# quote key
keyq <- rlang::enquo(key)
# break value vector into quotes
valueq <- rlang::enquo(value)
s <- rlang::quos(!!valueq)
df %>% gather(variable, value, !!!s) %>%
unite(temp, !!keyq, variable) %>%
spread(temp, value)
} | /Situational-Pitching/Code/multi_spread.R | no_license | judah-axelrod/Baseball-Research | R | false | false | 406 | r | #Use this to transpose with 1 key and multiple values. See "Situational Pitching to Contact.R" for example
# Transpose a data frame on one key column while spreading several value
# columns at once (see "Situational Pitching to Contact.R" for an example).
# The key value and each melted column name are united into the new headers.
multi_spread <- function(df, key, value) {
  key_quo <- rlang::enquo(key)
  # capture the value-column selection and splice it into gather()
  value_quos <- rlang::quos(!!rlang::enquo(value))
  df %>%
    gather(variable, value, !!!value_quos) %>%
    unite(temp, !!key_quo, variable) %>%
    spread(temp, value)
}
\name{minimumCycleRatio}
\alias{minimumCycleRatio}
\title{minimumCycleRatio}
\description{minimumCycleRatio description }
\usage{
minimumCycleRatio(g)
}
\arguments{
\item{g}{ instance of class graphNEL from Bioconductor graph class}
}
\details{
}
\value{
}
\references{
Boost Graph Library ( www.boost.org/libs/graph/doc/index.html )
The Boost Graph Library: User Guide and Reference Manual;
by Jeremy G. Siek, Lie-Quan Lee, and Andrew Lumsdaine;
(Addison-Wesley, Pearson Education Inc., 2002), xxiv+321pp.
ISBN 0-201-72914-8
}
\author{ Li Long <li.long@isb-sib.ch> }
\examples{
}
\keyword{ graphs }
| /man/minimumCycleRatio.Rd | permissive | cran/RBGL | R | false | false | 616 | rd | \name{minimumCycleRatio}
\alias{minimumCycleRatio}
\title{minimumCycleRatio}
\description{minimumCycleRatio description }
\usage{
minimumCycleRatio(g)
}
\arguments{
\item{g}{ instance of class graphNEL from Bioconductor graph class}
}
\details{
}
\value{
}
\references{
Boost Graph Library ( www.boost.org/libs/graph/doc/index.html )
The Boost Graph Library: User Guide and Reference Manual;
by Jeremy G. Siek, Lie-Quan Lee, and Andrew Lumsdaine;
(Addison-Wesley, Pearson Education Inc., 2002), xxiv+321pp.
ISBN 0-201-72914-8
}
\author{ Li Long <li.long@isb-sib.ch> }
\examples{
}
\keyword{ graphs }
|
### Create IF model using the important variables only
### Does this model perform similarily to the full model
## Marc Sze
#Load needed libraries
source('code/functions.R')
loadLibs(c("randomForest", "dplyr", "scales", "caret"))
#Load needed data
test_data <- read.csv("data/process/tables/adn_treatment_test_tune_data.csv", header = TRUE)
lesion_imp_vars <- read.csv("data/process/tables/adn_treatment_imp_vars_summary.csv", header = T, stringsAsFactors = F) %>%
  rename(variable = Variable)
mda_summary <- read.csv("data/process/tables/adn_treatment_top_vars_MDA_Summary.csv", header = T, stringsAsFactors = F)
#Merge MDA and rank counts together to get top 10.
# Variables are ranked first by how often they were flagged as important
# (total_appearance), then by mean decrease in accuracy (mean_MDA).
combined_ranks <- inner_join(lesion_imp_vars, mda_summary, by = "variable") %>%
  arrange(desc(total_appearance), desc(mean_MDA))
#Create data table with only reduced features (impvars only)
# Keep the outcome column ("lesion") plus only the 10 top-ranked variables.
vars_to_keep <- slice(combined_ranks, 1:10)[, "variable"]
test_data_imps <- select(test_data, lesion, one_of(vars_to_keep))
#################################################################################
#                                                                               #
#                                                                               #
#                     Model Training and Parameter Tuning                       #
#                                                                               #
#################################################################################
#Create Overall specifications for model tuning
fitControl <- trainControl(## 10-fold CV
                           method = "cv",
                           number = 10,
                           ## repeated twenty times
                           p = 0.8,
                           classProbs = TRUE,
                           savePredictions = TRUE,
                           summaryFunction = twoClassSummary)
set.seed(3457)
#Set up lists to store the data
test_tune_list <- list()
test_predictions <- list()
# NOTE(review): the seed is set once before the loop, so each of the 100
# train() calls consumes fresh RNG state and draws different CV folds —
# the 100 fits therefore sample run-to-run variability of the same model.
for(i in 1:100){
  #Get test data
  train_test_data <- test_data_imps
  #Train the model
  test_tune_list[[paste("data_split", i, sep = "")]] <-
    train(lesion ~ ., data = train_test_data,
          method = "rf",
          ntree = 100,
          trControl = fitControl,
          metric = "ROC",
          na.action = na.omit,
          verbose = FALSE)
  # NOTE(review): predictions are generated on the same table the model was
  # trained on (resubstitution, not held-out) — confirm this is intended.
  test_predictions[[paste("data_split", i, sep = "")]] <-
    predict(test_tune_list[[i]], test_data_imps)
}
# Save image with data and relevant parameters
save.image("exploratory/adn_treatment_reduced_RF_model_Imp_OTU.RData")
| /code/old/adn_treatment_reduce_feature_model.R | permissive | SchlossLab/Sze_FollowUps_Microbiome_2017 | R | false | false | 2,521 | r | ### Create IF model using the important variables only
### Does this model perform similarily to the full model
## Marc Sze
#Load needed libraries
source('code/functions.R')
loadLibs(c("randomForest", "dplyr", "scales", "caret"))
#Load needed data
test_data <- read.csv("data/process/tables/adn_treatment_test_tune_data.csv", header = TRUE)
lesion_imp_vars <- read.csv("data/process/tables/adn_treatment_imp_vars_summary.csv", header = T, stringsAsFactors = F) %>%
rename(variable = Variable)
mda_summary <- read.csv("data/process/tables/adn_treatment_top_vars_MDA_Summary.csv", header = T, stringsAsFactors = F)
#Merge MDA and rank counts together to get top 10.
combined_ranks <- inner_join(lesion_imp_vars, mda_summary, by = "variable") %>%
arrange(desc(total_appearance), desc(mean_MDA))
#Create data table with only reduced features (impvars only)
vars_to_keep <- slice(combined_ranks, 1:10)[, "variable"]
test_data_imps <- select(test_data, lesion, one_of(vars_to_keep))
#################################################################################
# #
# #
# Model Training and Parameter Tuning #
# #
#################################################################################
#Create Overall specifications for model tuning
fitControl <- trainControl(## 10-fold CV
method = "cv",
number = 10,
## repeated twenty times
p = 0.8,
classProbs = TRUE,
savePredictions = TRUE,
summaryFunction = twoClassSummary)
set.seed(3457)
#Set up lists to store the data
test_tune_list <- list()
test_predictions <- list()
for(i in 1:100){
#Get test data
train_test_data <- test_data_imps
#Train the model
test_tune_list[[paste("data_split", i, sep = "")]] <-
train(lesion ~ ., data = train_test_data,
method = "rf",
ntree = 100,
trControl = fitControl,
metric = "ROC",
na.action = na.omit,
verbose = FALSE)
test_predictions[[paste("data_split", i, sep = "")]] <-
predict(test_tune_list[[i]], test_data_imps)
}
# Save image with data and relevant parameters
save.image("exploratory/adn_treatment_reduced_RF_model_Imp_OTU.RData")
|
#!/usr/bin/env Rscript
# R 3.3
# USAGE: plot_avg_coverage.R data/depth.averages.txt /path/to/outdir
# DESCRIPTION:
# This script will read in a text file generated with the 'average_coverages.py' script
# and output a plot showing the average coverage per chromosome per genome (column)
library("reshape2")
library("ggplot2")
library("plotly")
# get commands passed to the script
# Positional args: [1] depth-averages table produced by average_coverages.py,
# [2] output directory for the PDF and .Rdata files.
args <- commandArgs(TRUE)
cov_file <- args[1]
outdir <- args[2]
# cov_file <- "/home/devsci4/Structural_Variants_CSHL/10x_Read_Simulator/test_output/depth13Genome.depth.averages.txt"
# outdir <- "/home/devsci4/Structural_Variants_CSHL/10x_Read_Simulator/test_output/"
# > load("/home/devsci4/Structural_Variants_CSHL/10x_Read_Simulator/test_output/plot_avg-manual.Rdata")
# read in the file
coverage_df <- read.table(cov_file)
# fix colnames
# First column is the chromosome; the remaining columns are one per sample.
# NOTE(review): the hard-coded sample order below must match the column
# order of the input table — confirm against average_coverages.py output.
colnames(coverage_df)[1] <- "chrom"
# colnames(coverage_df)[-1] <- paste("genome_", seq_along(colnames(coverage_df)[-1]), sep="")
sample_names <- c("HG00512"
                  , "HG00513"
                  , "HG00514"
                  , "HG00731"
                  , "HG00732"
                  , "HG00733"
                  , "NA19238"
                  , "NA19239"
                  , "NA19240"
                  , "NA24143"
                  , "NA24149"
                  , "NA24385"
                  , "NA12878")
colnames(coverage_df)[-1] <- sample_names
# chrom_vec <- dput(scan("hg38_chrom_list.txt",what = ""))
# chrom_vec <- c("chr1", "chr2", "chr3", "chr4", "chr5", "chr6", "chr7", "chr8",
#                "chr9", "chr10", "chr11", "chr12", "chr13", "chr14", "chr15",
#                "chr16", "chr17", "chr18", "chr19", "chr20", "chr21", "chr22",
#                "chrX", "chrY")
# chrom_vec <- factor(chrom_vec, levels = chrom_vec)
# print(chrom_vec)
# Canonical hg38 chromosomes, in karyotype order, used both to filter the
# table and to fix the factor level order for plotting.
chrom_vec <- c("chr1", "chr2", "chr3", "chr4", "chr5", "chr6", "chr7", "chr8", "chr9", "chr10", "chr11", "chr12", "chr13", "chr14", "chr15", "chr16", "chr17", "chr18", "chr19", "chr20", "chr21", "chr22", "chrX", "chrY")
# Return the elements of `source_data` matched by any of `patterns`.
# Multiple patterns are combined into a single alternation regex ("a|b|c");
# note each pattern is treated as a regular expression, not a fixed string.
multi_grep <- function(source_data, patterns) {
  combined <- if (length(patterns) > 1) {
    paste(patterns, collapse = "|")
  } else {
    patterns
  }
  grep(combined, source_data, value = TRUE)
}
# get only the desired chrom's
# NOTE(review): growing new_df with rbind inside a loop is O(n^2); a single
# vectorized subset (coverage_df[coverage_df$chrom %in% chrom_vec, ]) would
# keep the same rows, differing only in row order.
new_df <- data.frame()
for(chrmatch in multi_grep(chrom_vec, coverage_df[["chrom"]])){
  # print(chrmatch)
  new_df <- rbind(new_df, subset(coverage_df, chrom == chrmatch))
}
coverage_df <- droplevels(new_df)
# fix the factor level order so chromosomes appear in karyotype order
coverage_df[["chrom"]] <- factor(coverage_df[["chrom"]], levels = chrom_vec)
# coverage_df <- coverage_df[!grepl("_", coverage_df[["chrom"]]),]
# coverage_df <- coverage_df[!grepl("chrEBV", coverage_df[["chrom"]]),]
# coverage_df <- coverage_df[!grepl("hs38d1", coverage_df[["chrom"]]),]
# coverage_df <- coverage_df[!grepl("chrM", coverage_df[["chrom"]]),]
# melt into long format
coverage_df <- reshape2::melt(coverage_df, id.vars = "chrom", value.name = "coverage", variable.name = "sample")
# fix chrom order for plot
# coverage_df <- coverage_df[with(coverage_df, order(chrom)), ]
# peel off the coverage stats column and turn into a grouping factor
# Each coverage cell is a comma-separated triple ("average,std_dev,count");
# split it out into three numeric columns alongside chrom and sample.
stat_strings <- strsplit(as.character(coverage_df$coverage), ',')
stats_df <- data.frame(matrix(as.numeric(unlist(stat_strings)), nrow=length(stat_strings), byrow=T))
colnames(stats_df) <- c("average", "std_dev", "count")
coverage_df <- cbind(coverage_df[c("chrom", "sample")], stats_df)
# print(head(coverage_df))
# quit()
# plot by genome
# coverage_df_avg <- subset(coverage_df, statistic == "average")
# Horizontal grouped barplot: one bar per chromosome within each sample.
pdf(file = file.path(outdir, "avg_cov_byGenome-manual.pdf"), height = 8, width = 8)
chrom_plot <- ggplot(coverage_df, aes(x = sample, y = average, fill = factor(chrom)))
chrom_plot <-chrom_plot + geom_bar(stat="identity", position="dodge")
chrom_plot <-chrom_plot + coord_flip()
# chrom_plot <-chrom_plot + scale_x_discrete(limits = rev(levels(coverage_df[["chrom"]])))
chrom_plot <-chrom_plot + labs(title="Average Coverage Per Chromosome\nPer Samples", x="Sample", y = "Average Coverage", fill="Chromosome")
print(chrom_plot)
dev.off()
# Persist the whole workspace so the plot can be rebuilt interactively.
save.image(file=file.path(outdir, "plot_avg-manual.Rdata"),compress = TRUE)
# load("/home/devsci4/Structural_Variants_CSHL/10x_Read_Simulator/test_output/plot_avg-manual.Rdata")
# plotly
# chrom_plotly <- ggplotly(chrom_plot)
# htmlwidgets::saveWidget(as.widget(chrom_plotly), file.path(outdir, "avg_cov_byGenome-manual.html"))
# coverage_df
# make horizontal stacked grouped barplot
# plot by chrom
# pdf(file = file.path(outdir, "avg_cov_byChrom.pdf"), height = 8, width = 8)
# ggplot(coverage_df, aes(x = chrom, y = coverage, fill = factor(sample))) +
#   geom_bar(stat="identity", position="dodge") + # remove 'position' for stacked plot
#   coord_flip() +
#   labs(title="Average Coverage Per Chromosome\nPer Samples", x="Chromosome", y = "Average Coverage")
# dev.off()
| /10x_Read_Simulator/code/plot_avg_coverage-manual.R | permissive | stevekm/Structural_Variants_CSHL | R | false | false | 4,866 | r | #!/usr/bin/env Rscript
# R 3.3
# USAGE: plot_avg_coverage.R data/depth.averages.txt /path/to/outdir
# DESCRIPTION:
# This script will read in a text file generated with the 'average_coverages.py' script
# and output a plot showing the average coverage per chromosome per genome (column)
library("reshape2")
library("ggplot2")
library("plotly")
# get commands passed to the script
# get commands passed to the script
args <- commandArgs(TRUE)
cov_file <- args[1]
outdir <- args[2]
# cov_file <- "/home/devsci4/Structural_Variants_CSHL/10x_Read_Simulator/test_output/depth13Genome.depth.averages.txt"
# outdir <- "/home/devsci4/Structural_Variants_CSHL/10x_Read_Simulator/test_output/"
# > load("/home/devsci4/Structural_Variants_CSHL/10x_Read_Simulator/test_output/plot_avg-manual.Rdata")
# read in the file
coverage_df <- read.table(cov_file)
# fix colnames
colnames(coverage_df)[1] <- "chrom"
# colnames(coverage_df)[-1] <- paste("genome_", seq_along(colnames(coverage_df)[-1]), sep="")
sample_names <- c("HG00512"
, "HG00513"
, "HG00514"
, "HG00731"
, "HG00732"
, "HG00733"
, "NA19238"
, "NA19239"
, "NA19240"
, "NA24143"
, "NA24149"
, "NA24385"
, "NA12878")
colnames(coverage_df)[-1] <- sample_names
# chrom_vec <- dput(scan("hg38_chrom_list.txt",what = ""))
# chrom_vec <- c("chr1", "chr2", "chr3", "chr4", "chr5", "chr6", "chr7", "chr8",
# "chr9", "chr10", "chr11", "chr12", "chr13", "chr14", "chr15",
# "chr16", "chr17", "chr18", "chr19", "chr20", "chr21", "chr22",
# "chrX", "chrY")
# chrom_vec <- factor(chrom_vec, levels = chrom_vec)
# print(chrom_vec)
chrom_vec <- c("chr1", "chr2", "chr3", "chr4", "chr5", "chr6", "chr7", "chr8", "chr9", "chr10", "chr11", "chr12", "chr13", "chr14", "chr15", "chr16", "chr17", "chr18", "chr19", "chr20", "chr21", "chr22", "chrX", "chrY")
multi_grep <- function(source_data, patterns){
# find multiple patterns in a char vector
# make a regex to search with
if (length(patterns) > 1){
search_pattern <- paste(patterns,
collapse = "|")
} else {
search_pattern <- patterns
}
matches <- grep(x = source_data,
pattern = search_pattern,
value = TRUE)
return(matches)
}
# get only the desired chrom's
new_df <- data.frame()
for(chrmatch in multi_grep(chrom_vec, coverage_df[["chrom"]])){
# print(chrmatch)
new_df <- rbind(new_df, subset(coverage_df, chrom == chrmatch))
}
coverage_df <- droplevels(new_df)
coverage_df[["chrom"]] <- factor(coverage_df[["chrom"]], levels = chrom_vec)
# coverage_df <- coverage_df[!grepl("_", coverage_df[["chrom"]]),]
# coverage_df <- coverage_df[!grepl("chrEBV", coverage_df[["chrom"]]),]
# coverage_df <- coverage_df[!grepl("hs38d1", coverage_df[["chrom"]]),]
# coverage_df <- coverage_df[!grepl("chrM", coverage_df[["chrom"]]),]
# melt into long format
coverage_df <- reshape2::melt(coverage_df, id.vars = "chrom", value.name = "coverage", variable.name = "sample")
# fix chrom order for plot
# coverage_df <- coverage_df[with(coverage_df, order(chrom)), ]
# peel off the coverage stats column and turn into a grouping factor
stat_strings <- strsplit(as.character(coverage_df$coverage), ',')
stats_df <- data.frame(matrix(as.numeric(unlist(stat_strings)), nrow=length(stat_strings), byrow=T))
colnames(stats_df) <- c("average", "std_dev", "count")
coverage_df <- cbind(coverage_df[c("chrom", "sample")], stats_df)
# print(head(coverage_df))
# quit()
# plot by genome
# coverage_df_avg <- subset(coverage_df, statistic == "average")
pdf(file = file.path(outdir, "avg_cov_byGenome-manual.pdf"), height = 8, width = 8)
chrom_plot <- ggplot(coverage_df, aes(x = sample, y = average, fill = factor(chrom)))
chrom_plot <-chrom_plot + geom_bar(stat="identity", position="dodge")
chrom_plot <-chrom_plot + coord_flip()
# chrom_plot <-chrom_plot + scale_x_discrete(limits = rev(levels(coverage_df[["chrom"]])))
chrom_plot <-chrom_plot + labs(title="Average Coverage Per Chromosome\nPer Samples", x="Sample", y = "Average Coverage", fill="Chromosome")
print(chrom_plot)
dev.off()
save.image(file=file.path(outdir, "plot_avg-manual.Rdata"),compress = TRUE)
# load("/home/devsci4/Structural_Variants_CSHL/10x_Read_Simulator/test_output/plot_avg-manual.Rdata")
# plotly
# chrom_plotly <- ggplotly(chrom_plot)
# htmlwidgets::saveWidget(as.widget(chrom_plotly), file.path(outdir, "avg_cov_byGenome-manual.html"))
# coverage_df
# make horizontal stacked grouped barplot
# plot by chrom
# pdf(file = file.path(outdir, "avg_cov_byChrom.pdf"), height = 8, width = 8)
# ggplot(coverage_df, aes(x = chrom, y = coverage, fill = factor(sample))) +
# geom_bar(stat="identity", position="dodge") + # remove 'position' for stacked plot
# coord_flip() +
# labs(title="Average Coverage Per Chromosome\nPer Samples", x="Chromosome", y = "Average Coverage")
# dev.off()
|
testlist <- list(latLongs = structure(8.30822468639613e-313, .Dim = c(1L, 1L)), r = -1.61836625588379e+260)
result <- do.call(MGDrivE::calcCos,testlist)
str(result) | /MGDrivE/inst/testfiles/calcCos/libFuzzer_calcCos/calcCos_valgrind_files/1612726898-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 165 | r | testlist <- list(latLongs = structure(8.30822468639613e-313, .Dim = c(1L, 1L)), r = -1.61836625588379e+260)
result <- do.call(MGDrivE::calcCos,testlist)
str(result) |
require(dplyr)
#require(data.table)
#fd <- readRDS('../data/fulldatasetintegration_correct.rds')$nonsinglegenepairs
#fd[,6:30] <- matrix(NA, nrow = nrow(fd),ncol = 25)
#jj <- 1
#fd <- readRDS(paste0('../data/integration/int',jj,'/int',jj,'_1.rds'))
# thr <- 3
# fd <- readRDS(file = paste0('../data/integration/intall/int',1,'.rds')) %>%
# #filter(sumnlogpvbest >= 3)
# filter(sumnlogpv1 >= thr)
# for(ii in c(2:906)){
# print(ii)
# fd <- rbind(fd,readRDS(file = paste0('../data/integration/intall/int',ii,'.rds')) %>%
# filter(sumnlogpvbest >= thr))
# }
# write.table(fd, file = '../data/toperc/int.sumnlogpvbest.gteq.10.tsv',
# quote = F, sep = '\t',row.names = F)
# NOTE(review): x and this first fd read look like leftover scratch code —
# both are overwritten below before being used.
x<-1
fd <- readRDS(paste0('../data/integration/intall/int',x,'.rds'))
# Collect the rows with sumnlogpv1 >= thr from each of the 906 combined
# integration chunks and write them out as a single TSV.
thr <- 2
alldf <- lapply(1:906, function(x){
  print(x)
  readRDS(paste0('../data/integration/intall/int',x,'.rds')) %>%
    filter(sumnlogpv1 >= thr)
})
fd <- do.call(rbind, alldf)
write.table(fd, file = paste0('../data/toperc/int.sumnlogpv1.gteq.',thr,'.tsv'),
            quote = F, sep = '\t',row.names = F)
# Complementary pass: rows with sumnlogpv1 strictly below a (different,
# lower) threshold, written to a matching ".lt." TSV.
thr <- 0.2
alldf <- lapply(1:906, function(x){
  print(x)
  readRDS(paste0('../data/integration/intall/int',x,'.rds')) %>%
    filter(sumnlogpv1 < thr)
})
fd <- do.call(rbind, alldf)
write.table(fd, file = paste0('../data/toperc/int.sumnlogpv1.lt.',thr,'.tsv'),
            quote = F, sep = '\t',row.names = F)
# fd <- readRDS('../data/int99_5.rds') %>%
# na.omit()
# View(fd[1:1000,])
#vis egs
# library(RERconverge)
# source('~/Documents/RERc/RERconverge/R/RERfuncs.R')
# source('~/Documents/RERc/RERconverge/R/RcppExports.R')
# source('~/Documents/RERc/RERconverge/R/projection_coevo.R')
# source('~/Documents/erc/code/funcsERCfromProjections.R')
# require(gridExtra)
# gns <- c('TSN','MYL12A')
# mtre <- readRDS('~/Documents/erc/data/saves/mamm63nt.trees.rds')
# mwts <- readRDS('~/Documents/erc/data/saves/mamm63nt.trees.weights.rds')
# mrers <- readRDS('~/Documents/rermethods/data/mamm63nt.trees.scaledrers.sqrt.wt.rds')
# correlateERCGeneListTrees(mtre, gns, weighted = T, weights = mwts)
# grid.arrange(plotRers(mrers,'TSN'),plotRers(mrers,'MYL12A'))
for(ii in c(1:906)){
print(ii)
#ii <- 1
#jj <- 1
for(jj in c(1:5)){
assign(paste0('fd',jj), readRDS(paste0('../data/integration/int',jj,'/int',ii,'_',jj,'.rds')))
}
fd <- cbind(fd1,fd2[,6:10],fd3[,6:10],fd4[,6:10],fd5[,6:10])
fd$sumnlogpv1 <- rowSums(fd[,c('nlogpv11','nlogpv12','nlogpv13','nlogpv14','nlogpv15')], na.rm = T)
fd$sumnlogpvbest <- rowSums(fd[,c('nlogpvbest1','nlogpvbest2','nlogpvbest3','nlogpvbest4','nlogpvbest5')], na.rm = T)
fd <- select(fd,c(1:5,31,32,6:30)) %>%
arrange(desc(sumnlogpvbest))
saveRDS(fd, file = paste0('../data/integration/intall/int',ii,'.rds'))
}
#View(fd[1:5,])
| /combineh2pintegration.R | no_license | raghavendranpartha/coevolution | R | false | false | 2,874 | r | require(dplyr)
#require(data.table)
#fd <- readRDS('../data/fulldatasetintegration_correct.rds')$nonsinglegenepairs
#fd[,6:30] <- matrix(NA, nrow = nrow(fd),ncol = 25)
#jj <- 1
#fd <- readRDS(paste0('../data/integration/int',jj,'/int',jj,'_1.rds'))
# thr <- 3
# fd <- readRDS(file = paste0('../data/integration/intall/int',1,'.rds')) %>%
# #filter(sumnlogpvbest >= 3)
# filter(sumnlogpv1 >= thr)
# for(ii in c(2:906)){
# print(ii)
# fd <- rbind(fd,readRDS(file = paste0('../data/integration/intall/int',ii,'.rds')) %>%
# filter(sumnlogpvbest >= thr))
# }
# write.table(fd, file = '../data/toperc/int.sumnlogpvbest.gteq.10.tsv',
# quote = F, sep = '\t',row.names = F)
x<-1
fd <- readRDS(paste0('../data/integration/intall/int',x,'.rds'))
thr <- 2
alldf <- lapply(1:906, function(x){
print(x)
readRDS(paste0('../data/integration/intall/int',x,'.rds')) %>%
filter(sumnlogpv1 >= thr)
})
fd <- do.call(rbind, alldf)
write.table(fd, file = paste0('../data/toperc/int.sumnlogpv1.gteq.',thr,'.tsv'),
quote = F, sep = '\t',row.names = F)
thr <- 0.2
alldf <- lapply(1:906, function(x){
print(x)
readRDS(paste0('../data/integration/intall/int',x,'.rds')) %>%
filter(sumnlogpv1 < thr)
})
fd <- do.call(rbind, alldf)
write.table(fd, file = paste0('../data/toperc/int.sumnlogpv1.lt.',thr,'.tsv'),
quote = F, sep = '\t',row.names = F)
# fd <- readRDS('../data/int99_5.rds') %>%
# na.omit()
# View(fd[1:1000,])
#vis egs
# library(RERconverge)
# source('~/Documents/RERc/RERconverge/R/RERfuncs.R')
# source('~/Documents/RERc/RERconverge/R/RcppExports.R')
# source('~/Documents/RERc/RERconverge/R/projection_coevo.R')
# source('~/Documents/erc/code/funcsERCfromProjections.R')
# require(gridExtra)
# gns <- c('TSN','MYL12A')
# mtre <- readRDS('~/Documents/erc/data/saves/mamm63nt.trees.rds')
# mwts <- readRDS('~/Documents/erc/data/saves/mamm63nt.trees.weights.rds')
# mrers <- readRDS('~/Documents/rermethods/data/mamm63nt.trees.scaledrers.sqrt.wt.rds')
# correlateERCGeneListTrees(mtre, gns, weighted = T, weights = mwts)
# grid.arrange(plotRers(mrers,'TSN'),plotRers(mrers,'MYL12A'))
for(ii in c(1:906)){
print(ii)
#ii <- 1
#jj <- 1
for(jj in c(1:5)){
assign(paste0('fd',jj), readRDS(paste0('../data/integration/int',jj,'/int',ii,'_',jj,'.rds')))
}
fd <- cbind(fd1,fd2[,6:10],fd3[,6:10],fd4[,6:10],fd5[,6:10])
fd$sumnlogpv1 <- rowSums(fd[,c('nlogpv11','nlogpv12','nlogpv13','nlogpv14','nlogpv15')], na.rm = T)
fd$sumnlogpvbest <- rowSums(fd[,c('nlogpvbest1','nlogpvbest2','nlogpvbest3','nlogpvbest4','nlogpvbest5')], na.rm = T)
fd <- select(fd,c(1:5,31,32,6:30)) %>%
arrange(desc(sumnlogpvbest))
saveRDS(fd, file = paste0('../data/integration/intall/int',ii,'.rds'))
}
#View(fd[1:5,])
|
\name{Dist}
\title{Distance Matrix Computation}
\usage{
Dist(x, method = "euclidean", nbproc = 2, diag = FALSE, upper = FALSE)
}
\alias{Dist}
\arguments{
\item{x}{numeric matrix or (data frame) or an object of class
"exprSet".
Distances between the rows of
\code{x} will be computed.}
\item{method}{the distance measure to be used. This must be one of
\code{"euclidean"}, \code{"maximum"}, \code{"manhattan"},
\code{"canberra"}, \code{"binary"}, \code{"pearson"},
\code{"abspearson"}, \code{"correlation"},
\code{"abscorrelation"}, \code{"spearman"} or \code{"kendall"}.
Any unambiguous substring can be given.}
\item{nbproc}{integer, Number of subprocess for parallelization}
\item{diag}{logical value indicating whether the diagonal of the
distance matrix should be printed by \code{print.dist}.}
\item{upper}{logical value indicating whether the upper triangle of the
distance matrix should be printed by \code{print.dist}.}
}
\description{
This function computes and returns the distance matrix computed by
using the specified distance measure to compute the distances between
the rows of a data matrix.
}
\details{
Available distance measures are (written for two vectors \eqn{x} and
\eqn{y}):
\describe{
\item{\code{euclidean}:}{Usual square distance between the two
vectors (2 norm).}
\item{\code{maximum}:}{Maximum distance between two components of \eqn{x}
and \eqn{y} (supremum norm)}
\item{\code{manhattan}:}{Absolute distance between the two vectors
(1 norm).}
\item{\code{canberra}:}{\eqn{\sum_i |x_i - y_i| / |x_i + y_i|}{%
sum(|x_i - y_i| / |x_i + y_i|)}. Terms with zero numerator and
denominator are omitted from the sum and treated as if the values
were missing.
}
\item{\code{binary}:}{(aka \emph{asymmetric binary}): The vectors
are regarded as binary bits, so non-zero elements are `on' and zero
elements are `off'. The distance is the \emph{proportion} of
bits in which only one is on amongst those in which at least one is on.}
\item{\code{pearson}:}{Also named "not centered Pearson"
\eqn{1 - \frac{\sum_i x_i y_i}{\sqrt{\sum_i x_i^2 %
\sum_i y_i^2}}}{%
1 - sum(x_i y_i) / sqrt [sum(x_i^2) sum(y_i^2)]}.
}
\item{\code{abspearson}:}{Absolute Pearson
\eqn{1 - \left| \frac{\sum_i x_i y_i}{\sqrt{\sum_i x_i^2 %
\sum_i y_i^2}} \right| }{%
1 - |sum(x_i y_i) / sqrt [sum(x_i^2) sum(y_i^2)] |}.
}
\item{\code{correlation}:}{Also named "Centered Pearson"
\eqn{1 - corr(x,y)}.
}
\item{\code{abscorrelation}:}{Absolute correlation
\eqn{1 - | corr(x,y) |}
with
\eqn{ corr(x,y) = \frac{\sum_i x_i y_i -\frac1n \sum_i x_i \sum_i%
y_i}{% frac: 2nd part
\sqrt{\left(\sum_i x_i^2 -\frac1n \left( \sum_i x_i \right)^2 %
\right)%
\left( \sum_i y_i^2 -\frac1n \left( \sum_i y_i \right)^2 %
\right)} }}.
}
\item{\code{spearman}:}{Compute a distance based on rank.
\eqn{\sum(d_i^2)}{sum (d_i^2)} where \eqn{d_i} is the difference
in rank between \eqn{x_i} and \eqn{y_i}.
\code{Dist(x,method="spearman")[i,j] =}
\code{cor.test(x[i,],x[j,],method="spearman")$statistic}
}
\item{\code{kendall}:}{Compute a distance based on rank.
\eqn{\sum_{i,j} K_{i,j}(x,y)} with \eqn{K_{i,j}(x,y)}
is 0 if \eqn{x_i, x_j} in same order as \eqn{y_i,y_j},
1 if not.
}
}
Missing values are allowed, and are excluded from all computations
involving the rows within which they occur. If some columns are
excluded in calculating a Euclidean, Manhattan or Canberra distance,
the sum is scaled up proportionally to the number of columns used.
If all pairs are excluded when calculating a particular distance,
the value is \code{NA}.
The functions \code{as.matrix.dist()} and \code{as.dist()} can be used
for conversion between objects of class \code{"dist"} and conventional
distance matrices and vice versa.
}
\value{
An object of class \code{"dist"}.
The lower triangle of the distance matrix stored by columns in a
vector, say \code{do}. If \code{n} is the number of
observations, i.e., \code{n <- attr(do, "Size")}, then
for \eqn{i < j <= n}, the dissimilarity between (row) i and j is
\code{do[n*(i-1) - i*(i-1)/2 + j-i]}.
The length of the vector is \eqn{n*(n-1)/2}, i.e., of order \eqn{n^2}.
The object has the following attributes (besides \code{"class"} equal
to \code{"dist"}):
\item{Size}{integer, the number of observations in the dataset.}
\item{Labels}{optionally, contains the labels, if any, of the
observations of the dataset.}
\item{Diag, Upper}{logicals corresponding to the arguments \code{diag}
and \code{upper} above, specifying how the object should be printed.}
\item{call}{optionally, the \code{\link{call}} used to create the
object.}
  \item{methods}{optionally, the distance method used; resulting from
\code{\link{dist}()}, the (\code{\link{match.arg}()}ed) \code{method}
argument.}
}
\references{
Mardia, K. V., Kent, J. T. and Bibby, J. M. (1979)
\emph{Multivariate Analysis.} London: Academic Press.
Wikipedia
\url{https://en.wikipedia.org/wiki/Kendall_tau_distance}
}
\note{Multi-thread (parallelisation) is disabled on Windows.}
\seealso{
\code{\link[cluster]{daisy}} in the \file{cluster} package with more
  possibilities in the case of \emph{mixed} (continuous / categorical)
variables.
\code{\link[stats]{dist}} \code{\link{hcluster}}.
}
\examples{
x <- matrix(rnorm(100), nrow=5)
Dist(x)
Dist(x, diag = TRUE)
Dist(x, upper = TRUE)
## compute dist with 8 threads
Dist(x,nbproc=8)
Dist(x,method="abscorrelation")
Dist(x,method="kendall")
}
\keyword{multivariate}
\keyword{cluster}
| /man/dist.Rd | no_license | cran/amap | R | false | false | 5,796 | rd | \name{Dist}
\title{Distance Matrix Computation}
\usage{
Dist(x, method = "euclidean", nbproc = 2, diag = FALSE, upper = FALSE)
}
\alias{Dist}
\arguments{
\item{x}{numeric matrix or (data frame) or an object of class
"exprSet".
Distances between the rows of
\code{x} will be computed.}
\item{method}{the distance measure to be used. This must be one of
\code{"euclidean"}, \code{"maximum"}, \code{"manhattan"},
\code{"canberra"}, \code{"binary"}, \code{"pearson"},
\code{"abspearson"}, \code{"correlation"},
\code{"abscorrelation"}, \code{"spearman"} or \code{"kendall"}.
Any unambiguous substring can be given.}
\item{nbproc}{integer, Number of subprocess for parallelization}
\item{diag}{logical value indicating whether the diagonal of the
distance matrix should be printed by \code{print.dist}.}
\item{upper}{logical value indicating whether the upper triangle of the
distance matrix should be printed by \code{print.dist}.}
}
\description{
This function computes and returns the distance matrix computed by
using the specified distance measure to compute the distances between
the rows of a data matrix.
}
\details{
Available distance measures are (written for two vectors \eqn{x} and
\eqn{y}):
\describe{
\item{\code{euclidean}:}{Usual square distance between the two
vectors (2 norm).}
\item{\code{maximum}:}{Maximum distance between two components of \eqn{x}
and \eqn{y} (supremum norm)}
\item{\code{manhattan}:}{Absolute distance between the two vectors
(1 norm).}
\item{\code{canberra}:}{\eqn{\sum_i |x_i - y_i| / |x_i + y_i|}{%
sum(|x_i - y_i| / |x_i + y_i|)}. Terms with zero numerator and
denominator are omitted from the sum and treated as if the values
were missing.
}
\item{\code{binary}:}{(aka \emph{asymmetric binary}): The vectors
are regarded as binary bits, so non-zero elements are `on' and zero
elements are `off'. The distance is the \emph{proportion} of
bits in which only one is on amongst those in which at least one is on.}
\item{\code{pearson}:}{Also named "not centered Pearson"
\eqn{1 - \frac{\sum_i x_i y_i}{\sqrt{\sum_i x_i^2 %
\sum_i y_i^2}}}{%
1 - sum(x_i y_i) / sqrt [sum(x_i^2) sum(y_i^2)]}.
}
\item{\code{abspearson}:}{Absolute Pearson
\eqn{1 - \left| \frac{\sum_i x_i y_i}{\sqrt{\sum_i x_i^2 %
\sum_i y_i^2}} \right| }{%
1 - |sum(x_i y_i) / sqrt [sum(x_i^2) sum(y_i^2)] |}.
}
\item{\code{correlation}:}{Also named "Centered Pearson"
\eqn{1 - corr(x,y)}.
}
\item{\code{abscorrelation}:}{Absolute correlation
\eqn{1 - | corr(x,y) |}
with
\eqn{ corr(x,y) = \frac{\sum_i x_i y_i -\frac1n \sum_i x_i \sum_i%
y_i}{% frac: 2nd part
\sqrt{\left(\sum_i x_i^2 -\frac1n \left( \sum_i x_i \right)^2 %
\right)%
\left( \sum_i y_i^2 -\frac1n \left( \sum_i y_i \right)^2 %
\right)} }}.
}
\item{\code{spearman}:}{Compute a distance based on rank.
\eqn{\sum(d_i^2)}{sum (d_i^2)} where \eqn{d_i} is the difference
in rank between \eqn{x_i} and \eqn{y_i}.
\code{Dist(x,method="spearman")[i,j] =}
\code{cor.test(x[i,],x[j,],method="spearman")$statistic}
}
\item{\code{kendall}:}{Compute a distance based on rank.
\eqn{\sum_{i,j} K_{i,j}(x,y)} with \eqn{K_{i,j}(x,y)}
is 0 if \eqn{x_i, x_j} in same order as \eqn{y_i,y_j},
1 if not.
}
}
Missing values are allowed, and are excluded from all computations
involving the rows within which they occur. If some columns are
excluded in calculating a Euclidean, Manhattan or Canberra distance,
the sum is scaled up proportionally to the number of columns used.
If all pairs are excluded when calculating a particular distance,
the value is \code{NA}.
The functions \code{as.matrix.dist()} and \code{as.dist()} can be used
for conversion between objects of class \code{"dist"} and conventional
distance matrices and vice versa.
}
\value{
An object of class \code{"dist"}.
The lower triangle of the distance matrix stored by columns in a
vector, say \code{do}. If \code{n} is the number of
observations, i.e., \code{n <- attr(do, "Size")}, then
for \eqn{i < j <= n}, the dissimilarity between (row) i and j is
\code{do[n*(i-1) - i*(i-1)/2 + j-i]}.
The length of the vector is \eqn{n*(n-1)/2}, i.e., of order \eqn{n^2}.
The object has the following attributes (besides \code{"class"} equal
to \code{"dist"}):
\item{Size}{integer, the number of observations in the dataset.}
\item{Labels}{optionally, contains the labels, if any, of the
observations of the dataset.}
\item{Diag, Upper}{logicals corresponding to the arguments \code{diag}
and \code{upper} above, specifying how the object should be printed.}
\item{call}{optionally, the \code{\link{call}} used to create the
object.}
  \item{methods}{optionally, the distance method used; resulting from
\code{\link{dist}()}, the (\code{\link{match.arg}()}ed) \code{method}
argument.}
}
\references{
Mardia, K. V., Kent, J. T. and Bibby, J. M. (1979)
\emph{Multivariate Analysis.} London: Academic Press.
Wikipedia
\url{https://en.wikipedia.org/wiki/Kendall_tau_distance}
}
\note{Multi-thread (parallelisation) is disabled on Windows.}
\seealso{
\code{\link[cluster]{daisy}} in the \file{cluster} package with more
  possibilities in the case of \emph{mixed} (continuous / categorical)
variables.
\code{\link[stats]{dist}} \code{\link{hcluster}}.
}
\examples{
x <- matrix(rnorm(100), nrow=5)
Dist(x)
Dist(x, diag = TRUE)
Dist(x, upper = TRUE)
## compute dist with 8 threads
Dist(x,nbproc=8)
Dist(x,method="abscorrelation")
Dist(x,method="kendall")
}
\keyword{multivariate}
\keyword{cluster}
|
# Split each chromosome's GWAS map file into intervals of roughly 200 SNPs and
# record, per interval, the SNP count and start/end positions; the resulting
# interval files drive chunked imputation jobs.
library(dplyr)
# Input/output locations for the Mayo GWAS dataset (b37 build).
rootDir <- "./"
dataDir <- paste0(rootDir, "data/")
gwasData <- "SYounkin_MayoGWAS_09-05-08"
gwasDir <- paste0(dataDir, "gwas_results/", gwasData, ".b37/")
resultsDir <- paste0(gwasDir, "impute_intervals/")
if (!file.exists(resultsDir)) {
    dir.create(resultsDir)
}
# For each autosome: read the .map file (V1 = chromosome; V4 is presumably the
# base-pair position -- TODO confirm against the map-file format), assign SNPs
# to ~200-SNP intervals, and summarise each interval.
chrInts <- data.frame()
for (chr in 1:22) {
    chrBim <- read.table(paste0(gwasDir, gwasData, ".chr", chr, ".b37.map"))
    nSNPs <- nrow(chrBim)
    nInts <- ceiling(nSNPs / 200)
    intLength <- nSNPs / nInts
    ints <- findInterval(1:nSNPs, seq(1, nSNPs, intLength))
    chrBim <- chrBim %>%
        mutate(chr = V1,
               interval = factor(ints)) %>%
        group_by(chr, interval) %>%
        summarise(numSNPs = length(V1),
                  start = min(V4),
                  end = max(V4)) %>%
        mutate(interval = as.numeric(interval))
    write.table(chrBim, paste0(gwasDir, "impute_intervals/chr", chr, ".ints"))
    numInts <- nrow(chrBim)
    chrInts <- rbind(chrInts, data.frame(chr = chr, numInts = numInts))
}
# Record how many intervals each chromosome produced.
write.table(chrInts, paste0(gwasDir, "impute_intervals/num_ints.txt"))
| /genomeImputing/R/impute_ranges.R | no_license | jaeddy/ampSynapseProjects | R | false | false | 1,143 | r | library(dplyr)
rootDir <- "./"
dataDir <- paste0(rootDir, "data/")
gwasData <- "SYounkin_MayoGWAS_09-05-08"
gwasDir <- paste0(dataDir, "gwas_results/", gwasData, ".b37/")
resultsDir <- paste0(gwasDir, "impute_intervals/")
if (!file.exists(resultsDir)) {
dir.create(resultsDir)
}
chrInts <- data.frame()
for (chr in 1:22) {
chrBim <- read.table(paste0(gwasDir, gwasData, ".chr", chr, ".b37.map"))
nSNPs <- nrow(chrBim)
nInts <- ceiling(nSNPs / 200)
intLength <- nSNPs / nInts
ints <- findInterval(1:nSNPs, seq(1, nSNPs, intLength))
chrBim <- chrBim %>%
mutate(chr = V1,
interval = factor(ints)) %>%
group_by(chr, interval) %>%
summarise(numSNPs = length(V1),
start = min(V4),
end = max(V4)) %>%
mutate(interval = as.numeric(interval))
write.table(chrBim, paste0(gwasDir, "impute_intervals/chr", chr, ".ints"))
numInts <- nrow(chrBim)
chrInts <- rbind(chrInts, data.frame(chr = chr, numInts = numInts))
}
write.table(chrInts, paste0(gwasDir, "impute_intervals/num_ints.txt"))
|
#----------------------------------------------------------------------
# Load Euler-Maruyama and Splitting
#----------------------------------------------------------------------
# Compile the C++ implementations of the Euler-Maruyama (EM) and splitting
# schemes for the Hawkes/Erlang model and load the helper that builds the
# matrix exponential used by the splitting steps.
#Rcpp
library(Rcpp)
library(RcppNumerical)
library(devtools)
find_rtools(T)  # NOTE(review): T here is still TRUE; it is masked by T<-10^5 below, so call order matters.
sourceCpp(file="Hawkes_EM_Cpp.cpp")
sourceCpp(file="Hawkes_Splitting_Cpp.cpp")
source(file="Hawkes_Matrices.R")
#--------------------------------------
# Model and simulation parameters.
T<-10^5  # time horizon (masks base R's TRUE shorthand from here on)
c1<--1
c2<-1
ny1<-1
ny2<-1
N<-100  # total population size
N1<-N/2
N2<-N/2
p1<-N1/N  # subpopulation proportions
p2<-N2/N
eta1<-3 #3
eta2<-2 #2
kappa<-eta1+eta2+2  # state-vector dimension (rows of the solution matrix)
#for density
# Kernel-density evaluation windows for the two coordinate groups.
start_P1<--8
end_P1<-3
start_P2<--1
end_P2<-6
num<-10000  # number of density evaluation points
#plot-range
xl<--5
xr<-4
#--------------------------------------
# Figure "Sub1": marginal densities from the Euler-Maruyama scheme for step
# sizes Delta = 0.01, 0.5, 0.7 (one panel each).  Coordinates 1-4 use the
# [start_P1, end_P1] density window, coordinates 5-7 use [start_P2, end_P2].
# NOTE(review): lty=NULL in the legends means no key lines are drawn, so the
# col= setting there appears to have no visible effect -- confirm intent.
pdf(width=12,height=3.3,"Fig_density_EM_SP_Sub1.pdf")
par(mfrow=c(1,3))
par(mar=c(0.3,1, 1,3), oma=c(4,1.5,2,0), mai = c(0.1, 0.35, 0.1, 0.1))
#Euler-Maruyama: h1
set.seed(1)
h<-10^-2
grid<-seq(0,T,h)
startv1<-rep(0,kappa)
sol<-Hawkes_EM_Cpp_(grid,h,startv1,c1,c2,ny1,ny2,p1,p2,N,eta1,eta2)
# Overlay the estimated density of every state coordinate.
plot(density(sol[1,],from=start_P1,to=end_P1,n=num)$x,density(sol[1,],from=start_P1,to=end_P1,n=num)$y,type="l",col="black",xlim=c(xl,xr),ylim=c(0,1),xlab="",ylab="",xaxt="n")
lines(density(sol[2,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[3,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[4,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[5,],from=start_P2,to=end_P2,n=num),type="l",col="black",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[6,],from=start_P2,to=end_P2,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[7,],from=start_P2,to=end_P2,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
abline(h=0,col="grey",lwd=1)
mtext(expression(paste(pi[X],"(",x,")")), line = 2, side = 2, outer = F,cex=1.3)
legend("topright", legend=c("EM",expression(Delta==0.01)),
       col=c("red","green"), cex=1.7,bg = "white",lty=NULL,bty="n")
#Euler-Maruyama: h2
set.seed(1)
h<-5*10^-1
grid<-seq(0,T,h)
startv1<-rep(0,kappa)
sol<-Hawkes_EM_Cpp_(grid,h,startv1,c1,c2,ny1,ny2,p1,p2,N,eta1,eta2)
plot(density(sol[1,],from=start_P1,to=end_P1,n=num)$x,density(sol[1,],from=start_P1,to=end_P1,n=num)$y,type="l",col="black",xlim=c(xl,xr),ylim=c(0,1),xlab="",ylab="",xaxt="n",yaxt="n")
lines(density(sol[2,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[3,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[4,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[5,],from=start_P2,to=end_P2,n=num),type="l",col="black",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[6,],from=start_P2,to=end_P2,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[7,],from=start_P2,to=end_P2,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
abline(h=0,col="grey",lwd=1)
legend("topright", legend=c("EM",expression(Delta==0.5)),
       col=c("red","green"), cex=1.7,bg = "white",lty=NULL,bty="n")
#Euler-Maruyama: h3
set.seed(1)
h<-7*10^-1
grid<-seq(0,T,h)
startv1<-rep(0,kappa)
sol<-Hawkes_EM_Cpp_(grid,h,startv1,c1,c2,ny1,ny2,p1,p2,N,eta1,eta2)
plot(density(sol[1,],from=start_P1,to=end_P1,n=num)$x,density(sol[1,],from=start_P1,to=end_P1,n=num)$y,type="l",col="black",xlim=c(xl,xr),ylim=c(0,1),xlab="",ylab="",xaxt="n",yaxt="n")
lines(density(sol[2,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[3,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[4,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[5,],from=start_P2,to=end_P2,n=num),type="l",col="black",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[6,],from=start_P2,to=end_P2,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[7,],from=start_P2,to=end_P2,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
abline(h=0,col="grey",lwd=1)
legend("topright", legend=c("EM",expression(Delta==0.7)),
       col=c("red","green"), cex=1.7,bg = "white",lty=NULL,bty="n")
dev.off()
#--------------------------
# Figure "Sub2": same three step sizes, but using the Lie-Trotter (LT)
# splitting scheme; the matrix exponential is built with the full step h.
pdf(width=12,height=3.3,"Fig_density_EM_SP_Sub2.pdf")
par(mfrow=c(1,3))
par(mar=c(0.3,1, 1,3), oma=c(4,1.5,2,0), mai = c(0.1, 0.35, 0.1, 0.1))
#Splitting LT: h1
set.seed(1)
h<-10^-2
grid<-seq(0,T,h)
expmatA<-expmatA_SP(h,ny1,ny2,eta1,eta2)
startv1<-rep(0,kappa)
sol<-Hawkes_SP_LT_Cpp_(grid,h,startv1,c1,c2,ny1,ny2,p1,p2,N,expmatA,eta1,eta2)
# Overlay the estimated density of every state coordinate.
plot(density(sol[1,],from=start_P1,to=end_P1,n=num)$x,density(sol[1,],from=start_P1,to=end_P1,n=num)$y,type="l",col="black",xlim=c(xl,xr),ylim=c(0,1),xlab="",ylab="",xaxt="n")
lines(density(sol[2,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[3,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[4,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[5,],from=start_P2,to=end_P2,n=num),type="l",col="black",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[6,],from=start_P2,to=end_P2,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[7,],from=start_P2,to=end_P2,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
abline(h=0,col="grey",lwd=1)
mtext(expression(paste(pi[X],"(",x,")")), line = 2, side = 2, outer = F,cex=1.3)
legend("topright", legend=c("LT",expression(Delta==0.01)),
       col=c("red","green"), cex=1.7,bg = "white",lty=NULL,bty="n")
#Splitting LT: h2
set.seed(1)
h<-5*10^-1
grid<-seq(0,T,h)
expmatA<-expmatA_SP(h,ny1,ny2,eta1,eta2)
startv1<-rep(0,kappa)
sol<-Hawkes_SP_LT_Cpp_(grid,h,startv1,c1,c2,ny1,ny2,p1,p2,N,expmatA,eta1,eta2)
plot(density(sol[1,],from=start_P1,to=end_P1,n=num)$x,density(sol[1,],from=start_P1,to=end_P1,n=num)$y,type="l",col="black",xlim=c(xl,xr),ylim=c(0,1),xlab="",ylab="",xaxt="n",yaxt="n")
lines(density(sol[2,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[3,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[4,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[5,],from=start_P2,to=end_P2,n=num),type="l",col="black",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[6,],from=start_P2,to=end_P2,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[7,],from=start_P2,to=end_P2,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
abline(h=0,col="grey",lwd=1)
legend("topright", legend=c("LT",expression(Delta==0.5)),
       col=c("red","green"), cex=1.7,bg = "white",lty=NULL,bty="n")
#Splitting LT: h3
set.seed(1)
h<-7*10^-1
grid<-seq(0,T,h)
expmatA<-expmatA_SP(h,ny1,ny2,eta1,eta2)
startv1<-rep(0,kappa)
sol<-Hawkes_SP_LT_Cpp_(grid,h,startv1,c1,c2,ny1,ny2,p1,p2,N,expmatA,eta1,eta2)
# NOTE(review): xlab="y" here (other panels use xlab="") while the x axis is
# suppressed via xaxt="n" -- confirm whether the label is intentional.
plot(density(sol[1,],from=start_P1,to=end_P1,n=num)$x,density(sol[1,],from=start_P1,to=end_P1,n=num)$y,type="l",col="black",xlim=c(xl,xr),ylim=c(0,1),xlab="y",ylab="",xaxt="n",yaxt="n")
lines(density(sol[2,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[3,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[4,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[5,],from=start_P2,to=end_P2,n=num),type="l",col="black",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[6,],from=start_P2,to=end_P2,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[7,],from=start_P2,to=end_P2,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
abline(h=0,col="grey",lwd=1)
legend("topright", legend=c("LT",expression(Delta==0.7)),
       col=c("red","green"), cex=1.7,bg = "white",lty=NULL,bty="n")
dev.off()
#--------------------------
# Figure "Sub3": same three step sizes, using the Strang (ST) splitting
# scheme; the matrix exponential is built with the half step h/2.  This is
# the bottom row of the composite figure, so the x axis and its label are
# drawn here.
pdf(width=12,height=3.3,"Fig_density_EM_SP_Sub3.pdf")
par(mfrow=c(1,3))
par(mar=c(0.3,1, 1,3), oma=c(4,1.5,2,0), mai = c(0.1, 0.35, 0.1, 0.1))
#Splitting ST: h1
set.seed(1)
h<-10^-2
grid<-seq(0,T,h)
expmatA<-expmatA_SP(h/2,ny1,ny2,eta1,eta2)
startv1<-rep(0,kappa)
sol<-Hawkes_SP_ST_Cpp_(grid,h,startv1,c1,c2,ny1,ny2,p1,p2,N,expmatA,eta1,eta2)
plot(density(sol[1,],from=start_P1,to=end_P1,n=num)$x,density(sol[1,],from=start_P1,to=end_P1,n=num)$y,type="l",col="black",xlim=c(xl,xr),ylim=c(0,1),xlab="",ylab="")
lines(density(sol[2,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[3,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[4,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[5,],from=start_P2,to=end_P2,n=num),type="l",col="black",xlim=c(xl,xr),ylim=c(0,1))
# NOTE(review): sol[6,] is drawn in black here but grey in every other panel
# of this script -- confirm whether this highlighting is intentional.
lines(density(sol[6,],from=start_P2,to=end_P2,n=num),type="l",col="black",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[7,],from=start_P2,to=end_P2,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
abline(h=0,col="grey",lwd=1)
mtext(expression("x"), line = 3, side = 1, outer = F,cex=1.3)
mtext(expression(paste(pi[X],"(",x,")")), line = 2, side = 2, outer = F,cex=1.3)
legend("topright", legend=c("ST",expression(Delta==0.01)),
       col=c("red","green"), cex=1.7,bg = "white",lty=NULL,bty="n")
#Splitting ST: h2
set.seed(1)
h<-5*10^-1
grid<-seq(0,T,h)
expmatA<-expmatA_SP(h/2,ny1,ny2,eta1,eta2)
startv1<-rep(0,kappa)
sol<-Hawkes_SP_ST_Cpp_(grid,h,startv1,c1,c2,ny1,ny2,p1,p2,N,expmatA,eta1,eta2)
plot(density(sol[1,],from=start_P1,to=end_P1,n=num)$x,density(sol[1,],from=start_P1,to=end_P1,n=num)$y,type="l",col="black",xlim=c(xl,xr),ylim=c(0,1),xlab="",ylab="",yaxt="n")
lines(density(sol[2,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[3,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[4,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[5,],from=start_P2,to=end_P2,n=num),type="l",col="black",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[6,],from=start_P2,to=end_P2,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[7,],from=start_P2,to=end_P2,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
abline(h=0,col="grey",lwd=1)
mtext(expression("x"), line = 3, side = 1, outer = F,cex=1.3)
legend("topright", legend=c("ST",expression(Delta==0.5)),
       col=c("red","green"), cex=1.7,bg = "white",lty=NULL,bty="n")
#Splitting ST: h3
set.seed(1)
h<-7*10^-1
grid<-seq(0,T,h)
expmatA<-expmatA_SP(h/2,ny1,ny2,eta1,eta2)
startv1<-rep(0,kappa)
sol<-Hawkes_SP_ST_Cpp_(grid,h,startv1,c1,c2,ny1,ny2,p1,p2,N,expmatA,eta1,eta2)
plot(density(sol[1,],from=start_P1,to=end_P1,n=num)$x,density(sol[1,],from=start_P1,to=end_P1,n=num)$y,type="l",col="black",xlim=c(xl,xr),ylim=c(0,1),xlab="",ylab="",yaxt="n")
lines(density(sol[2,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[3,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[4,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[5,],from=start_P2,to=end_P2,n=num),type="l",col="black",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[6,],from=start_P2,to=end_P2,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[7,],from=start_P2,to=end_P2,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
abline(h=0,col="grey",lwd=1)
mtext(expression("x"), line = 3, side = 1, outer = F,cex=1.3)
legend("topright", legend=c("ST",expression(Delta==0.7)),
       col=c("red","green"), cex=1.7,bg = "white",lty=NULL,bty="n")
dev.off()
| /Hawkes_Erlang_Rcpp_code/test_densities.R | no_license | melnyashka/Hawkes_process_with_Erlang_kernels | R | false | false | 11,935 | r |
#----------------------------------------------------------------------
# Load Euler-Maruyama and Splitting
#----------------------------------------------------------------------
# Compile the C++ implementations of the Euler-Maruyama (EM) and splitting
# schemes for the Hawkes/Erlang model and load the helper that builds the
# matrix exponential used by the splitting steps.
#Rcpp
library(Rcpp)
library(RcppNumerical)
library(devtools)
find_rtools(T)  # NOTE(review): T here is still TRUE; it is masked by T<-10^5 below, so call order matters.
sourceCpp(file="Hawkes_EM_Cpp.cpp")
sourceCpp(file="Hawkes_Splitting_Cpp.cpp")
source(file="Hawkes_Matrices.R")
#--------------------------------------
# Model and simulation parameters.
T<-10^5  # time horizon (masks base R's TRUE shorthand from here on)
c1<--1
c2<-1
ny1<-1
ny2<-1
N<-100  # total population size
N1<-N/2
N2<-N/2
p1<-N1/N  # subpopulation proportions
p2<-N2/N
eta1<-3 #3
eta2<-2 #2
kappa<-eta1+eta2+2  # state-vector dimension (rows of the solution matrix)
#for density
# Kernel-density evaluation windows for the two coordinate groups.
start_P1<--8
end_P1<-3
start_P2<--1
end_P2<-6
num<-10000  # number of density evaluation points
#plot-range
xl<--5
xr<-4
#--------------------------------------
# Figure "Sub1": marginal densities from the Euler-Maruyama scheme for step
# sizes Delta = 0.01, 0.5, 0.7 (one panel each).  Coordinates 1-4 use the
# [start_P1, end_P1] density window, coordinates 5-7 use [start_P2, end_P2].
# NOTE(review): lty=NULL in the legends means no key lines are drawn, so the
# col= setting there appears to have no visible effect -- confirm intent.
pdf(width=12,height=3.3,"Fig_density_EM_SP_Sub1.pdf")
par(mfrow=c(1,3))
par(mar=c(0.3,1, 1,3), oma=c(4,1.5,2,0), mai = c(0.1, 0.35, 0.1, 0.1))
#Euler-Maruyama: h1
set.seed(1)
h<-10^-2
grid<-seq(0,T,h)
startv1<-rep(0,kappa)
sol<-Hawkes_EM_Cpp_(grid,h,startv1,c1,c2,ny1,ny2,p1,p2,N,eta1,eta2)
# Overlay the estimated density of every state coordinate.
plot(density(sol[1,],from=start_P1,to=end_P1,n=num)$x,density(sol[1,],from=start_P1,to=end_P1,n=num)$y,type="l",col="black",xlim=c(xl,xr),ylim=c(0,1),xlab="",ylab="",xaxt="n")
lines(density(sol[2,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[3,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[4,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[5,],from=start_P2,to=end_P2,n=num),type="l",col="black",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[6,],from=start_P2,to=end_P2,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[7,],from=start_P2,to=end_P2,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
abline(h=0,col="grey",lwd=1)
mtext(expression(paste(pi[X],"(",x,")")), line = 2, side = 2, outer = F,cex=1.3)
legend("topright", legend=c("EM",expression(Delta==0.01)),
       col=c("red","green"), cex=1.7,bg = "white",lty=NULL,bty="n")
#Euler-Maruyama: h2
set.seed(1)
h<-5*10^-1
grid<-seq(0,T,h)
startv1<-rep(0,kappa)
sol<-Hawkes_EM_Cpp_(grid,h,startv1,c1,c2,ny1,ny2,p1,p2,N,eta1,eta2)
plot(density(sol[1,],from=start_P1,to=end_P1,n=num)$x,density(sol[1,],from=start_P1,to=end_P1,n=num)$y,type="l",col="black",xlim=c(xl,xr),ylim=c(0,1),xlab="",ylab="",xaxt="n",yaxt="n")
lines(density(sol[2,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[3,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[4,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[5,],from=start_P2,to=end_P2,n=num),type="l",col="black",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[6,],from=start_P2,to=end_P2,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[7,],from=start_P2,to=end_P2,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
abline(h=0,col="grey",lwd=1)
legend("topright", legend=c("EM",expression(Delta==0.5)),
       col=c("red","green"), cex=1.7,bg = "white",lty=NULL,bty="n")
#Euler-Maruyama: h3
set.seed(1)
h<-7*10^-1
grid<-seq(0,T,h)
startv1<-rep(0,kappa)
sol<-Hawkes_EM_Cpp_(grid,h,startv1,c1,c2,ny1,ny2,p1,p2,N,eta1,eta2)
plot(density(sol[1,],from=start_P1,to=end_P1,n=num)$x,density(sol[1,],from=start_P1,to=end_P1,n=num)$y,type="l",col="black",xlim=c(xl,xr),ylim=c(0,1),xlab="",ylab="",xaxt="n",yaxt="n")
lines(density(sol[2,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[3,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[4,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[5,],from=start_P2,to=end_P2,n=num),type="l",col="black",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[6,],from=start_P2,to=end_P2,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[7,],from=start_P2,to=end_P2,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
abline(h=0,col="grey",lwd=1)
legend("topright", legend=c("EM",expression(Delta==0.7)),
       col=c("red","green"), cex=1.7,bg = "white",lty=NULL,bty="n")
dev.off()
#--------------------------
# --- Figure: invariant densities under the Lie--Trotter (LT) splitting ------
# Same layout as the EM figure above: three panels (Delta = 0.01, 0.5, 0.7),
# each overlaying kernel density estimates of the 7 components of `sol`
# (rows 1-4: P1 range, row 1 black as reference; rows 5-7: P2 range, row 5
# black). Globals (start_P*/end_P*, num, xl, xr, kappa, T, model parameters)
# are defined earlier in the script.
pdf(width=12,height=3.3,"Fig_density_EM_SP_Sub2.pdf")
par(mfrow=c(1,3))
par(mar=c(0.3,1, 1,3), oma=c(4,1.5,2,0), mai = c(0.1, 0.35, 0.1, 0.1))
#Splitting LT: h1 (Delta = 0.01)
set.seed(1)
h<-10^-2
grid<-seq(0,T,h)
expmatA<-expmatA_SP(h,ny1,ny2,eta1,eta2)  # matrix exponential for a full step h
startv1<-rep(0,kappa)
sol<-Hawkes_SP_LT_Cpp_(grid,h,startv1,c1,c2,ny1,ny2,p1,p2,N,expmatA,eta1,eta2)
plot(density(sol[1,],from=start_P1,to=end_P1,n=num)$x,density(sol[1,],from=start_P1,to=end_P1,n=num)$y,type="l",col="black",xlim=c(xl,xr),ylim=c(0,1),xlab="",ylab="",xaxt="n")
lines(density(sol[2,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[3,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[4,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[5,],from=start_P2,to=end_P2,n=num),type="l",col="black",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[6,],from=start_P2,to=end_P2,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[7,],from=start_P2,to=end_P2,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
abline(h=0,col="grey",lwd=1)
mtext(expression(paste(pi[X],"(",x,")")), line = 2, side = 2, outer = F,cex=1.3)
legend("topright", legend=c("LT",expression(Delta==0.01)),
       col=c("red","green"), cex=1.7,bg = "white",lty=NULL,bty="n")
#Splitting LT: h2 (Delta = 0.5)
set.seed(1)
h<-5*10^-1
grid<-seq(0,T,h)
expmatA<-expmatA_SP(h,ny1,ny2,eta1,eta2)
startv1<-rep(0,kappa)
sol<-Hawkes_SP_LT_Cpp_(grid,h,startv1,c1,c2,ny1,ny2,p1,p2,N,expmatA,eta1,eta2)
plot(density(sol[1,],from=start_P1,to=end_P1,n=num)$x,density(sol[1,],from=start_P1,to=end_P1,n=num)$y,type="l",col="black",xlim=c(xl,xr),ylim=c(0,1),xlab="",ylab="",xaxt="n",yaxt="n")
lines(density(sol[2,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[3,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[4,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[5,],from=start_P2,to=end_P2,n=num),type="l",col="black",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[6,],from=start_P2,to=end_P2,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[7,],from=start_P2,to=end_P2,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
abline(h=0,col="grey",lwd=1)
legend("topright", legend=c("LT",expression(Delta==0.5)),
       col=c("red","green"), cex=1.7,bg = "white",lty=NULL,bty="n")
#Splitting LT: h3 (Delta = 0.7)
set.seed(1)
h<-7*10^-1
grid<-seq(0,T,h)
expmatA<-expmatA_SP(h,ny1,ny2,eta1,eta2)
startv1<-rep(0,kappa)
sol<-Hawkes_SP_LT_Cpp_(grid,h,startv1,c1,c2,ny1,ny2,p1,p2,N,expmatA,eta1,eta2)
# FIX: this panel had xlab="y", which mislabelled the x axis and was
# inconsistent with every sibling panel (all use xlab="").
plot(density(sol[1,],from=start_P1,to=end_P1,n=num)$x,density(sol[1,],from=start_P1,to=end_P1,n=num)$y,type="l",col="black",xlim=c(xl,xr),ylim=c(0,1),xlab="",ylab="",xaxt="n",yaxt="n")
lines(density(sol[2,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[3,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[4,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[5,],from=start_P2,to=end_P2,n=num),type="l",col="black",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[6,],from=start_P2,to=end_P2,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[7,],from=start_P2,to=end_P2,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
abline(h=0,col="grey",lwd=1)
legend("topright", legend=c("LT",expression(Delta==0.7)),
       col=c("red","green"), cex=1.7,bg = "white",lty=NULL,bty="n")
dev.off()
#--------------------------
# --- Figure: invariant densities under the Strang (ST) splitting ------------
# Same layout as the EM/LT figures: three panels (Delta = 0.01, 0.5, 0.7).
# Note the half-step h/2 passed to expmatA_SP, as used by the Strang scheme.
# Rows 1-4 of `sol` use the P1 range (row 1 black as reference, rows 2-4
# grey); rows 5-7 use the P2 range (row 5 black, rows 6-7 grey).
pdf(width=12,height=3.3,"Fig_density_EM_SP_Sub3.pdf")
par(mfrow=c(1,3))
par(mar=c(0.3,1, 1,3), oma=c(4,1.5,2,0), mai = c(0.1, 0.35, 0.1, 0.1))
#Splitting ST: h1 (Delta = 0.01)
set.seed(1)
h<-10^-2
grid<-seq(0,T,h)
expmatA<-expmatA_SP(h/2,ny1,ny2,eta1,eta2)  # half-step exponential for Strang
startv1<-rep(0,kappa)
sol<-Hawkes_SP_ST_Cpp_(grid,h,startv1,c1,c2,ny1,ny2,p1,p2,N,expmatA,eta1,eta2)
plot(density(sol[1,],from=start_P1,to=end_P1,n=num)$x,density(sol[1,],from=start_P1,to=end_P1,n=num)$y,type="l",col="black",xlim=c(xl,xr),ylim=c(0,1),xlab="",ylab="")
lines(density(sol[2,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[3,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[4,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[5,],from=start_P2,to=end_P2,n=num),type="l",col="black",xlim=c(xl,xr),ylim=c(0,1))
# FIX: sol[6,] was drawn in black here, unlike every other panel of these
# figures where only the row-5 reference curve is black and rows 6-7 are grey.
lines(density(sol[6,],from=start_P2,to=end_P2,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[7,],from=start_P2,to=end_P2,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
abline(h=0,col="grey",lwd=1)
mtext(expression("x"), line = 3, side = 1, outer = F,cex=1.3)
mtext(expression(paste(pi[X],"(",x,")")), line = 2, side = 2, outer = F,cex=1.3)
legend("topright", legend=c("ST",expression(Delta==0.01)),
       col=c("red","green"), cex=1.7,bg = "white",lty=NULL,bty="n")
#Splitting ST: h2 (Delta = 0.5)
set.seed(1)
h<-5*10^-1
grid<-seq(0,T,h)
expmatA<-expmatA_SP(h/2,ny1,ny2,eta1,eta2)
startv1<-rep(0,kappa)
sol<-Hawkes_SP_ST_Cpp_(grid,h,startv1,c1,c2,ny1,ny2,p1,p2,N,expmatA,eta1,eta2)
plot(density(sol[1,],from=start_P1,to=end_P1,n=num)$x,density(sol[1,],from=start_P1,to=end_P1,n=num)$y,type="l",col="black",xlim=c(xl,xr),ylim=c(0,1),xlab="",ylab="",yaxt="n")
lines(density(sol[2,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[3,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[4,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[5,],from=start_P2,to=end_P2,n=num),type="l",col="black",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[6,],from=start_P2,to=end_P2,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[7,],from=start_P2,to=end_P2,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
abline(h=0,col="grey",lwd=1)
mtext(expression("x"), line = 3, side = 1, outer = F,cex=1.3)
legend("topright", legend=c("ST",expression(Delta==0.5)),
       col=c("red","green"), cex=1.7,bg = "white",lty=NULL,bty="n")
#Splitting ST: h3 (Delta = 0.7)
set.seed(1)
h<-7*10^-1
grid<-seq(0,T,h)
expmatA<-expmatA_SP(h/2,ny1,ny2,eta1,eta2)
startv1<-rep(0,kappa)
sol<-Hawkes_SP_ST_Cpp_(grid,h,startv1,c1,c2,ny1,ny2,p1,p2,N,expmatA,eta1,eta2)
plot(density(sol[1,],from=start_P1,to=end_P1,n=num)$x,density(sol[1,],from=start_P1,to=end_P1,n=num)$y,type="l",col="black",xlim=c(xl,xr),ylim=c(0,1),xlab="",ylab="",yaxt="n")
lines(density(sol[2,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[3,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[4,],from=start_P1,to=end_P1,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[5,],from=start_P2,to=end_P2,n=num),type="l",col="black",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[6,],from=start_P2,to=end_P2,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
lines(density(sol[7,],from=start_P2,to=end_P2,n=num),type="l",col="grey",xlim=c(xl,xr),ylim=c(0,1))
abline(h=0,col="grey",lwd=1)
mtext(expression("x"), line = 3, side = 1, outer = F,cex=1.3)
legend("topright", legend=c("ST",expression(Delta==0.7)),
       col=c("red","green"), cex=1.7,bg = "white",lty=NULL,bty="n")
dev.off()
|
# LDA tutorial: predict movie Genre from Runtime, Metascore and imdbRating.
# NOTE(review): setwd() runs *after* read.delim(), so movie.txt is read from
# whatever directory R started in -- confirm the intended ordering.
movie <- read.delim("movie.txt")
setwd("/Users/nictsau/Desktop")
View(movie)
library(tidyverse)
library(MASS)
# NOTE(review): re-installing packages on every run is wasteful; consider
# installing once interactively rather than inside the analysis script.
install.packages("MASS")
install.packages("klaR")
library(klaR)
# Set a seed value so the random train/test split is reproducible
set.seed(123)
# Split the data 50/50 (each row independently assigned to train or test)
training_sample <- sample(c(TRUE, FALSE), nrow(movie), replace = TRUE, prob = c(0.5, 0.5))
# Define the Train Data
train <- movie[training_sample, ]
# Define the Testing Data
test <- movie[!training_sample, ]
# Create an initial LDA model (m1) based upon the data
# Results: Comedy (47%), Drama (42%), Horror (10%)
# Compare mean differences in Runtime and Metascore first
# LD1 explains 99.1% of the variation
m1 <- lda(Genre ~ Runtime + Metascore + imdbRating, train); m1
# Plot LD1 & LD2
plot(m1, col = as.integer(train$Genre))
# Plot LD1 only
plot(m1, dimen = 1, type = "b")
# Compare model against train set
lda.train <- predict(m1)
train$lda <- lda.train$class
# Confusion matrix: predicted class (rows) vs. true class (columns).
# (Renamed from `table`, which shadowed base::table.)
conf_train <- table(train$lda, train$Genre)
# Accuracy = all correctly classified cases = sum of the diagonal.
# The original summed only cells [1] and [5], silently dropping the third
# class; sum(diag(.)) is correct for any number of genres.
accuracy <- sum(diag(conf_train)) / sum(conf_train)
accuracy
(319+213)/(319+156+78+92+213+9)
# The total number of correctly predicted observations is the sum of diagonal (328 + 226 + 0)
# So this model fit the training data correctly for 63.97% of the case
# Verifying the training set doesn't prove accuracy,
# but a poor fit to the training data could be a sign that the model isn't a good one.
# Compare model against test set
lda.test <- predict(m1, test)
test$lda <- lda.test$class
conf_test <- table(test$lda, test$Genre)
conf_test
accuracy2 <- sum(diag(conf_test)) / sum(conf_test)
accuracy2
| /9 - Discriminant Analysis/Tutorial9.R | no_license | PannaD8ta/Multivariate_Analysis | R | false | false | 1,537 | r | movie <- read.delim("movie.txt")
# LDA tutorial (duplicate record): predict movie Genre from Runtime,
# Metascore and imdbRating. `movie` was read on the preceding line.
setwd("/Users/nictsau/Desktop")
View(movie)
library(tidyverse)
library(MASS)
# NOTE(review): re-installing packages on every run is wasteful; consider
# installing once interactively rather than inside the analysis script.
install.packages("MASS")
install.packages("klaR")
library(klaR)
# Set a seed value so the random train/test split is reproducible
set.seed(123)
# Split the data 50/50 (each row independently assigned to train or test)
training_sample <- sample(c(TRUE, FALSE), nrow(movie), replace = TRUE, prob = c(0.5, 0.5))
# Define the Train Data
train <- movie[training_sample, ]
# Define the Testing Data
test <- movie[!training_sample, ]
# Create an initial LDA model (m1) based upon the data
# Results: Comedy (47%), Drama (42%), Horror (10%)
# Compare mean differences in Runtime and Metascore first
# LD1 explains 99.1% of the variation
m1 <- lda(Genre ~ Runtime + Metascore + imdbRating, train); m1
# Plot LD1 & LD2
plot(m1, col = as.integer(train$Genre))
# Plot LD1 only
plot(m1, dimen = 1, type = "b")
# Compare model against train set
lda.train <- predict(m1)
train$lda <- lda.train$class
# Confusion matrix: predicted class (rows) vs. true class (columns).
# (Renamed from `table`, which shadowed base::table.)
conf_train <- table(train$lda, train$Genre)
# Accuracy = all correctly classified cases = sum of the diagonal.
# The original summed only cells [1] and [5], silently dropping the third
# class; sum(diag(.)) is correct for any number of genres.
accuracy <- sum(diag(conf_train)) / sum(conf_train)
accuracy
(319+213)/(319+156+78+92+213+9)
# The total number of correctly predicted observations is the sum of diagonal (328 + 226 + 0)
# So this model fit the training data correctly for 63.97% of the case
# Verifying the training set doesn't prove accuracy,
# but a poor fit to the training data could be a sign that the model isn't a good one.
# Compare model against test set
lda.test <- predict(m1, test)
test$lda <- lda.test$class
conf_test <- table(test$lda, test$Genre)
conf_test
accuracy2 <- sum(diag(conf_test)) / sum(conf_test)
accuracy2
|
# Builds per-subject and group-mean weighted graphs for the pFPN atlas under
# three edge-selection schemes (consensus / density / consistency), one
# section per scheme. Each section: (1) in parallel, build an igraph object
# per subject and threshold from the thresholded adjacency arrays, attach
# brainGraph attributes, and cache it to disk as an .rds file; (2) build
# group-mean weighted graphs; (3) re-read the per-subject files into g*FPN,
# check the subject order against the covariate table, and delete the cache.
# Relies on globals defined earlier (matsFPN1-3, groups, inds, thresh1,
# thresh2, covars, Study.ID, pFPN, savedir, savedir1) and on a registered
# parallel backend for %dopar% / .parallel = T.
# NOTE(review): per-subject files are written under `savedir1` but re-read
# with list.files(`savedir`, ...) -- confirm both point at the same directory.
# Consensus -----------------------------------------------------------------------------------
A.norm.sub <- matsFPN1$A.norm.sub
A.norm.mean <- matsFPN1$A.norm.mean
atlas <- "pFPN"
g1GrFPN <- g1FPN <- fnames <- vector('list', length=length(groups))
for (i in seq_along(groups)) {
  for (j in seq_along(thresh1)) {
    print(paste0('Threshold ', j, '/', length(thresh1), '; group ', i, '; ',
                 format(Sys.time(), '%H:%M:%S')))
    # Build + cache one graph per subject; the foreach return value is
    # discarded -- the written .rds files are the product.
    foreach (k=seq_along(inds[[i]])) %dopar% {
      g.tmp <- graph_from_adjacency_matrix(A.norm.sub[[j]][, , inds[[i]][k]],
                                          mode='undirected', diag = F, weighted = T)
      V(g.tmp)$name <- as.character(pFPN$name)
      g.tmp <- set_brainGraph_attr_sof(g.tmp, atlas, modality = 'fmri',
                                       weighting = 'sld', threshold = thresh1[j],
                                       subject = covars[groups[i], Study.ID[k]],
                                       group = groups[i],
                                       use.parallel = F,
                                       A = A.norm.sub[[j]][, , inds[[i]][k]])
      write_rds(g.tmp, paste0(savedir1,
                              sprintf('g%i_thr%02i_subj%03i%s', i, j, k, '.rds')))
    }
  }
  # group mean weighted graphs
  print(paste0('Group', i, '; ', format(Sys.time(), '%H:%M:%S')))
  g1GrFPN[[i]] <- lapply(seq_along(thresh1), function(x)
    graph_from_adjacency_matrix(A.norm.mean[[x]][[i]],
                                mode = 'undirected', diag = F, weighted = T))
  for (x in seq_along(thresh1)) {V(g1GrFPN[[i]][[x]])$name <- as.character(pFPN$name)}
  g1GrFPN[[i]] <- llply(seq_along(thresh1), function(x)
    set_brainGraph_attr_sof(g1GrFPN[[i]][[x]], atlas, modality = 'fmri',
                            weighting = 'sld', threshold = thresh1[x],
                            group = groups[i], A = A.norm.mean[[x]][[i]],
                            use.parallel = F), .parallel = T)
}
# Re-read the cached per-subject graphs; delete the cache files only once the
# graph names match the covariate table's subject IDs.
for (i in seq_along(groups)) {
  g1FPN[[i]] <- fnames[[i]] <- vector('list', length = length(thresh1))
  for (j in seq_along(thresh1)) {
    fnames[[i]][[j]] <- list.files(savedir,
                                   sprintf('*g%i_thr%02i.*', i, j), full.names = T)
    g1FPN[[i]][[j]] <- lapply(fnames[[i]][[j]], read_rds)
  }
  x <- all.equal(sapply(g1FPN[[i]][[1]], graph_attr, 'name'),
                 covars[groups[i], Study.ID])
  if (isTRUE(x)) lapply(fnames[[i]], file.remove)
}
write_rds(g1FPN, paste0(savedir1, 'g1FPN.rds'))
write_rds(g1GrFPN, paste0(savedir1, 'g1GrFPN.rds'))
# Density -------------------------------------------------------------------------------------
# Same pipeline as the Consensus section, but on matsFPN2 with thresh2.
A.norm.sub <- matsFPN2$A.norm.sub
A.norm.mean <- matsFPN2$A.norm.mean
atlas <- "pFPN"
g2GrFPN <- g2FPN <- fnames <- vector('list', length=length(groups))
for (i in seq_along(groups)) {
  for (j in seq_along(thresh2)) {
    print(paste0('Threshold ', j, '/', length(thresh2), '; group ', i, '; ',
                 format(Sys.time(), '%H:%M:%S')))
    foreach (k=seq_along(inds[[i]])) %dopar% {
      g.tmp <- graph_from_adjacency_matrix(A.norm.sub[[j]][, , inds[[i]][k]],
                                          mode='undirected', diag = F, weighted = T)
      V(g.tmp)$name <- as.character(pFPN$name)
      g.tmp <- set_brainGraph_attr_sof(g.tmp, atlas, modality = 'fmri',
                                       weighting = 'sld', threshold = thresh2[j],
                                       subject = covars[groups[i], Study.ID[k]],
                                       group = groups[i],
                                       use.parallel = F,
                                       A = A.norm.sub[[j]][, , inds[[i]][k]])
      write_rds(g.tmp, paste0(savedir1,
                              sprintf('g%i_thr%02i_subj%03i%s', i, j, k, '.rds')))
    }
  }
  # group mean weighted graphs
  print(paste0('Group', i, '; ', format(Sys.time(), '%H:%M:%S')))
  g2GrFPN[[i]] <- lapply(seq_along(thresh2), function(x)
    graph_from_adjacency_matrix(A.norm.mean[[x]][[i]],
                                mode = 'undirected', diag = F, weighted = T))
  for (x in seq_along(thresh2)) {V(g2GrFPN[[i]][[x]])$name <- as.character(pFPN$name)}
  g2GrFPN[[i]] <- llply(seq_along(thresh2), function(x)
    set_brainGraph_attr_sof(g2GrFPN[[i]][[x]], atlas, modality = 'fmri',
                            weighting = 'sld', threshold = thresh2[x],
                            group = groups[i], A = A.norm.mean[[x]][[i]],
                            use.parallel = F), .parallel = T)
}
for (i in seq_along(groups)) {
  g2FPN[[i]] <- fnames[[i]] <- vector('list', length = length(thresh2))
  for (j in seq_along(thresh2)) {
    fnames[[i]][[j]] <- list.files(savedir,
                                   sprintf('*g%i_thr%02i.*', i, j), full.names = T)
    g2FPN[[i]][[j]] <- lapply(fnames[[i]][[j]], read_rds)
  }
  x <- all.equal(sapply(g2FPN[[i]][[1]], graph_attr, 'name'),
                 covars[groups[i], Study.ID])
  if (isTRUE(x)) lapply(fnames[[i]], file.remove)
}
write_rds(g2FPN, paste0(savedir1, 'g2FPN.rds'))
write_rds(g2GrFPN, paste0(savedir1, 'g2GrFPN.rds'))
# Consistency ---------------------------------------------------------------------------------
# Same pipeline again, on matsFPN3 (also thresholded with thresh2).
A.norm.sub <- matsFPN3$A.norm.sub
A.norm.mean <- matsFPN3$A.norm.mean
atlas <- "pFPN"
g3GrFPN <- g3FPN <- fnames <- vector('list', length=length(groups))
for (i in seq_along(groups)) {
  for (j in seq_along(thresh2)) {
    print(paste0('Threshold ', j, '/', length(thresh2), '; group ', i, '; ',
                 format(Sys.time(), '%H:%M:%S')))
    foreach (k=seq_along(inds[[i]])) %dopar% {
      g.tmp <- graph_from_adjacency_matrix(A.norm.sub[[j]][, , inds[[i]][k]],
                                          mode='undirected', diag = F, weighted = T)
      V(g.tmp)$name <- as.character(pFPN$name)
      g.tmp <- set_brainGraph_attr_sof(g.tmp, atlas, modality = 'fmri',
                                       weighting = 'sld', threshold = thresh2[j],
                                       subject = covars[groups[i], Study.ID[k]],
                                       group = groups[i],
                                       use.parallel = F,
                                       A = A.norm.sub[[j]][, , inds[[i]][k]])
      write_rds(g.tmp, paste0(savedir1,
                              sprintf('g%i_thr%02i_subj%03i%s', i, j, k, '.rds')))
    }
  }
  # group mean weighted graphs
  print(paste0('Group', i, '; ', format(Sys.time(), '%H:%M:%S')))
  g3GrFPN[[i]] <- lapply(seq_along(thresh2), function(x)
    graph_from_adjacency_matrix(A.norm.mean[[x]][[i]],
                                mode = 'undirected', diag = F, weighted = T))
  for (x in seq_along(thresh2)) {V(g3GrFPN[[i]][[x]])$name <- as.character(pFPN$name)}
  g3GrFPN[[i]] <- llply(seq_along(thresh2), function(x)
    set_brainGraph_attr_sof(g3GrFPN[[i]][[x]], atlas, modality = 'fmri',
                            weighting = 'sld', threshold = thresh2[x],
                            group = groups[i], A = A.norm.mean[[x]][[i]],
                            use.parallel = F), .parallel = T)
}
for (i in seq_along(groups)) {
  g3FPN[[i]] <- fnames[[i]] <- vector('list', length = length(thresh2))
  for (j in seq_along(thresh2)) {
    fnames[[i]][[j]] <- list.files(savedir,
                                   sprintf('*g%i_thr%02i.*', i, j), full.names = T)
    g3FPN[[i]][[j]] <- lapply(fnames[[i]][[j]], read_rds)
  }
  x <- all.equal(sapply(g3FPN[[i]][[1]], graph_attr, 'name'),
                 covars[groups[i], Study.ID])
  if (isTRUE(x)) lapply(fnames[[i]], file.remove)
}
write_rds(g3FPN, paste0(savedir1, 'g3FPN.rds'))
write_rds(g3GrFPN, paste0(savedir1, 'g3GrFPN.rds'))
| /graphsFPN.R | no_license | soffiafdz/AddimexConnGraphs | R | false | false | 8,312 | r |
# Duplicate record of graphsFPN.R (text column of the dump): builds
# per-subject and group-mean weighted graphs for the pFPN atlas under three
# edge-selection schemes (consensus / density / consistency). Each section:
# (1) build + cache one brainGraph-annotated igraph per subject/threshold in
# parallel; (2) build group-mean graphs; (3) re-read the cache into g*FPN,
# check subject order against the covariates, and delete the cache files.
# Relies on globals (matsFPN1-3, groups, inds, thresh1/thresh2, covars,
# Study.ID, pFPN, savedir, savedir1) and a registered parallel backend.
# NOTE(review): files are written under `savedir1` but re-read with
# list.files(`savedir`, ...) -- confirm both point at the same directory.
# Consensus -----------------------------------------------------------------------------------
A.norm.sub <- matsFPN1$A.norm.sub
A.norm.mean <- matsFPN1$A.norm.mean
atlas <- "pFPN"
g1GrFPN <- g1FPN <- fnames <- vector('list', length=length(groups))
for (i in seq_along(groups)) {
  for (j in seq_along(thresh1)) {
    print(paste0('Threshold ', j, '/', length(thresh1), '; group ', i, '; ',
                 format(Sys.time(), '%H:%M:%S')))
    # Build + cache one graph per subject; foreach's value is discarded.
    foreach (k=seq_along(inds[[i]])) %dopar% {
      g.tmp <- graph_from_adjacency_matrix(A.norm.sub[[j]][, , inds[[i]][k]],
                                          mode='undirected', diag = F, weighted = T)
      V(g.tmp)$name <- as.character(pFPN$name)
      g.tmp <- set_brainGraph_attr_sof(g.tmp, atlas, modality = 'fmri',
                                       weighting = 'sld', threshold = thresh1[j],
                                       subject = covars[groups[i], Study.ID[k]],
                                       group = groups[i],
                                       use.parallel = F,
                                       A = A.norm.sub[[j]][, , inds[[i]][k]])
      write_rds(g.tmp, paste0(savedir1,
                              sprintf('g%i_thr%02i_subj%03i%s', i, j, k, '.rds')))
    }
  }
  # group mean weighted graphs
  print(paste0('Group', i, '; ', format(Sys.time(), '%H:%M:%S')))
  g1GrFPN[[i]] <- lapply(seq_along(thresh1), function(x)
    graph_from_adjacency_matrix(A.norm.mean[[x]][[i]],
                                mode = 'undirected', diag = F, weighted = T))
  for (x in seq_along(thresh1)) {V(g1GrFPN[[i]][[x]])$name <- as.character(pFPN$name)}
  g1GrFPN[[i]] <- llply(seq_along(thresh1), function(x)
    set_brainGraph_attr_sof(g1GrFPN[[i]][[x]], atlas, modality = 'fmri',
                            weighting = 'sld', threshold = thresh1[x],
                            group = groups[i], A = A.norm.mean[[x]][[i]],
                            use.parallel = F), .parallel = T)
}
# Re-read the cached per-subject graphs; delete them once the graph names
# match the covariate table's subject IDs.
for (i in seq_along(groups)) {
  g1FPN[[i]] <- fnames[[i]] <- vector('list', length = length(thresh1))
  for (j in seq_along(thresh1)) {
    fnames[[i]][[j]] <- list.files(savedir,
                                   sprintf('*g%i_thr%02i.*', i, j), full.names = T)
    g1FPN[[i]][[j]] <- lapply(fnames[[i]][[j]], read_rds)
  }
  x <- all.equal(sapply(g1FPN[[i]][[1]], graph_attr, 'name'),
                 covars[groups[i], Study.ID])
  if (isTRUE(x)) lapply(fnames[[i]], file.remove)
}
write_rds(g1FPN, paste0(savedir1, 'g1FPN.rds'))
write_rds(g1GrFPN, paste0(savedir1, 'g1GrFPN.rds'))
# Density -------------------------------------------------------------------------------------
# Same pipeline as the Consensus section, but on matsFPN2 with thresh2.
A.norm.sub <- matsFPN2$A.norm.sub
A.norm.mean <- matsFPN2$A.norm.mean
atlas <- "pFPN"
g2GrFPN <- g2FPN <- fnames <- vector('list', length=length(groups))
for (i in seq_along(groups)) {
  for (j in seq_along(thresh2)) {
    print(paste0('Threshold ', j, '/', length(thresh2), '; group ', i, '; ',
                 format(Sys.time(), '%H:%M:%S')))
    foreach (k=seq_along(inds[[i]])) %dopar% {
      g.tmp <- graph_from_adjacency_matrix(A.norm.sub[[j]][, , inds[[i]][k]],
                                          mode='undirected', diag = F, weighted = T)
      V(g.tmp)$name <- as.character(pFPN$name)
      g.tmp <- set_brainGraph_attr_sof(g.tmp, atlas, modality = 'fmri',
                                       weighting = 'sld', threshold = thresh2[j],
                                       subject = covars[groups[i], Study.ID[k]],
                                       group = groups[i],
                                       use.parallel = F,
                                       A = A.norm.sub[[j]][, , inds[[i]][k]])
      write_rds(g.tmp, paste0(savedir1,
                              sprintf('g%i_thr%02i_subj%03i%s', i, j, k, '.rds')))
    }
  }
  # group mean weighted graphs
  print(paste0('Group', i, '; ', format(Sys.time(), '%H:%M:%S')))
  g2GrFPN[[i]] <- lapply(seq_along(thresh2), function(x)
    graph_from_adjacency_matrix(A.norm.mean[[x]][[i]],
                                mode = 'undirected', diag = F, weighted = T))
  for (x in seq_along(thresh2)) {V(g2GrFPN[[i]][[x]])$name <- as.character(pFPN$name)}
  g2GrFPN[[i]] <- llply(seq_along(thresh2), function(x)
    set_brainGraph_attr_sof(g2GrFPN[[i]][[x]], atlas, modality = 'fmri',
                            weighting = 'sld', threshold = thresh2[x],
                            group = groups[i], A = A.norm.mean[[x]][[i]],
                            use.parallel = F), .parallel = T)
}
for (i in seq_along(groups)) {
  g2FPN[[i]] <- fnames[[i]] <- vector('list', length = length(thresh2))
  for (j in seq_along(thresh2)) {
    fnames[[i]][[j]] <- list.files(savedir,
                                   sprintf('*g%i_thr%02i.*', i, j), full.names = T)
    g2FPN[[i]][[j]] <- lapply(fnames[[i]][[j]], read_rds)
  }
  x <- all.equal(sapply(g2FPN[[i]][[1]], graph_attr, 'name'),
                 covars[groups[i], Study.ID])
  if (isTRUE(x)) lapply(fnames[[i]], file.remove)
}
write_rds(g2FPN, paste0(savedir1, 'g2FPN.rds'))
write_rds(g2GrFPN, paste0(savedir1, 'g2GrFPN.rds'))
# Consistency ---------------------------------------------------------------------------------
# Same pipeline again, on matsFPN3 (also thresholded with thresh2).
A.norm.sub <- matsFPN3$A.norm.sub
A.norm.mean <- matsFPN3$A.norm.mean
atlas <- "pFPN"
g3GrFPN <- g3FPN <- fnames <- vector('list', length=length(groups))
for (i in seq_along(groups)) {
  for (j in seq_along(thresh2)) {
    print(paste0('Threshold ', j, '/', length(thresh2), '; group ', i, '; ',
                 format(Sys.time(), '%H:%M:%S')))
    foreach (k=seq_along(inds[[i]])) %dopar% {
      g.tmp <- graph_from_adjacency_matrix(A.norm.sub[[j]][, , inds[[i]][k]],
                                          mode='undirected', diag = F, weighted = T)
      V(g.tmp)$name <- as.character(pFPN$name)
      g.tmp <- set_brainGraph_attr_sof(g.tmp, atlas, modality = 'fmri',
                                       weighting = 'sld', threshold = thresh2[j],
                                       subject = covars[groups[i], Study.ID[k]],
                                       group = groups[i],
                                       use.parallel = F,
                                       A = A.norm.sub[[j]][, , inds[[i]][k]])
      write_rds(g.tmp, paste0(savedir1,
                              sprintf('g%i_thr%02i_subj%03i%s', i, j, k, '.rds')))
    }
  }
  # group mean weighted graphs
  print(paste0('Group', i, '; ', format(Sys.time(), '%H:%M:%S')))
  g3GrFPN[[i]] <- lapply(seq_along(thresh2), function(x)
    graph_from_adjacency_matrix(A.norm.mean[[x]][[i]],
                                mode = 'undirected', diag = F, weighted = T))
  for (x in seq_along(thresh2)) {V(g3GrFPN[[i]][[x]])$name <- as.character(pFPN$name)}
  g3GrFPN[[i]] <- llply(seq_along(thresh2), function(x)
    set_brainGraph_attr_sof(g3GrFPN[[i]][[x]], atlas, modality = 'fmri',
                            weighting = 'sld', threshold = thresh2[x],
                            group = groups[i], A = A.norm.mean[[x]][[i]],
                            use.parallel = F), .parallel = T)
}
for (i in seq_along(groups)) {
  g3FPN[[i]] <- fnames[[i]] <- vector('list', length = length(thresh2))
  for (j in seq_along(thresh2)) {
    fnames[[i]][[j]] <- list.files(savedir,
                                   sprintf('*g%i_thr%02i.*', i, j), full.names = T)
    g3FPN[[i]][[j]] <- lapply(fnames[[i]][[j]], read_rds)
  }
  x <- all.equal(sapply(g3FPN[[i]][[1]], graph_attr, 'name'),
                 covars[groups[i], Study.ID])
  if (isTRUE(x)) lapply(fnames[[i]], file.remove)
}
write_rds(g3FPN, paste0(savedir1, 'g3FPN.rds'))
write_rds(g3GrFPN, paste0(savedir1, 'g3GrFPN.rds'))
|
# Reconstruct the shortest passcode consistent with a keylog file
# (Project Euler 79). Each line of the file is a successful login attempt:
# three characters observed in their correct relative order.
#
# path:    path to the keylog text file, one attempt per line.
# returns: a single string -- the reconstructed passcode.
compute <- function(path) {
  entries <- unique(readLines(path, warn = FALSE))
  answer <- ""
  while (length(entries) != 1) {
    # A leading character that never appears later in any entry must be the
    # next character of the passcode.
    firsts <- unique(substr(entries, 1, 1))
    tails <- substr(entries, 2, nchar(entries))
    heads <- firsts[vapply(firsts, function(ch) !any(grepl(ch, tails)), logical(1))]
    answer <- paste0(answer, heads)
    # Consume the resolved character: strip it from every entry that starts
    # with it, keeping the remaining two-character suffixes in play.
    leftovers <- character(0)
    for (entry in entries) {
      if (heads == substr(entry, 1, 1)) {
        if (nchar(entry) == 3) leftovers <- c(leftovers, substr(entry, 2, 3))
        entries <- entries[entries != entry]
      }
    }
    entries <- unique(c(entries, leftovers))
  }
  paste0(answer, entries)
}
| /problems/0079/compute.R | permissive | Dynortice/Project-Euler | R | false | false | 894 | r | compute <- function(path) {
keylog <- unique(readLines(path, warn = FALSE))
password <- ""
while (length(keylog) != 1) {
candidates <- unique(sapply(keylog, function (x) substr(x, 1, 1)))
for (candidate in candidates) {
for (key in keylog) {
if (grepl(candidate, substr(key, 2, nchar(key)))) {
candidates <- candidates[candidates != candidate]
break
}
}
}
password <- paste0(password, candidates)
new_keylog <- NULL
for (key in keylog) {
if (candidates == substr(key, 1, 1)) {
if (nchar(key) == 3) new_keylog <- c(new_keylog, substr(key, 2, 3))
keylog <- keylog[keylog != key]
}
}
keylog <- unique(c(keylog, new_keylog))
}
return(paste0(password, keylog))
}
|
# 단원문제 (chapter exercises): k-fold cross-validation of classifiers.
library(caret)
library(class)
library(e1071)
library(randomForest)
library(rpart)
library(survival)

# --- Exercise 1 (1번): random forest on survival::colon, k-fold CV -----------
# (The original bare heading line `1번` was not a comment and was a syntax
# error that aborted the script; same for `2. ucla admission` below.)
colon <- na.omit(colon)
# Make the outcome categorical *before* taking the shuffled copy: the
# original converted colon$status only after `data` had been created, so
# data$status stayed numeric and randomForest silently ran in regression mode.
colon$status <- factor(colon$status)
# Seed before shuffling so the fold assignment is reproducible (the original
# shuffled first and called set.seed afterwards).
set.seed(2021)
data <- colon[sample(nrow(colon)), ]

# k-fold cross-validation of randomForest(status ~ .) on `df`; returns the
# average accuracy, precision and recall over the k folds (positive class =
# second factor level). Replaces four verbatim copies of the same loop.
rf_cv <- function(df, k) {
  # Integer fold size: the original used nrow(df)/k, which yields fractional
  # (and overlapping, after truncation) row indices when k does not divide n.
  q <- nrow(df) %/% k
  accuracy <- precision <- recall <- 0
  for (i in seq_len(k)) {
    test_list <- ((i - 1) * q + 1):(i * q)
    data_test <- df[test_list, ]
    data_train <- df[-test_list, ]
    rf <- randomForest(status ~ ., data_train)
    pred <- predict(rf, data_test, type = 'class')
    t <- table(pred, data_test$status)
    accuracy <- accuracy + (t[1, 1] + t[2, 2]) / nrow(data_test)
    precision <- precision + t[2, 2] / (t[2, 1] + t[2, 2])
    recall <- recall + t[2, 2] / (t[1, 2] + t[2, 2])
  }
  c(accuracy = accuracy / k, precision = precision / k, recall = recall / k)
}

# Run the CV for each requested number of folds and report the accuracy.
for (k in c(5, 10, 15, 20)) {
  res <- rf_cv(data, k)
  print(sprintf('랜덤 포레스트: 정확도=%f', res[['accuracy']]))
}

# --- Exercise 2 (2번): UCLA admissions, model comparison with caret ----------
ucla <- read.csv('https://stats.idre.ucla.edu/stat/data/binary.csv')
ucla
clean_ucla <- na.omit(ucla)
clean_ucla <- clean_ucla[c(TRUE, FALSE), ]  # keep every other row (recycled logical index)
clean_ucla$admit <- factor(clean_ucla$admit)
# 10-fold cross-validation for every model below.
control <- trainControl(method = 'cv', number = 10)
formular <- admit ~ gre + gpa + rank
# FIX: four of the original calls spelled the metric 'Accuarcy'; corrected so
# caret selects models by the intended accuracy metric.
L <- train(formular, data = clean_ucla, method = 'svmLinear', metric = 'Accuracy', trControl = control)
LW <- train(formular, data = clean_ucla, method = 'svmLinearWeights', metric = 'Accuracy', trControl = control)
P <- train(formular, data = clean_ucla, method = 'svmPoly', metric = 'Accuracy', trControl = control)
R <- train(formular, data = clean_ucla, method = 'svmRadial', metric = 'Accuracy', trControl = control)
RW <- train(formular, data = clean_ucla, method = 'svmRadialWeights', metric = 'Accuracy', trControl = control)
f100 <- train(formular, data = clean_ucla, method = 'rf', ntree = 100, metric = 'Accuracy', trControl = control)
f300 <- train(formular, data = clean_ucla, method = 'rf', ntree = 300, metric = 'Accuracy', trControl = control)
f500 <- train(formular, data = clean_ucla, method = 'rf', ntree = 500, metric = 'Accuracy', trControl = control)
r <- train(formular, data = clean_ucla, method = 'rpart', metric = 'Accuracy', trControl = control)
k <- train(formular, data = clean_ucla, method = 'knn', metric = 'Accuracy', trControl = control)
g <- train(formular, data = clean_ucla, method = 'glm', metric = 'Accuracy', trControl = control)
# FIX: the original list was missing the comma between `knn=k` and `glm=g`
# (syntax error).
resamp <- resamples(list(선형 = L, 선형가중치 = LW, 다항식 = P, RBF = R, 가중치 = RW,
                         rf100 = f100, rf300 = f300, rf500 = f500,
                         tree = r, knn = k, glm = g))
| /ch10/ch10 - 단원문제.R | no_license | khw8401/R-Lecture-2021 | R | false | false | 4,486 | r | # 단원문제
library(caret)
library(class)
library(e1071)
library(randomForest)
library(rpart)
library(survival)
1번
colon <- na.omit(colon)
data <- colon[sample(nrow(colon)),]
colon$status <- factor(colon$status)
set.seed(2021)
colon
# k = 5
k <- 5
q <- nrow(data)/k
l <- 1:nrow(data)
accuracy <- 0
precision <- 0
recall <- 0
# random forest
for (i in 1:k) {
test_list <- ((i-1)*q+1) : (i*q)
data_test <- data[test_list,]
data_train <- data[-test_list,]
rf <- randomForest(status ~., data_train)
pred <- predict(rf, data_test, type='class')
t <- table(pred, data_test$status)
accuracy <- accuracy + (t[1,1]+t[2,2])/nrow(data_test)
precision <- precision + t[2,2]/(t[2,1]+t[2,2])
recall <- recall + t[2,2]/(t[1,2]+t[2,2])
}
rf_avg_acc <- accuracy / k
rf_avg_prec <- precision / k
rf_avg_rec <- recall / k
sprintf('랜덤 포레스트: 정확도=%f',
rf_avg_acc)
# ---------------------------------------------------------------------------
# k-fold cross-validation of a random forest predicting `status`.
#
# Replaces three copy-pasted evaluation loops (k = 10, 15, 20) with one
# helper.  Also fixes a latent indexing bug: the original computed
# q <- nrow(data)/k and indexed rows with ((i-1)*q+1):(i*q), which yields
# fractional (silently truncated) indices whenever nrow(data) is not a
# multiple of k.  Fold boundaries are rounded here so every row is used
# exactly once across the k test folds.
#
# Returns a list with the mean accuracy, precision and recall over folds.
# The 2x2 confusion table `tab` has predictions in rows and truth in
# columns, so precision = TP / rowsum(pred = positive) and
# recall = TP / colsum(truth = positive), matching the original formulas.
rf_cv <- function(data, k) {
  n <- nrow(data)
  bounds <- round(seq(0, n, length.out = k + 1))
  accuracy <- 0
  precision <- 0
  recall <- 0
  for (i in seq_len(k)) {
    test_rows <- (bounds[i] + 1):bounds[i + 1]
    data_test <- data[test_rows, ]
    data_train <- data[-test_rows, ]
    rf <- randomForest(status ~ ., data_train)
    pred <- predict(rf, data_test, type = 'class')
    tab <- table(pred, data_test$status)   # was `t`, which masked base::t()
    accuracy <- accuracy + (tab[1, 1] + tab[2, 2]) / nrow(data_test)
    precision <- precision + tab[2, 2] / (tab[2, 1] + tab[2, 2])
    recall <- recall + tab[2, 2] / (tab[1, 2] + tab[2, 2])
  }
  list(accuracy = accuracy / k,
       precision = precision / k,
       recall = recall / k)
}

# Evaluate the random forest for k = 10, 15 and 20 (same three settings as
# the original copy-pasted sections).
for (k in c(10, 15, 20)) {
  q <- nrow(data) / k   # kept for compatibility with later script code
  l <- 1:nrow(data)     # kept for compatibility (unused here, as before)
  res <- rf_cv(data, k)
  rf_avg_acc <- res$accuracy
  rf_avg_prec <- res$precision
  rf_avg_rec <- res$recall
  # sprintf() does not auto-print inside a loop, so print explicitly.
  print(sprintf('랜덤 포레스트: 정확도=%f',
                rf_avg_acc))
}
library(caret)
library(class)
library(e1071)
library(randomForest)
library(rpart)
library(survival)

# 2. ucla admission
# (this heading was a bare prose line without a leading '#', i.e. a
# syntax error in the original script)
ucla <- read.csv('https://stats.idre.ucla.edu/stat/data/binary.csv')
ucla
library(caret)
clean_ucla <- na.omit(ucla)
clean_ucla <- clean_ucla[c(TRUE, FALSE), ]  # keep every other row (subsample)
clean_ucla$admit <- factor(clean_ucla$admit)
# 10-fold cross-validation; model comparison by accuracy.
control <- trainControl(method = 'cv', number = 10)
formular <- admit ~ gre + gpa + rank
# NOTE: several calls below had metric = 'Accuarcy' (misspelled); caret
# warns and falls back to a default metric in that case, so the typo is
# corrected to 'Accuracy' throughout.
L <- train(formular, data = clean_ucla, method = 'svmLinear', metric = 'Accuracy', trControl = control)
LW <- train(formular, data = clean_ucla, method = 'svmLinearWeights', metric = 'Accuracy', trControl = control)
P <- train(formular, data = clean_ucla, method = 'svmPoly', metric = 'Accuracy', trControl = control)
R <- train(formular, data = clean_ucla, method = 'svmRadial', metric = 'Accuracy', trControl = control)
RW <- train(formular, data = clean_ucla, method = 'svmRadialWeights', metric = 'Accuracy', trControl = control)
f100 <- train(formular, data = clean_ucla, method = 'rf', ntree = 100, metric = 'Accuracy', trControl = control)
f300 <- train(formular, data = clean_ucla, method = 'rf', ntree = 300, metric = 'Accuracy', trControl = control)
f500 <- train(formular, data = clean_ucla, method = 'rf', ntree = 500, metric = 'Accuracy', trControl = control)
r <- train(formular, data = clean_ucla, method = 'rpart', metric = 'Accuracy', trControl = control)
k <- train(formular, data = clean_ucla, method = 'knn', metric = 'Accuracy', trControl = control)
g <- train(formular, data = clean_ucla, method = 'glm', metric = 'Accuracy', trControl = control)
# Collect resampling results.  Fixed the missing comma between `knn = k`
# and `glm = g`, which made the original call a syntax error.
resamp <- resamples(list(선형 = L, 선형가중치 = LW, 다항식 = P, RBF = R, 가중치 = RW,
                         rf100 = f100, rf300 = f300, rf500 = f500,
                         tree = r, knn = k, glm = g))
|
testlist <- list(a = 1634609448L, b = 693770778L, x = c(-1L, -1L, 107574111L, 1651074817L, 1701733481L))
result <- do.call(grattan:::anyOutside,testlist)
str(result) | /grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610128369-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 166 | r | testlist <- list(a = 1634609448L, b = 693770778L, x = c(-1L, -1L, 107574111L, 1651074817L, 1701733481L))
result <- do.call(grattan:::anyOutside,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mark_outlier_rows.R
\name{mark_outlier_rows}
\alias{mark_outlier_rows}
\title{Mark outlier rows.}
\usage{
mark_outlier_rows(
population,
variables,
sample,
method = "svd+iqr",
outlier_col = "is_outlier",
...
)
}
\arguments{
\item{population}{tbl with grouping (metadata) and observation variables.}
\item{variables}{character vector specifying observation variables.}
\item{sample}{tbl containing sample that is used by outlier removal methods
to estimate parameters. \code{sample} has same structure as
\code{population}. Typically, \code{sample} corresponds to controls in the
experiment.}
\item{method}{optional character string specifying method for outlier
removal. There is currently only one option (\code{"svd+iqr"}).}
\item{outlier_col}{optional character string specifying the name for the
column that will indicate outliers (in the output).
Default \code{"is_outlier"}.}
\item{...}{arguments passed to outlier removal method.}
}
\value{
\code{population} with an extra column \code{is_outlier}.
}
\description{
\code{mark_outlier_rows} marks outlier rows by adding an indicator column;
it does not drop them.
}
\examples{
suppressMessages(suppressWarnings(library(magrittr)))
population <- tibble::tibble(
Metadata_group = sample(c("a", "b"), 100, replace = TRUE),
Metadata_type = sample(c("control", "trt"), 100, replace = TRUE),
AreaShape_Area = c(rnorm(98), 20, 30),
AreaShape_Eccentricity = rnorm(100)
)
variables <- c("AreaShape_Area", "AreaShape_Eccentricity")
sample <- population \%>\% dplyr::filter(Metadata_type == "control")
population_marked <-
cytominer::mark_outlier_rows(
population,
variables,
sample,
method = "svd+iqr"
)
population_marked \%>\%
dplyr::group_by(is_outlier) \%>\%
dplyr::sample_n(3)
}
| /man/mark_outlier_rows.Rd | permissive | cytomining/cytominer | R | false | true | 1,803 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mark_outlier_rows.R
\name{mark_outlier_rows}
\alias{mark_outlier_rows}
\title{Mark outlier rows.}
\usage{
mark_outlier_rows(
population,
variables,
sample,
method = "svd+iqr",
outlier_col = "is_outlier",
...
)
}
\arguments{
\item{population}{tbl with grouping (metadata) and observation variables.}
\item{variables}{character vector specifying observation variables.}
\item{sample}{tbl containing sample that is used by outlier removal methods
to estimate parameters. \code{sample} has same structure as
\code{population}. Typically, \code{sample} corresponds to controls in the
experiment.}
\item{method}{optional character string specifying method for outlier
removal. There is currently only one option (\code{"svd+iqr"}).}
\item{outlier_col}{optional character string specifying the name for the
column that will indicate outliers (in the output).
Default \code{"is_outlier"}.}
\item{...}{arguments passed to outlier removal method.}
}
\value{
\code{population} with an extra column \code{is_outlier}.
}
\description{
\code{mark_outlier_rows} marks outlier rows by adding an indicator column;
it does not drop them.
}
\examples{
suppressMessages(suppressWarnings(library(magrittr)))
population <- tibble::tibble(
Metadata_group = sample(c("a", "b"), 100, replace = TRUE),
Metadata_type = sample(c("control", "trt"), 100, replace = TRUE),
AreaShape_Area = c(rnorm(98), 20, 30),
AreaShape_Eccentricity = rnorm(100)
)
variables <- c("AreaShape_Area", "AreaShape_Eccentricity")
sample <- population \%>\% dplyr::filter(Metadata_type == "control")
population_marked <-
cytominer::mark_outlier_rows(
population,
variables,
sample,
method = "svd+iqr"
)
population_marked \%>\%
dplyr::group_by(is_outlier) \%>\%
dplyr::sample_n(3)
}
|
# Three sample objects: a string, an integer sequence, a logical vector.
x <- "aaa"
y <- 1:5
z <- c(TRUE, FALSE, TRUE)
| /dumpdata.R | no_license | dfarberovas/TestRepo | R | false | false | 46 | r | x <-
"aaa"
y <-
1:5
z <-
c(TRUE, FALSE, TRUE)
|
#https://gist.github.com/kdauria/524eade46135f6348140
#Stat_smooth_Func.R
# stat_smooth_func(): companion to ggplot2::stat_smooth() that draws the
# fitted model's equation and R^2 as a text label (rendered through the
# StatSmoothFunc ggproto object).  All smoothing arguments mirror
# stat_smooth(); `xpos` / `ypos` optionally pin the label position --
# when NULL the position is derived from the data in compute_group().
stat_smooth_func <- function(mapping = NULL, data = NULL,
                             geom = "smooth", position = "identity",
                             ...,
                             method = "auto",
                             formula = y ~ x,
                             se = TRUE,
                             n = 80,
                             span = 0.75,
                             fullrange = FALSE,
                             level = 0.95,
                             method.args = list(),
                             na.rm = FALSE,
                             show.legend = NA,
                             inherit.aes = TRUE,
                             xpos = NULL,
                             ypos = NULL) {
  # Standard ggplot2 layer construction: everything smoothing-related is
  # forwarded to the stat via `params`.
  layer(
    data = data,
    mapping = mapping,
    stat = StatSmoothFunc,
    geom = geom,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = list(
      method = method,
      formula = formula,
      se = se,
      n = n,
      fullrange = fullrange,
      level = level,
      na.rm = na.rm,
      method.args = method.args,
      span = span,
      xpos = xpos,
      ypos = ypos,
      ...
    )
  )
}
# StatSmoothFunc: ggproto Stat that fits the requested smoother and emits a
# single (x, y, label) row holding the fitted equation and R^2 as a
# plotmath string, instead of the smoothed curve itself.
StatSmoothFunc <- ggproto("StatSmooth", Stat,
                          # Mirrors stat_smooth(): pick loess for small
                          # groups, mgcv::gam otherwise.
                          setup_params = function(data, params) {
                            # Figure out what type of smoothing to do: loess for small datasets,
                            # gam with a cubic regression basis for large data
                            # This is based on the size of the _largest_ group.
                            if (identical(params$method, "auto")) {
                              max_group <- max(table(data$group))
                              if (max_group < 1000) {
                                params$method <- "loess"
                              } else {
                                params$method <- "gam"
                                params$formula <- y ~ s(x, bs = "cs")
                              }
                            }
                            if (identical(params$method, "gam")) {
                              params$method <- mgcv::gam
                            }
                            params
                          },
                          # Fits the model for one group and returns a
                          # one-row data.frame with the label placement.
                          compute_group = function(data, scales, method = "auto", formula = y~x,
                                                   se = TRUE, n = 80, span = 0.75, fullrange = FALSE,
                                                   xseq = NULL, level = 0.95, method.args = list(),
                                                   na.rm = FALSE, xpos=NULL, ypos=NULL) {
                            if (length(unique(data$x)) < 2) {
                              # Not enough data to perform fit
                              return(data.frame())
                            }
                            if (is.null(data$weight)) data$weight <- 1
                            if (is.null(xseq)) {
                              if (is.integer(data$x)) {
                                if (fullrange) {
                                  xseq <- scales$x$dimension()
                                } else {
                                  xseq <- sort(unique(data$x))
                                }
                              } else {
                                if (fullrange) {
                                  range <- scales$x$dimension()
                                } else {
                                  range <- range(data$x, na.rm = TRUE)
                                }
                                xseq <- seq(range[1], range[2], length.out = n)
                              }
                            }
                            # Special case span because it's the most commonly used model argument
                            if (identical(method, "loess")) {
                              method.args$span <- span
                            }
                            if (is.character(method)) method <- match.fun(method)
                            base.args <- list(quote(formula), data = quote(data), weights = quote(weight))
                            model <- do.call(method, c(base.args, method.args))
                            m = model
                            # Build plotmath "y = a + b*x, r^2 = ...".
                            # NOTE(review): coef()[1:2] and summary(m)$r.squared
                            # are only well-defined for lm-style fits; for
                            # loess/gam the r.squared component is NULL --
                            # confirm intended usage with method = "lm".
                            eq <- substitute(italic(y) == a + b %.% italic(x)*","~~italic(r)^2~"="~r2,
                                             list(a = format(coef(m)[1], digits = 3),
                                                  b = format(coef(m)[2], digits = 3),
                                                  r2 = format(summary(m)$r.squared, digits = 3)))
                            func_string = as.character(as.expression(eq))
                            # Default label position: 90% of the data minimum
                            # on each axis.
                            if(is.null(xpos)) xpos = min(data$x)*0.9
                            if(is.null(ypos)) ypos = min(data$y)*0.9
                            data.frame(x=xpos, y=ypos, label=func_string)
                          },
                          required_aes = c("x", "y")
)
| /Stat_smooth_func.R | no_license | PaulKnoops/Perception | R | false | false | 5,438 | r | #https://gist.github.com/kdauria/524eade46135f6348140
#Stat_smooth_Func.R
stat_smooth_func <- function(mapping = NULL, data = NULL,
geom = "smooth", position = "identity",
...,
method = "auto",
formula = y ~ x,
se = TRUE,
n = 80,
span = 0.75,
fullrange = FALSE,
level = 0.95,
method.args = list(),
na.rm = FALSE,
show.legend = NA,
inherit.aes = TRUE,
xpos = NULL,
ypos = NULL) {
layer(
data = data,
mapping = mapping,
stat = StatSmoothFunc,
geom = geom,
position = position,
show.legend = show.legend,
inherit.aes = inherit.aes,
params = list(
method = method,
formula = formula,
se = se,
n = n,
fullrange = fullrange,
level = level,
na.rm = na.rm,
method.args = method.args,
span = span,
xpos = xpos,
ypos = ypos,
...
)
)
}
StatSmoothFunc <- ggproto("StatSmooth", Stat,
setup_params = function(data, params) {
# Figure out what type of smoothing to do: loess for small datasets,
# gam with a cubic regression basis for large data
# This is based on the size of the _largest_ group.
if (identical(params$method, "auto")) {
max_group <- max(table(data$group))
if (max_group < 1000) {
params$method <- "loess"
} else {
params$method <- "gam"
params$formula <- y ~ s(x, bs = "cs")
}
}
if (identical(params$method, "gam")) {
params$method <- mgcv::gam
}
params
},
compute_group = function(data, scales, method = "auto", formula = y~x,
se = TRUE, n = 80, span = 0.75, fullrange = FALSE,
xseq = NULL, level = 0.95, method.args = list(),
na.rm = FALSE, xpos=NULL, ypos=NULL) {
if (length(unique(data$x)) < 2) {
# Not enough data to perform fit
return(data.frame())
}
if (is.null(data$weight)) data$weight <- 1
if (is.null(xseq)) {
if (is.integer(data$x)) {
if (fullrange) {
xseq <- scales$x$dimension()
} else {
xseq <- sort(unique(data$x))
}
} else {
if (fullrange) {
range <- scales$x$dimension()
} else {
range <- range(data$x, na.rm = TRUE)
}
xseq <- seq(range[1], range[2], length.out = n)
}
}
# Special case span because it's the most commonly used model argument
if (identical(method, "loess")) {
method.args$span <- span
}
if (is.character(method)) method <- match.fun(method)
base.args <- list(quote(formula), data = quote(data), weights = quote(weight))
model <- do.call(method, c(base.args, method.args))
m = model
eq <- substitute(italic(y) == a + b %.% italic(x)*","~~italic(r)^2~"="~r2,
list(a = format(coef(m)[1], digits = 3),
b = format(coef(m)[2], digits = 3),
r2 = format(summary(m)$r.squared, digits = 3)))
func_string = as.character(as.expression(eq))
if(is.null(xpos)) xpos = min(data$x)*0.9
if(is.null(ypos)) ypos = min(data$y)*0.9
data.frame(x=xpos, y=ypos, label=func_string)
},
required_aes = c("x", "y")
)
|
# Lectura de datos (data loading script).
# NOTE(review): setwd() with absolute user paths is fragile; kept as-is to
# preserve behavior, but consider project-relative paths.
getwd()
setwd("~/GitHub/Programacion_Actuarial_lll_OT16")
# read.table(header = TRUE, sep = ",") is the effective read; the original
# preliminary read.csv() calls were immediately overwritten and have been
# removed.  Positional `T` is spelled out as header = TRUE.
data <- read.table("table.csv", header = TRUE, sep = ",")
data
getwd()
setwd("~/GitHub/programacion_Actuarial_lll_OT16/specdata")
direc <- read.table("001.csv", header = TRUE, sep = ",", nrows = 1462)
direc
| /PracticaRBimon.R | no_license | Erickghub/Programacion_Actuarial_lll_OT16 | R | false | false | 305 | r | #Lectura de datos
getwd()
setwd("~/GitHub/Programacion_Actuarial_lll_OT16")
data <- read.csv("table.csv")
data <- read.table("table.csv",T,",")
data
getwd()
setwd("~/GitHub/programacion_Actuarial_lll_OT16/specdata")
direc <- read.csv("001.csv")
direc <- read.table("001.csv",T,",",nrows = 1462)
direc
|
\alias{gtkWindowSetGeometryHints}
\name{gtkWindowSetGeometryHints}
\title{gtkWindowSetGeometryHints}
\description{This function sets up hints about how a window can be resized by
the user. You can set a minimum and maximum size; allowed resize
increments (e.g. for xterm, you can only resize by the size of a
character); aspect ratios; and more. See the \code{\link{GdkGeometry}} struct.}
\usage{gtkWindowSetGeometryHints(object, geometry.widget, geometry)}
\arguments{
\item{\verb{object}}{a \code{\link{GtkWindow}}}
\item{\verb{geometry.widget}}{widget the geometry hints will be applied to or \code{NULL}. \emph{[ \acronym{allow-none} ]}}
\item{\verb{geometry}}{struct containing geometry information or \code{NULL}. \emph{[ \acronym{allow-none} ]}}
}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
| /RGtk2/man/gtkWindowSetGeometryHints.Rd | no_license | lawremi/RGtk2 | R | false | false | 831 | rd | \alias{gtkWindowSetGeometryHints}
\name{gtkWindowSetGeometryHints}
\title{gtkWindowSetGeometryHints}
\description{This function sets up hints about how a window can be resized by
the user. You can set a minimum and maximum size; allowed resize
increments (e.g. for xterm, you can only resize by the size of a
character); aspect ratios; and more. See the \code{\link{GdkGeometry}} struct.}
\usage{gtkWindowSetGeometryHints(object, geometry.widget, geometry)}
\arguments{
\item{\verb{object}}{a \code{\link{GtkWindow}}}
\item{\verb{geometry.widget}}{widget the geometry hints will be applied to or \code{NULL}. \emph{[ \acronym{allow-none} ]}}
\item{\verb{geometry}}{struct containing geometry information or \code{NULL}. \emph{[ \acronym{allow-none} ]}}
}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{LLmodplot}
\alias{LLmodplot}
\title{plots the fits for each model considered in \code{fitModels}}
\usage{
LLmodplot(fit)
}
\arguments{
\item{fit}{a fitted MCMC object returned from \code{fitModels}}
}
\value{
NULL produces one or several plots
}
\description{
plots the fits for each model considered in \code{fitModels}
}
\author{
Colin Millar \email{colin.millar@jrc.ec.europa.eu}
}
| /man/LLmodplot.Rd | no_license | AndyCampbell/msy | R | false | false | 439 | rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{LLmodplot}
\alias{LLmodplot}
\title{plots the fits for each model considered in \code{fitModels}}
\usage{
LLmodplot(fit)
}
\arguments{
\item{fit}{a fitted MCMC object returned from \code{fitModels}}
}
\value{
NULL produces one or several plots
}
\description{
plots the fits for each model considered in \code{fitModels}
}
\author{
Colin Millar \email{colin.millar@jrc.ec.europa.eu}
}
|
df <- read.csv("Line2_Month02_2016.csv.bz2")
class <- c("LogTime" = "character", "Id" = "integer", "Seg" = "factor", "Site" = "factor", "TagType" = "character",
"ValType" = "factor", "ValInt" = "numeric", "ValFlt" = "numeric", "ValStr" = "character", "ValDig" = "character",
"ValTStamp" = "character", "IsGood" = "factor", "IsQuestionable" = "factor", "uom" = "factor",
"LogMonth" = "factor", "LogYear" = "factor")
azureStorageCall <- function(url, verb, key, storageType="blob", requestBody=NULL, headers=NULL, ifMatch="", md5="")
# Download a CSV blob from an Azure Storage account and parse it into a
# data frame.
#
# azureStorageUrl: base URL of the storage account.
# key:             storage-account access key (forwarded to azureStorageCall).
# container:       container name (spaces are URL-encoded below).
# csvFilepath:     path of the CSV blob inside the container.
# Md5:             MD5 checksum forwarded to the storage call.
# storageType:     Azure storage service type, default "blob".
# encoding:        text encoding used to decode the response body.
#
# Returns: data.frame parsed from the downloaded CSV.
azureCSVFileToDataFrame <- function (azureStorageUrl, key, container, csvFilepath, Md5, storageType = "blob", encoding = 'UTF-8') {
  # Replace spaces with HTTP encoding, i.e., %20
  container <- gsub(" ", "%20", container, fixed = TRUE)
  csvFilepath <- gsub(" ", "%20", csvFilepath, fixed = TRUE)
  # Assemble the URL
  azureUrl <- paste0(azureStorageUrl, "/", container, "/", csvFilepath)
  # Get the file from the Azure Storage Account
  response <- azureStorageCall(url = azureUrl, verb = "GET", key = key, storageType = storageType, md5 = Md5)
  # Get the content of the response. We are expecting "text".
  csv <- content(response, as = "text", encoding = encoding)
  # Save the csv content to a temporary file.
  tmp_csv_filename <- tempfile(pattern = "file", tmpdir = tempdir(), fileext = ".csv")
  # Fix: remove the temp file even if read.csv() errors -- the original
  # only deleted it on the success path, leaking a file per failure.
  on.exit(unlink(tmp_csv_filename), add = TRUE)
  write(csv, tmp_csv_filename, ncolumns = 1, sep = "")
  # Remove the csv variable to save memory
  remove(csv)
  # Read the csv file into a data frame.
  read.csv(tmp_csv_filename)
}
url1 <- "https://enbze2elpdestarserver.blob.core.windows.net/2017-power-optimization/machine_learning/data/2016/Line2_Month02_2016.csv.bz2"
container <- "2017-power-optimization"
zipFilepath <- "machine_learning/data/2016/Line2_Month02_2016.csv.bz2"
azureStorageUrl <- "https://enbze2elpdestarserver.blob.core.windows.net"
container <- gsub(" ", "%20", container)
zipFilepath <- gsub(" ", "%20", zipFilepath)
url <- paste0(azureStorageUrl, "/", container, "/", zipFilepath)
response <- azureStorageCall(url, "GET", AzurePrimkey, storageType = "blob")
csv <- content(response, as="text", encoding = encoding)
class <- c("LogTime" = "character", "Id" = "integer", "Seg" = "factor", "Site" = "factor", "TagType" = "character",
"ValType" = "factor", "ValInt" = "numeric", "ValFlt" = "numeric", "ValStr" = "character",
"ValDig" = "character", "ValTStamp" = "character", "IsGood" = "factor", "IsQuestionable" = "factor",
"uom" = "character", "LogMonth" = "factor", "LogYear" = "factor")
sdf <- rxDataStep(Line21, rowSelection = (u < .1), transforms = list(u = runif(.rxNumRows)))
# create_Value1(): fill column 5 (the ValType slot) with a character
# rendering of the row's typed value -- column 6 (ValInt) for "int" rows,
# column 7 (ValFlt) for "float" rows, and column 8 (ValStr) otherwise.
# Positional column indices are kept from the original caller contract.
#
# Fix: the original iterated `1:nrow(dat)`, which is c(1, 0) for an empty
# data frame and raised an error; seq_len() makes the empty case a no-op.
create_Value1 <- function(dat) {
  for (i in seq_len(nrow(dat))) {
    kind <- dat[i, 5]
    if (kind == "int") {
      dat[i, 5] <- as.character(dat[i, 6])
    } else if (kind == "float") {
      dat[i, 5] <- as.character(dat[i, 7])
    } else {
      dat[i, 5] <- dat[i, 8]
    }
  }
  return(dat)
}
create_Value2 <- function(x){
if (x == "int") {x <- as}
}
Line2_int <- rxDataStep(Line2, outFile = "Line2_int.xdf", rowSelection = (ValType == "int"), overwrite = T,
transforms = list(Value = as.character(ValInt)))
rename(Line2_int,c("Value"="ValInt"))
rxDataStep(Line2_int, outFile = "Line2_int1.xdf", overwrite = T, varsToDrop = c("ValType","ValInt","ValFlt","ValDig"))
file.remove("Line2_int.xdf")
file.rename("Line2_int1.xdf","Line2_int.xdf")
Line2_int <- RxXdfData("Line2_int.xdf")
# Conv_Str(): append a character column `Value` holding the string form of
# the `ValInt` column; all other columns pass through unchanged.
Conv_Str <- function(dat) {
  dat[["Value"]] <- as.character(dat[["ValInt"]])
  dat
}
Line2_int <- rxDataStep(Line2_int, outFile = "Line2_int.xdf", overwrite = T,
transforms = list(Value = as.character(ValInt)))
Site_df <- rxImport(inData = Line2, rowSelection = (Site == S[1]), transformObjects = list(S = Sites))
for (i in 1:length(Sites)){
Site_xdf <- rxDataStep(inData = Line21, rowSelection = (Site == S[i]), transformObjects = list(S = Sites))
}
library(FSelector)
Line2_df <- rxImport(Line2)
Line2_df <- Line2_df[-c(1,8,15,17,18)]
corr <- information.gain(hhpu~.,Line2_df)
col_names <- names(Line2_df)
col_names <- col_names[-c(6)]
corr <- data.frame("Features" = col_names, "Information_Gain" = corr$attr_importance)
corr <- corr[order(corr$Information_Gain, decreasing = T),]
Rename segments 2-CM-GF and 2-GF-CR to 2-CM-CR. We'll need to convert DESTINATION_FCLTY_NAME to a character and then back to a factor afterwards for this to work.
```{r Rename Segments, message = FALSE}
# segment_rename(): collapse the segment labels "2-CM-GF" and "2-GF-CR"
# into the single segment "2-CM-CR".  The column comes back as character
# (the factor conversion is intentional; the script re-factors the column
# afterwards).
segment_rename <- function(dat) {
  seg <- as.character(dat$DESTINATION_FCLTY_NAME)
  seg[seg %in% c("2-CM-GF", "2-GF-CR")] <- "2-CM-CR"
  dat$DESTINATION_FCLTY_NAME <- seg
  dat
}
rxDataStep(Schedule, outFile = "Schedule_Linefill1.xdf", overwrite = T, transformFunc = segment_rename)
rxFactors("Schedule_Linefill1.xdf", outFile = "Schedule_Linefill2.xdf", overwrite = T,
factorInfo = c("DESTINATION_FCLTY_NAME"))
file.remove("Schedule_Linefill.xdf")
file.remove("Schedule_Linefill1.xdf")
file.rename("Schedule_Linefill2.xdf","Schedule_Linefill.xdf")
Schedule <- RxXdfData("Schedule_Linefill.xdf")
```
| /R Work.R | no_license | JackZhang0224/Project1 | R | false | false | 5,270 | r | df <- read.csv("Line2_Month02_2016.csv.bz2")
class <- c("LogTime" = "character", "Id" = "integer", "Seg" = "factor", "Site" = "factor", "TagType" = "character",
"ValType" = "factor", "ValInt" = "numeric", "ValFlt" = "numeric", "ValStr" = "character", "ValDig" = "character",
"ValTStamp" = "character", "IsGood" = "factor", "IsQuestionable" = "factor", "uom" = "factor",
"LogMonth" = "factor", "LogYear" = "factor")
azureStorageCall <- function(url, verb, key, storageType="blob", requestBody=NULL, headers=NULL, ifMatch="", md5="")
azureCSVFileToDataFrame <- function (azureStorageUrl, key, container, csvFilepath, Md5, storageType = "blob", encoding = 'UTF-8') {
# Replace spaces with HTTP encoding, i.e., %20
container <- gsub(" ", "%20", container)
csvFilepath <- gsub(" ", "%20", csvFilepath)
# Assemble the URL
azureUrl <- paste0(azureStorageUrl, "/", container, "/", csvFilepath)
# Get the file from the Azure Storage Account
response <- azureStorageCall(url = azureUrl, verb = "GET", key = key, storageType = storageType, md5 = Md5)
# Get the content of the response. We are expecting "text".
csv <- content(response, as="text", encoding = encoding)
# Save the csv content to a temporary file.
tmp_csv_filename <- tempfile(pattern = "file", tmpdir = tempdir(), fileext = ".csv")
write(csv, tmp_csv_filename, ncolumns = 1, sep = "")
# Remove the csv variable to save memory
remove(csv)
# Read the csv file into a data frame.
df <- read.csv(tmp_csv_filename)
# Remove the temporary file.
file.remove(tmp_csv_filename)
return (df)
}
url1 <- "https://enbze2elpdestarserver.blob.core.windows.net/2017-power-optimization/machine_learning/data/2016/Line2_Month02_2016.csv.bz2"
container <- "2017-power-optimization"
zipFilepath <- "machine_learning/data/2016/Line2_Month02_2016.csv.bz2"
azureStorageUrl <- "https://enbze2elpdestarserver.blob.core.windows.net"
container <- gsub(" ", "%20", container)
zipFilepath <- gsub(" ", "%20", zipFilepath)
url <- paste0(azureStorageUrl, "/", container, "/", zipFilepath)
response <- azureStorageCall(url, "GET", AzurePrimkey, storageType = "blob")
csv <- content(response, as="text", encoding = encoding)
class <- c("LogTime" = "character", "Id" = "integer", "Seg" = "factor", "Site" = "factor", "TagType" = "character",
"ValType" = "factor", "ValInt" = "numeric", "ValFlt" = "numeric", "ValStr" = "character",
"ValDig" = "character", "ValTStamp" = "character", "IsGood" = "factor", "IsQuestionable" = "factor",
"uom" = "character", "LogMonth" = "factor", "LogYear" = "factor")
sdf <- rxDataStep(Line21, rowSelection = (u < .1), transforms = list(u = runif(.rxNumRows)))
create_Value1 <- function(dat){
for (i in 1:nrow(dat)){
if (dat[i,5] == "int") {dat[i,5] <- as.character(dat[i,6])}
else if (dat[i,5] == "float") {dat[i,5] <- as.character(dat[i,7])}
else {dat[i,5] <- dat[i,8]}
}
return(dat)
}
create_Value2 <- function(x){
if (x == "int") {x <- as}
}
Line2_int <- rxDataStep(Line2, outFile = "Line2_int.xdf", rowSelection = (ValType == "int"), overwrite = T,
transforms = list(Value = as.character(ValInt)))
rename(Line2_int,c("Value"="ValInt"))
rxDataStep(Line2_int, outFile = "Line2_int1.xdf", overwrite = T, varsToDrop = c("ValType","ValInt","ValFlt","ValDig"))
file.remove("Line2_int.xdf")
file.rename("Line2_int1.xdf","Line2_int.xdf")
Line2_int <- RxXdfData("Line2_int.xdf")
Conv_Str <- function(dat){
dat$Value = as.character(dat$ValInt)
return(dat)
}
Line2_int <- rxDataStep(Line2_int, outFile = "Line2_int.xdf", overwrite = T,
transforms = list(Value = as.character(ValInt)))
Site_df <- rxImport(inData = Line2, rowSelection = (Site == S[1]), transformObjects = list(S = Sites))
for (i in 1:length(Sites)){
Site_xdf <- rxDataStep(inData = Line21, rowSelection = (Site == S[i]), transformObjects = list(S = Sites))
}
library(FSelector)
Line2_df <- rxImport(Line2)
Line2_df <- Line2_df[-c(1,8,15,17,18)]
corr <- information.gain(hhpu~.,Line2_df)
col_names <- names(Line2_df)
col_names <- col_names[-c(6)]
corr <- data.frame("Features" = col_names, "Information_Gain" = corr$attr_importance)
corr <- corr[order(corr$Information_Gain, decreasing = T),]
Rename segments 2-CM-GF and 2-GF-CR to 2-CM-CR. We'll need to convert DESTINATION_FCLTY_NAME to a character and then back to a factor afterwards for this to work.
```{r Rename Segments, message = FALSE}
segment_rename <- function(dat) {
dat$DESTINATION_FCLTY_NAME <- as.character(dat$DESTINATION_FCLTY_NAME)
dat$DESTINATION_FCLTY_NAME <- ifelse(dat$DESTINATION_FCLTY_NAME == "2-CM-GF", "2-CM-CR",
ifelse(dat$DESTINATION_FCLTY_NAME == "2-GF-CR", "2-CM-CR",
dat$DESTINATION_FCLTY_NAME))
return(dat)
}
rxDataStep(Schedule, outFile = "Schedule_Linefill1.xdf", overwrite = T, transformFunc = segment_rename)
rxFactors("Schedule_Linefill1.xdf", outFile = "Schedule_Linefill2.xdf", overwrite = T,
factorInfo = c("DESTINATION_FCLTY_NAME"))
file.remove("Schedule_Linefill.xdf")
file.remove("Schedule_Linefill1.xdf")
file.rename("Schedule_Linefill2.xdf","Schedule_Linefill.xdf")
Schedule <- RxXdfData("Schedule_Linefill.xdf")
```
|
?locator
locator(2)
z<- runif(100)
x<- 1:10
dat<- expand.grid(x=x,y=x)
dat$z<- z
plot(y~x,dat)
image(x,x,z)
outer(
identify(x=dat$x,y=dat$y, labels=dat$z) | /_archive/WFA8000x/_Example R codes/identify.R | no_license | mcolvin/WFA8000-Research-Credits | R | false | false | 159 | r |
?locator
locator(2)
z<- runif(100)
x<- 1:10
dat<- expand.grid(x=x,y=x)
dat$z<- z
plot(y~x,dat)
image(x,x,z)
outer(
identify(x=dat$x,y=dat$y, labels=dat$z) |
## plot3.R -- Exploratory Data Analysis course project, plot 3:
## energy sub-metering over two days, drawn as three line series and
## copied to plot3.png.
setwd("C:/Users/leip/Desktop/Exploratory Data Analysis/Course_Project_1")
### Read-in data
# na.strings = "?" converts the file's missing-value marker to NA.
hpc <- read.table("./household_power_consumption.txt",sep = ";" , header = T, na.strings = "?", dec = ".")
### Subset data
# Keep only 2007-02-01 and 2007-02-02; Date is stored as d/m/Y strings.
hpc2 <- hpc[which(hpc$Date %in% c("1/2/2007","2/2/2007")), ]
### Data manipulation- change format and create new vars
# make plot variables into numeric
# NOTE(review): with na.strings/dec set above these columns may already be
# numeric, making the coercions redundant -- confirm against the raw file.
hpc2$Global_active_power <- as.numeric(hpc2$Global_active_power)
hpc2$Sub_metering_1 <- as.numeric(hpc2$Sub_metering_1)
hpc2$Sub_metering_2 <- as.numeric(hpc2$Sub_metering_2)
# Create DateTime var for plotting
DateTime1 <- paste (as.character(hpc2$Date),as.character(hpc2$Time) , sep = " ", collapse = NULL)
hpc2$DateTime <- strptime(DateTime1,'%d/%m/%Y %H:%M:%S')
# Create weekdays
library(lubridate)
hpc2$day <- wday(as.Date(hpc2$DateTime), label=TRUE)
### Create plot 3
with(hpc2, plot(DateTime,Sub_metering_1, type = "l", xlab="", ylab = "Energy sub metering"))
lines(hpc2$DateTime, hpc2$Sub_metering_2, type="l", col="red")
lines(hpc2$DateTime, hpc2$Sub_metering_3, type="l", col="blue")
legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lty=1, lwd=2,
       col = c("black","red", "blue"), cex = 0.3)
# NOTE(review): dev.copy() of a screen device can render the legend at a
# different size than plotting straight into png() -- verify the output.
dev.copy(png, file="plot3.png", height=480, width=480)
dev.off()
### END
### END | /plot3.R | no_license | carollei926/Exploratory_Data_Analysis | R | false | false | 1,274 | r | setwd("C:/Users/leip/Desktop/Exploratory Data Analysis/Course_Project_1")
### Read-in data
hpc <- read.table("./household_power_consumption.txt",sep = ";" , header = T, na.strings = "?", dec = ".")
### Subset data
hpc2 <- hpc[which(hpc$Date %in% c("1/2/2007","2/2/2007")), ]
### Data manipulation- change format and create new vars
# make plot variables into numeric
hpc2$Global_active_power <- as.numeric(hpc2$Global_active_power)
hpc2$Sub_metering_1 <- as.numeric(hpc2$Sub_metering_1)
hpc2$Sub_metering_2 <- as.numeric(hpc2$Sub_metering_2)
# Create DateTime var for plotting
DateTime1 <- paste (as.character(hpc2$Date),as.character(hpc2$Time) , sep = " ", collapse = NULL)
hpc2$DateTime <- strptime(DateTime1,'%d/%m/%Y %H:%M:%S')
# Create weekdays
library(lubridate)
hpc2$day <- wday(as.Date(hpc2$DateTime), label=TRUE)
### Create plot 3
with(hpc2, plot(DateTime,Sub_metering_1, type = "l", xlab="", ylab = "Energy sub metering"))
lines(hpc2$DateTime, hpc2$Sub_metering_2, type="l", col="red")
lines(hpc2$DateTime, hpc2$Sub_metering_3, type="l", col="blue")
legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lty=1, lwd=2,
col = c("black","red", "blue"), cex = 0.3)
dev.copy(png, file="plot3.png", height=480, width=480)
dev.off()
### END |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/is_Iterator.R
\name{is_Iterator}
\alias{is_Iterator}
\title{Test if an object is an Iterator}
\usage{
is_Iterator(list)
}
\arguments{
\item{list}{Object to test}
}
\description{
Test if an object is an Iterator
}
| /man/is_Iterator.Rd | permissive | jacgoldsm/peruse | R | false | true | 291 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/is_Iterator.R
\name{is_Iterator}
\alias{is_Iterator}
\title{Test if an object is an Iterator}
\usage{
is_Iterator(list)
}
\arguments{
\item{list}{Object to test}
}
\description{
Test if an object is an Iterator
}
|
source(testthat::test_path("make_example_data.R"))
source(testthat::test_path("test-helpers.R"))
opts <- list(seed = 34677, chains = 2, iter = 500)
# omit_warning(): build a snapshot `transform` function that suppresses the
# whole output chunk (returns NULL) when any line matches `pattern`, and
# passes the lines through untouched otherwise.
omit_warning <- function(pattern) {
  function(x) {
    hit <- grepl(pattern, x)
    if (any(hit)) NULL else x
  }
}
# Classification outcome (x2), factor-encoded predictor (x3).
# Fits step_lencode_bayes() via rstanarm and checks that:
#   * the learned mapping has one value per observed level plus "..new",
#   * baking maps known levels to their values and unseen/NA levels to
#     the "..new" value, for both factor and character new data,
#   * tidy() mirrors the mapping table.
test_that("factor encoded predictor", {
  skip_on_cran()
  skip_if_not_installed("rstanarm")
  # omit_warning() strips nondeterministic sampler-convergence warnings
  # from the snapshot output.
  expect_snapshot(
    transform = omit_warning("^(The largest R-hat is|Bulk Effective|Tail Effective)"),
    {
      class_test <- recipe(x2 ~ ., data = ex_dat) %>%
        step_lencode_bayes(x3,
          outcome = vars(x2),
          verbose = FALSE,
          options = opts
        ) %>%
        prep(training = ex_dat, retain = TRUE)
    }
  )
  tr_values <- bake(class_test, new_data = NULL)$x3
  new_values <- bake(class_test, new_data = new_dat)
  expect_snapshot(
    new_values_ch <- bake(class_test, new_data = new_dat_ch)
  )
  key <- class_test$steps[[1]]$mapping
  td_obj <- tidy(class_test, number = 1)
  expect_equal("x3", names(key))
  # One encoded value per observed level plus the "..new" catch-all.
  expect_equal(
    length(unique(ex_dat$x3)) + 1,
    nrow(key$x3)
  )
  expect_true(sum(key$x3$..level == "..new") == 1)
  expect_true(is.numeric(tr_values))
  expect_equal(
    new_values$x3[1],
    key$x3$..value[key$x3$..level == "..new"]
  )
  expect_equal(
    new_values$x3[2],
    key$x3$..value[key$x3$..level == levels(ex_dat$x3)[1]]
  )
  expect_equal(
    new_values$x3[3],
    key$x3$..value[key$x3$..level == "..new"]
  )
  expect_equal(
    new_values_ch$x3[1],
    key$x3$..value[key$x3$..level == "..new"]
  )
  expect_equal(
    new_values_ch$x3[2],
    key$x3$..value[key$x3$..level == levels(ex_dat$x3)[1]]
  )
  expect_equal(
    new_values_ch$x3[3],
    key$x3$..value[key$x3$..level == "..new"]
  )
  expect_equal(
    td_obj$level,
    key$x3$..level
  )
  expect_equal(
    td_obj$value,
    key$x3$..value
  )
})
# Same contract as the factor test above, but the training predictor x3 is
# a character column (ex_dat_ch) and the step carries an explicit id.
# Also verifies that factor new data (new_dat) is encoded identically.
test_that("character encoded predictor", {
  skip_on_cran()
  skip_if_not_installed("rstanarm")
  expect_snapshot(
    transform = omit_warning("^(The largest R-hat is|Bulk Effective|Tail Effective)"),
    class_test <- recipe(x2 ~ ., data = ex_dat_ch) %>%
      step_lencode_bayes(x3,
        outcome = vars(x2),
        verbose = FALSE,
        options = opts,
        id = "id"
      ) %>%
      prep(
        training = ex_dat_ch, retain = TRUE,
        options = opts
      )
  )
  tr_values <- bake(class_test, new_data = NULL)$x3
  new_values <- bake(class_test, new_data = new_dat_ch)
  new_values_fc <- bake(class_test, new_data = new_dat)
  key <- class_test$steps[[1]]$mapping
  td_obj <- tidy(class_test, number = 1)
  expect_equal("x3", names(key))
  # One encoded value per observed level plus the "..new" catch-all.
  expect_equal(
    length(unique(ex_dat$x3)) + 1,
    nrow(key$x3)
  )
  expect_true(sum(key$x3$..level == "..new") == 1)
  expect_true(is.numeric(tr_values))
  expect_equal(
    new_values$x3[1],
    key$x3$..value[key$x3$..level == "..new"]
  )
  expect_equal(
    new_values$x3[2],
    key$x3$..value[key$x3$..level == levels(ex_dat$x3)[1]]
  )
  expect_equal(
    new_values$x3[3],
    key$x3$..value[key$x3$..level == "..new"]
  )
  expect_equal(
    new_values_fc$x3[1],
    key$x3$..value[key$x3$..level == "..new"]
  )
  expect_equal(
    new_values_fc$x3[2],
    key$x3$..value[key$x3$..level == levels(ex_dat$x3)[1]]
  )
  expect_equal(
    new_values_fc$x3[3],
    key$x3$..value[key$x3$..level == "..new"]
  )
  expect_equal(
    td_obj$level,
    key$x3$..level
  )
  expect_equal(
    td_obj$value,
    key$x3$..value
  )
})
# Regression outcome (x1), factor-encoded predictor (x3).  Same mapping /
# baking / tidy() checks as the classification case; set.seed() pins the
# sampler so the snapshot is reproducible.
test_that("factor encoded predictor", {
  skip_on_cran()
  skip_if_not_installed("rstanarm")
  expect_snapshot(
    transform = omit_warning("^(The largest R-hat is|Bulk Effective|Tail Effective)"),
    {
      set.seed(8283)
      reg_test <- recipe(x1 ~ ., data = ex_dat) %>%
        step_lencode_bayes(x3,
          outcome = vars(x1),
          verbose = FALSE,
          options = opts
        ) %>%
        prep(training = ex_dat, retain = TRUE)
    }
  )
  tr_values <- bake(reg_test, new_data = NULL)$x3
  new_values <- bake(reg_test, new_data = new_dat)
  expect_snapshot(
    new_values_ch <- bake(reg_test, new_data = new_dat_ch)
  )
  td_obj <- tidy(reg_test, number = 1)
  key <- reg_test$steps[[1]]$mapping
  expect_equal("x3", names(key))
  # One encoded value per observed level plus the "..new" catch-all.
  expect_equal(
    length(unique(ex_dat$x3)) + 1,
    nrow(key$x3)
  )
  expect_true(sum(key$x3$..level == "..new") == 1)
  expect_true(is.numeric(tr_values))
  expect_equal(
    new_values$x3[1],
    key$x3$..value[key$x3$..level == "..new"]
  )
  expect_equal(
    new_values$x3[2],
    key$x3$..value[key$x3$..level == levels(ex_dat$x3)[1]]
  )
  expect_equal(
    new_values$x3[3],
    key$x3$..value[key$x3$..level == "..new"]
  )
  expect_equal(
    new_values_ch$x3[1],
    key$x3$..value[key$x3$..level == "..new"]
  )
  expect_equal(
    new_values_ch$x3[2],
    key$x3$..value[key$x3$..level == levels(ex_dat$x3)[1]]
  )
  expect_equal(
    new_values_ch$x3[3],
    key$x3$..value[key$x3$..level == "..new"]
  )
  expect_equal(
    td_obj$level,
    key$x3$..level
  )
  expect_equal(
    td_obj$value,
    key$x3$..value
  )
})
test_that("character encoded predictor", {
skip_on_cran()
skip_if_not_installed("rstanarm")
expect_snapshot(
transform = omit_warning("^(The largest R-hat is|Bulk Effective|Tail Effective)"),
{
set.seed(8283)
reg_test <- recipe(x1 ~ ., data = ex_dat_ch) %>%
step_lencode_bayes(x3,
outcome = vars(x1),
verbose = FALSE,
options = opts
) %>%
prep(training = ex_dat_ch, retain = TRUE)
}
)
tr_values <- bake(reg_test, new_data = NULL)$x3
new_values <- bake(reg_test, new_data = new_dat_ch)
new_values_fc <- bake(reg_test, new_data = new_dat)
key <- reg_test$steps[[1]]$mapping
td_obj <- tidy(reg_test, number = 1)
expect_equal("x3", names(key))
expect_equal(
length(unique(ex_dat$x3)) + 1,
nrow(key$x3)
)
expect_true(sum(key$x3$..level == "..new") == 1)
expect_true(is.numeric(tr_values))
expect_equal(
new_values$x3[1],
key$x3$..value[key$x3$..level == "..new"]
)
expect_equal(
new_values$x3[2],
key$x3$..value[key$x3$..level == levels(ex_dat$x3)[1]]
)
expect_equal(
new_values$x3[3],
key$x3$..value[key$x3$..level == "..new"]
)
expect_equal(
new_values_fc$x3[1],
key$x3$..value[key$x3$..level == "..new"]
)
expect_equal(
new_values_fc$x3[2],
key$x3$..value[key$x3$..level == levels(ex_dat$x3)[1]]
)
expect_equal(
new_values_fc$x3[3],
key$x3$..value[key$x3$..level == "..new"]
)
expect_equal(
td_obj$level,
key$x3$..level
)
expect_equal(
td_obj$value,
key$x3$..value
)
})
test_that("Works with passing family ", {
skip_on_cran()
skip_if_not_installed("rstanarm")
ex_dat_poisson <- ex_dat %>%
mutate(outcome = rpois(n(), 5))
expect_snapshot(
transform = omit_warning("^(Bulk Effective|Tail Effective)"),
{
class_test <- recipe(outcome ~ ., data = ex_dat_poisson) %>%
step_lencode_bayes(x3,
outcome = vars(outcome),
verbose = FALSE,
options = c(opts, family = stats::poisson)
) %>%
prep(training = ex_dat_poisson, retain = TRUE)
}
)
tr_values <- bake(class_test, new_data = NULL)$x3
new_values <- bake(class_test, new_data = new_dat)
expect_snapshot(
new_values_ch <- bake(class_test, new_data = new_dat_ch)
)
key <- class_test$steps[[1]]$mapping
td_obj <- tidy(class_test, number = 1)
expect_equal("x3", names(key))
expect_equal(
length(unique(ex_dat$x3)) + 1,
nrow(key$x3)
)
expect_true(sum(key$x3$..level == "..new") == 1)
expect_true(is.numeric(tr_values))
expect_equal(
new_values$x3[1],
key$x3$..value[key$x3$..level == "..new"]
)
expect_equal(
new_values$x3[2],
key$x3$..value[key$x3$..level == levels(ex_dat$x3)[1]]
)
expect_equal(
new_values$x3[3],
key$x3$..value[key$x3$..level == "..new"]
)
expect_equal(
new_values_ch$x3[1],
key$x3$..value[key$x3$..level == "..new"]
)
expect_equal(
new_values_ch$x3[2],
key$x3$..value[key$x3$..level == levels(ex_dat$x3)[1]]
)
expect_equal(
new_values_ch$x3[3],
key$x3$..value[key$x3$..level == "..new"]
)
expect_equal(
td_obj$level,
key$x3$..level
)
expect_equal(
td_obj$value,
key$x3$..value
)
})
test_that("case weights", {
skip_on_cran()
skip_if_not_installed("rstanarm")
wts_int <- rep(c(0, 1), times = c(100, 400))
ex_dat_cw <- ex_dat %>%
mutate(wts = importance_weights(wts_int))
expect_snapshot(
transform = omit_warning("^^(Bulk Effective|Tail Effective|The largest)"),
{
class_test <- recipe(x2 ~ ., data = ex_dat_cw) %>%
step_lencode_bayes(x3,
outcome = vars(x2),
verbose = FALSE,
options = opts
) %>%
prep(training = ex_dat_cw, retain = TRUE)
junk <- capture.output(
ref_mod <- rstanarm::stan_glmer(
formula = x2 ~ (1 | value),
data = ex_dat_cw %>% transmute(value = x3, x2),
family = binomial(),
na.action = na.omit,
seed = 34677,
chains = 2,
iter = 500,
weights = wts_int,
)
)
}
)
expect_equal(
-coef(ref_mod)$value[[1]],
slice_head(class_test$steps[[1]]$mapping$x3, n = -1)$..value
)
expect_snapshot(class_test)
})
# Infrastructure ---------------------------------------------------------------
test_that("bake method errors when needed non-standard role columns are missing", {
skip_if_not_installed("rstanarm")
rec <- recipe(x2 ~ ., data = ex_dat) %>%
step_lencode_bayes(x3, outcome = vars(x2)) %>%
update_role(x3, new_role = "potato") %>%
update_role_requirements(role = "potato", bake = FALSE)
rec_trained <- prep(rec, training = ex_dat, verbose = FALSE)
expect_error(
bake(rec_trained, new_data = ex_dat[, -3]),
class = "new_data_missing_column"
)
})
test_that("empty printing", {
rec <- recipe(mpg ~ ., mtcars)
rec <- step_lencode_bayes(rec, outcome = vars(mpg))
expect_snapshot(rec)
rec <- prep(rec, mtcars)
expect_snapshot(rec)
})
test_that("empty selection prep/bake is a no-op", {
rec1 <- recipe(mpg ~ ., mtcars)
rec2 <- step_lencode_bayes(rec1, outcome = vars(mpg))
rec1 <- prep(rec1, mtcars)
rec2 <- prep(rec2, mtcars)
baked1 <- bake(rec1, mtcars)
baked2 <- bake(rec2, mtcars)
expect_identical(baked1, baked2)
})
test_that("empty selection tidy method works", {
rec <- recipe(mpg ~ ., mtcars)
rec <- step_lencode_bayes(rec, outcome = vars(mpg))
expect <- tibble(
terms = character(),
level = character(),
value = double(),
id = character()
)
expect_identical(tidy(rec, number = 1), expect)
rec <- prep(rec, mtcars)
expect_identical(tidy(rec, number = 1), expect)
})
test_that("printing", {
skip_if_not_installed("rstanarm")
rec <- recipe(x2 ~ ., data = ex_dat) %>%
step_lencode_bayes(x3,
outcome = vars(x2),
verbose = FALSE,
options = opts
)
expect_snapshot(print(rec))
expect_snapshot(
prep(rec),
transform = omit_warning("^(Bulk Effective|Tail Effective|The largest)")
)
})
| /tests/testthat/test-lencode_bayes.R | permissive | tidymodels/embed | R | false | false | 11,307 | r | source(testthat::test_path("make_example_data.R"))
source(testthat::test_path("test-helpers.R"))
opts <- list(seed = 34677, chains = 2, iter = 500)
omit_warning <- function(pattern) {
function(x) {
if (any(grepl(pattern, x))) {
return(NULL)
}
x
}
}
test_that("factor encoded predictor", {
skip_on_cran()
skip_if_not_installed("rstanarm")
expect_snapshot(
transform = omit_warning("^(The largest R-hat is|Bulk Effective|Tail Effective)"),
{
class_test <- recipe(x2 ~ ., data = ex_dat) %>%
step_lencode_bayes(x3,
outcome = vars(x2),
verbose = FALSE,
options = opts
) %>%
prep(training = ex_dat, retain = TRUE)
}
)
tr_values <- bake(class_test, new_data = NULL)$x3
new_values <- bake(class_test, new_data = new_dat)
expect_snapshot(
new_values_ch <- bake(class_test, new_data = new_dat_ch)
)
key <- class_test$steps[[1]]$mapping
td_obj <- tidy(class_test, number = 1)
expect_equal("x3", names(key))
expect_equal(
length(unique(ex_dat$x3)) + 1,
nrow(key$x3)
)
expect_true(sum(key$x3$..level == "..new") == 1)
expect_true(is.numeric(tr_values))
expect_equal(
new_values$x3[1],
key$x3$..value[key$x3$..level == "..new"]
)
expect_equal(
new_values$x3[2],
key$x3$..value[key$x3$..level == levels(ex_dat$x3)[1]]
)
expect_equal(
new_values$x3[3],
key$x3$..value[key$x3$..level == "..new"]
)
expect_equal(
new_values_ch$x3[1],
key$x3$..value[key$x3$..level == "..new"]
)
expect_equal(
new_values_ch$x3[2],
key$x3$..value[key$x3$..level == levels(ex_dat$x3)[1]]
)
expect_equal(
new_values_ch$x3[3],
key$x3$..value[key$x3$..level == "..new"]
)
expect_equal(
td_obj$level,
key$x3$..level
)
expect_equal(
td_obj$value,
key$x3$..value
)
})
test_that("character encoded predictor", {
skip_on_cran()
skip_if_not_installed("rstanarm")
expect_snapshot(
transform = omit_warning("^(The largest R-hat is|Bulk Effective|Tail Effective)"),
class_test <- recipe(x2 ~ ., data = ex_dat_ch) %>%
step_lencode_bayes(x3,
outcome = vars(x2),
verbose = FALSE,
options = opts,
id = "id"
) %>%
prep(
training = ex_dat_ch, retain = TRUE,
options = opts
)
)
tr_values <- bake(class_test, new_data = NULL)$x3
new_values <- bake(class_test, new_data = new_dat_ch)
new_values_fc <- bake(class_test, new_data = new_dat)
key <- class_test$steps[[1]]$mapping
td_obj <- tidy(class_test, number = 1)
expect_equal("x3", names(key))
expect_equal(
length(unique(ex_dat$x3)) + 1,
nrow(key$x3)
)
expect_true(sum(key$x3$..level == "..new") == 1)
expect_true(is.numeric(tr_values))
expect_equal(
new_values$x3[1],
key$x3$..value[key$x3$..level == "..new"]
)
expect_equal(
new_values$x3[2],
key$x3$..value[key$x3$..level == levels(ex_dat$x3)[1]]
)
expect_equal(
new_values$x3[3],
key$x3$..value[key$x3$..level == "..new"]
)
expect_equal(
new_values_fc$x3[1],
key$x3$..value[key$x3$..level == "..new"]
)
expect_equal(
new_values_fc$x3[2],
key$x3$..value[key$x3$..level == levels(ex_dat$x3)[1]]
)
expect_equal(
new_values_fc$x3[3],
key$x3$..value[key$x3$..level == "..new"]
)
expect_equal(
td_obj$level,
key$x3$..level
)
expect_equal(
td_obj$value,
key$x3$..value
)
})
test_that("factor encoded predictor", {
skip_on_cran()
skip_if_not_installed("rstanarm")
expect_snapshot(
transform = omit_warning("^(The largest R-hat is|Bulk Effective|Tail Effective)"),
{
set.seed(8283)
reg_test <- recipe(x1 ~ ., data = ex_dat) %>%
step_lencode_bayes(x3,
outcome = vars(x1),
verbose = FALSE,
options = opts
) %>%
prep(training = ex_dat, retain = TRUE)
}
)
tr_values <- bake(reg_test, new_data = NULL)$x3
new_values <- bake(reg_test, new_data = new_dat)
expect_snapshot(
new_values_ch <- bake(reg_test, new_data = new_dat_ch)
)
td_obj <- tidy(reg_test, number = 1)
key <- reg_test$steps[[1]]$mapping
expect_equal("x3", names(key))
expect_equal(
length(unique(ex_dat$x3)) + 1,
nrow(key$x3)
)
expect_true(sum(key$x3$..level == "..new") == 1)
expect_true(is.numeric(tr_values))
expect_equal(
new_values$x3[1],
key$x3$..value[key$x3$..level == "..new"]
)
expect_equal(
new_values$x3[2],
key$x3$..value[key$x3$..level == levels(ex_dat$x3)[1]]
)
expect_equal(
new_values$x3[3],
key$x3$..value[key$x3$..level == "..new"]
)
expect_equal(
new_values_ch$x3[1],
key$x3$..value[key$x3$..level == "..new"]
)
expect_equal(
new_values_ch$x3[2],
key$x3$..value[key$x3$..level == levels(ex_dat$x3)[1]]
)
expect_equal(
new_values_ch$x3[3],
key$x3$..value[key$x3$..level == "..new"]
)
expect_equal(
td_obj$level,
key$x3$..level
)
expect_equal(
td_obj$value,
key$x3$..value
)
})
test_that("character encoded predictor", {
skip_on_cran()
skip_if_not_installed("rstanarm")
expect_snapshot(
transform = omit_warning("^(The largest R-hat is|Bulk Effective|Tail Effective)"),
{
set.seed(8283)
reg_test <- recipe(x1 ~ ., data = ex_dat_ch) %>%
step_lencode_bayes(x3,
outcome = vars(x1),
verbose = FALSE,
options = opts
) %>%
prep(training = ex_dat_ch, retain = TRUE)
}
)
tr_values <- bake(reg_test, new_data = NULL)$x3
new_values <- bake(reg_test, new_data = new_dat_ch)
new_values_fc <- bake(reg_test, new_data = new_dat)
key <- reg_test$steps[[1]]$mapping
td_obj <- tidy(reg_test, number = 1)
expect_equal("x3", names(key))
expect_equal(
length(unique(ex_dat$x3)) + 1,
nrow(key$x3)
)
expect_true(sum(key$x3$..level == "..new") == 1)
expect_true(is.numeric(tr_values))
expect_equal(
new_values$x3[1],
key$x3$..value[key$x3$..level == "..new"]
)
expect_equal(
new_values$x3[2],
key$x3$..value[key$x3$..level == levels(ex_dat$x3)[1]]
)
expect_equal(
new_values$x3[3],
key$x3$..value[key$x3$..level == "..new"]
)
expect_equal(
new_values_fc$x3[1],
key$x3$..value[key$x3$..level == "..new"]
)
expect_equal(
new_values_fc$x3[2],
key$x3$..value[key$x3$..level == levels(ex_dat$x3)[1]]
)
expect_equal(
new_values_fc$x3[3],
key$x3$..value[key$x3$..level == "..new"]
)
expect_equal(
td_obj$level,
key$x3$..level
)
expect_equal(
td_obj$value,
key$x3$..value
)
})
test_that("Works with passing family ", {
skip_on_cran()
skip_if_not_installed("rstanarm")
ex_dat_poisson <- ex_dat %>%
mutate(outcome = rpois(n(), 5))
expect_snapshot(
transform = omit_warning("^(Bulk Effective|Tail Effective)"),
{
class_test <- recipe(outcome ~ ., data = ex_dat_poisson) %>%
step_lencode_bayes(x3,
outcome = vars(outcome),
verbose = FALSE,
options = c(opts, family = stats::poisson)
) %>%
prep(training = ex_dat_poisson, retain = TRUE)
}
)
tr_values <- bake(class_test, new_data = NULL)$x3
new_values <- bake(class_test, new_data = new_dat)
expect_snapshot(
new_values_ch <- bake(class_test, new_data = new_dat_ch)
)
key <- class_test$steps[[1]]$mapping
td_obj <- tidy(class_test, number = 1)
expect_equal("x3", names(key))
expect_equal(
length(unique(ex_dat$x3)) + 1,
nrow(key$x3)
)
expect_true(sum(key$x3$..level == "..new") == 1)
expect_true(is.numeric(tr_values))
expect_equal(
new_values$x3[1],
key$x3$..value[key$x3$..level == "..new"]
)
expect_equal(
new_values$x3[2],
key$x3$..value[key$x3$..level == levels(ex_dat$x3)[1]]
)
expect_equal(
new_values$x3[3],
key$x3$..value[key$x3$..level == "..new"]
)
expect_equal(
new_values_ch$x3[1],
key$x3$..value[key$x3$..level == "..new"]
)
expect_equal(
new_values_ch$x3[2],
key$x3$..value[key$x3$..level == levels(ex_dat$x3)[1]]
)
expect_equal(
new_values_ch$x3[3],
key$x3$..value[key$x3$..level == "..new"]
)
expect_equal(
td_obj$level,
key$x3$..level
)
expect_equal(
td_obj$value,
key$x3$..value
)
})
test_that("case weights", {
skip_on_cran()
skip_if_not_installed("rstanarm")
wts_int <- rep(c(0, 1), times = c(100, 400))
ex_dat_cw <- ex_dat %>%
mutate(wts = importance_weights(wts_int))
expect_snapshot(
transform = omit_warning("^^(Bulk Effective|Tail Effective|The largest)"),
{
class_test <- recipe(x2 ~ ., data = ex_dat_cw) %>%
step_lencode_bayes(x3,
outcome = vars(x2),
verbose = FALSE,
options = opts
) %>%
prep(training = ex_dat_cw, retain = TRUE)
junk <- capture.output(
ref_mod <- rstanarm::stan_glmer(
formula = x2 ~ (1 | value),
data = ex_dat_cw %>% transmute(value = x3, x2),
family = binomial(),
na.action = na.omit,
seed = 34677,
chains = 2,
iter = 500,
weights = wts_int,
)
)
}
)
expect_equal(
-coef(ref_mod)$value[[1]],
slice_head(class_test$steps[[1]]$mapping$x3, n = -1)$..value
)
expect_snapshot(class_test)
})
# Infrastructure ---------------------------------------------------------------
test_that("bake method errors when needed non-standard role columns are missing", {
skip_if_not_installed("rstanarm")
rec <- recipe(x2 ~ ., data = ex_dat) %>%
step_lencode_bayes(x3, outcome = vars(x2)) %>%
update_role(x3, new_role = "potato") %>%
update_role_requirements(role = "potato", bake = FALSE)
rec_trained <- prep(rec, training = ex_dat, verbose = FALSE)
expect_error(
bake(rec_trained, new_data = ex_dat[, -3]),
class = "new_data_missing_column"
)
})
test_that("empty printing", {
rec <- recipe(mpg ~ ., mtcars)
rec <- step_lencode_bayes(rec, outcome = vars(mpg))
expect_snapshot(rec)
rec <- prep(rec, mtcars)
expect_snapshot(rec)
})
test_that("empty selection prep/bake is a no-op", {
rec1 <- recipe(mpg ~ ., mtcars)
rec2 <- step_lencode_bayes(rec1, outcome = vars(mpg))
rec1 <- prep(rec1, mtcars)
rec2 <- prep(rec2, mtcars)
baked1 <- bake(rec1, mtcars)
baked2 <- bake(rec2, mtcars)
expect_identical(baked1, baked2)
})
test_that("empty selection tidy method works", {
rec <- recipe(mpg ~ ., mtcars)
rec <- step_lencode_bayes(rec, outcome = vars(mpg))
expect <- tibble(
terms = character(),
level = character(),
value = double(),
id = character()
)
expect_identical(tidy(rec, number = 1), expect)
rec <- prep(rec, mtcars)
expect_identical(tidy(rec, number = 1), expect)
})
test_that("printing", {
skip_if_not_installed("rstanarm")
rec <- recipe(x2 ~ ., data = ex_dat) %>%
step_lencode_bayes(x3,
outcome = vars(x2),
verbose = FALSE,
options = opts
)
expect_snapshot(print(rec))
expect_snapshot(
prep(rec),
transform = omit_warning("^(Bulk Effective|Tail Effective|The largest)")
)
})
|
# October 26, 2018
#' Class \code{Surrogate}
#'
#' General class for surrogate objects.
#'
#' @name Surrogate-class
#'
#' @keywords internal
#'
#' @slot we included to avoid VIRTUAL designation
setClass(Class = "Surrogate", slots = c("we" = "ANY"))
##########
## GENERICS
##########
#' @rdname DynTxRegime-internal-api
setGeneric(name = ".dPhiFunc",
def = function(surrogate, ...) { standardGeneric(f = ".dPhiFunc") })
#' @rdname DynTxRegime-internal-api
setGeneric(name = ".optim",
def = function(surrogate, ...) { standardGeneric(f = ".optim") })
#' @rdname DynTxRegime-internal-api
setGeneric(name = ".phiFunc",
def = function(surrogate, ...) { standardGeneric(f = ".phiFunc") })
##########
## METHODS
##########
#' Methods Available for Objects of Class \code{Surrogate}
#'
#' @name Surrogate-methods
#'
#' @keywords internal
NULL
#' \code{optim}
#' optimize objective function
#'
#' Utilizes stats::optim to obtain parameter estimates. Requires that
#' the objective function and its derivative are defined by the
#' calling learning method. Returns NULL if optimization is not successful
#' due to problems;
#' a vector of the current parameter estimates if optimization is not
#' successful because it hit the maximum number if iterations; and
#' the list object returned by stats::optim if optimization is successful
#'
#' @rdname Surrogate-methods
#' @importFrom stats optim
setMethod(f = ".optim",
signature = c(surrogate = "Surrogate"),
definition = function(surrogate,
par,
lambda,
fn,
gr,
suppress, ...) {
# determine if additional arguments to stats::optim were
# provided by the user
argList <- list(...)
# modify the print control based on suppress if not provided
# through elipsis
if ("control" %in% names(x = argList)) {
if (!{"trace" %in% names(x = argList[[ "control" ]])}) {
argList[[ "control" ]][[ "trace" ]] <- !suppress
}
} else {
argList[[ "control" ]] <- list("trace" = !suppress)
}
# set additional inputs for stats::optim
argList[[ "fn" ]] <- fn
argList[[ "gr" ]] <- gr
argList[[ "method" ]] <- "BFGS"
argList[[ "par" ]] <- par
argList[[ "lambda" ]] <- lambda
# call stats::optim
test <- do.call(what = stats::optim, args = argList)
# if stats::optim did not converge due to maximum iterations
# return current parameter estimates
if (test$convergence == 1L) {
return( test$par )
}
# if stats::optim did not converge for other reasonse, return
# NULL
if (test$convergence != 0L) {
cat("stats::optim() did not converge", test$convergence, "\n")
return( NULL )
}
# if stats::optim converged, return list object returned by
# stats::optim
return( test )
})
| /R/L_Surrogate.R | no_license | cran/DynTxRegime | R | false | false | 3,421 | r | # October 26, 2018
#' Class \code{Surrogate}
#'
#' General class for surrogate objects.
#'
#' @name Surrogate-class
#'
#' @keywords internal
#'
#' @slot we included to avoid VIRTUAL designation
setClass(Class = "Surrogate", slots = c("we" = "ANY"))
##########
## GENERICS
##########
#' @rdname DynTxRegime-internal-api
setGeneric(name = ".dPhiFunc",
def = function(surrogate, ...) { standardGeneric(f = ".dPhiFunc") })
#' @rdname DynTxRegime-internal-api
setGeneric(name = ".optim",
def = function(surrogate, ...) { standardGeneric(f = ".optim") })
#' @rdname DynTxRegime-internal-api
setGeneric(name = ".phiFunc",
def = function(surrogate, ...) { standardGeneric(f = ".phiFunc") })
##########
## METHODS
##########
#' Methods Available for Objects of Class \code{Surrogate}
#'
#' @name Surrogate-methods
#'
#' @keywords internal
NULL
#' \code{optim}
#' optimize objective function
#'
#' Utilizes stats::optim to obtain parameter estimates. Requires that
#' the objective function and its derivative are defined by the
#' calling learning method. Returns NULL if optimization is not successful
#' due to problems;
#' a vector of the current parameter estimates if optimization is not
#' successful because it hit the maximum number if iterations; and
#' the list object returned by stats::optim if optimization is successful
#'
#' @rdname Surrogate-methods
#' @importFrom stats optim
setMethod(f = ".optim",
signature = c(surrogate = "Surrogate"),
definition = function(surrogate,
par,
lambda,
fn,
gr,
suppress, ...) {
# determine if additional arguments to stats::optim were
# provided by the user
argList <- list(...)
# modify the print control based on suppress if not provided
# through elipsis
if ("control" %in% names(x = argList)) {
if (!{"trace" %in% names(x = argList[[ "control" ]])}) {
argList[[ "control" ]][[ "trace" ]] <- !suppress
}
} else {
argList[[ "control" ]] <- list("trace" = !suppress)
}
# set additional inputs for stats::optim
argList[[ "fn" ]] <- fn
argList[[ "gr" ]] <- gr
argList[[ "method" ]] <- "BFGS"
argList[[ "par" ]] <- par
argList[[ "lambda" ]] <- lambda
# call stats::optim
test <- do.call(what = stats::optim, args = argList)
# if stats::optim did not converge due to maximum iterations
# return current parameter estimates
if (test$convergence == 1L) {
return( test$par )
}
# if stats::optim did not converge for other reasonse, return
# NULL
if (test$convergence != 0L) {
cat("stats::optim() did not converge", test$convergence, "\n")
return( NULL )
}
# if stats::optim converged, return list object returned by
# stats::optim
return( test )
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/text_learner.R
\name{TextLearner_load_pretrained}
\alias{TextLearner_load_pretrained}
\title{Load_pretrained}
\usage{
TextLearner_load_pretrained(wgts_fname, vocab_fname, model = NULL)
}
\arguments{
\item{wgts_fname}{wgts_fname}
\item{vocab_fname}{vocab_fname}
\item{model}{model}
}
\value{
None
}
\description{
Load a pretrained model and adapt it to the data vocabulary.
}
| /man/TextLearner_load_pretrained.Rd | permissive | Cdk29/fastai | R | false | true | 455 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/text_learner.R
\name{TextLearner_load_pretrained}
\alias{TextLearner_load_pretrained}
\title{Load_pretrained}
\usage{
TextLearner_load_pretrained(wgts_fname, vocab_fname, model = NULL)
}
\arguments{
\item{wgts_fname}{wgts_fname}
\item{vocab_fname}{vocab_fname}
\item{model}{model}
}
\value{
None
}
\description{
Load a pretrained model and adapt it to the data vocabulary.
}
|
remove(list = ls())
library('dplyr')
library('ggplot2')
source('./utils.R')
setwd('/home/nischal/nischalshakya15.github.io/work-related-learning')
# R code for Java Programming One
javaProgrammingOneDf <- read.csv('data-sets/JavaProgrammingOne.csv')
colnames(javaProgrammingOneDf)
# Give meaning ful row names
javaProgrammingOneDf <- javaProgrammingOneDf %>% rename(
Tutorial.Full.Marks = Full.Marks, Tutorial.Marks.Obtained = Marks.Obtained,
Group.Project.Full.Marks = Full.Marks.1, Group.Project.Obtained.Marks = Marks.Obtained.1,
Assignment.Full.Marks = Full.Marks.2, Assignment.Marks.Obtained = Marks.Obtained.2,
Mid.Term.Full.Marks = Full.Marks.3, Mid.Term.Marks.Obtained = Marks.Obtained.3, Mid.Term.FifteenPercent.Marks = Marks.Obtained.4,
Lab.Test.Full.Marks = Full.Marks.4, Lab.Test.Marks.Obtained = Marks.Obtained.5, Lab.Test.FifteenPercent.Marks = Marks.Obtained.6,
FinalExam.Full.Marks = Full.Marks.5, Final.Exam.Marks.Obtained = Marks.Obtained.7, Final.Exam.FortyPercent.Marks = Marks.Obtained..40.)
colnames(javaProgrammingOneDf)
# Since, tutorial marks and group project marks is not used for final calculation of internal marks we can drop it from dataframe
javaProgrammingOneDf <- select(javaProgrammingOneDf, -c(Tutorial.Full.Marks, Tutorial.Marks.Obtained, Group.Project.Full.Marks, Group.Project.Obtained.Marks))
# filter unique grade
uniqueGradeJavaProgrammingOne <- getUniqueAttribute(javaProgrammingOneDf$Grade)
print(uniqueGradeJavaProgrammingOne)
javaProgrammingOneCountDf <- data.frame()
str(javaProgrammingOneDf)
for (u in uniqueGradeJavaProgrammingOne) {
javaProgrammingOneCountDf <- rbind(javaProgrammingOneCountDf,
data.frame(
NoOfStudent = javaProgrammingOneDf %>% filter(Grade == u) %>% count(Grade)
))
}
#Sort the count in ascending order on basics of grade
javaProgrammingOneCountDf <- sortInAscendingOrder(javaProgrammingOneCountDf, javaProgrammingOneCountDf$NoOfStudent.n)
plotBarGraph(df = javaProgrammingOneCountDf, x = javaProgrammingOneCountDf$NoOfStudent.Grade,
y = javaProgrammingOneCountDf$NoOfStudent.n, label = javaProgrammingOneCountDf$NoOfStudent.n,
title = 'No of student on basics of grade',
xlab = 'Grade', ylab = 'Total No of Student')
# find top 5 student
javaProgrammingOneTopFive <- findTopTen(javaProgrammingOneDf, javaProgrammingOneDf$Grade.Point, top = 5)
# Sort on basics of grade point
javaProgrammingOneTopFive <- sortInAscendingOrder(javaProgrammingOneTopFive, javaProgrammingOneTopFive$Grade.Point)
# Total number of student attending exam
totalStudentInJavaOne <- javaProgrammingOneCountDf %>% select(NoOfStudent.n) %>% sum()
print(totalStudentInJavaOne)
# Genearate summary
summary(javaProgrammingOneDf$Total)
# Find out standard deviation
sd(javaProgrammingOneDf$Total)
# R code for java programming one ends
# R code for java programming two
javaTwoDf <- read.csv('data-sets/JavaProgrammingTwo.csv')
colnames(javaTwoDf)
javaTwoDf <- javaTwoDf %>% rename(
Mid.Term.Full.Marks = Full.Marks, Mid.Term.Marks.Obtained = Score, Mid.Term.Fifteen.Percent.Marks = Marks.Obtained..15..,
Lab.Test.Full.Marks = Full.Marks.1, Lab.Test.Marks.Obtained = Marks.Obtained..15...1,
Assignment.Full.Marks = Full.Marks.2, Assignment.Marks.Obtained = Marks.Obtained..25..,
Tutorial.Full.Marks = Full.Marks.3, Tutorial.Marks.Obtained = Marks.Obtained..5..,
Internal.Marks = Internal.Marks..60..,
Final.Exam.Full.Marks = Full.Marks.4, Final.Exam.Marks.Obtained = Score.1, Final.Exam.FortyPercent.Marks = Marks.Obtained..40..,
Total = Total..100..,
Grade = Grade.after.40..rule
)
colnames(javaTwoDf)
# filter unique grade
uniqueGradeJavaTwo <- getUniqueAttribute(javaTwoDf$Grade)
print(uniqueGradeJavaTwo)
javaTwoCountDf <- data.frame()
str(javaTwoDf)
for (u in uniqueGradeJavaTwo) {
javaTwoCountDf <- rbind(javaTwoCountDf,
data.frame(
NoOfStudent = javaTwoDf %>% filter(Grade == u) %>% count(Grade)
))
}
#Sort the count in ascending order on basics of grade
javaTwoCountDf <- sortInAscendingOrder(javaTwoCountDf, javaTwoCountDf$NoOfStudent.n)
plotBarGraph(df = javaTwoCountDf, x = javaTwoCountDf$NoOfStudent.Grade,
y = javaTwoCountDf$NoOfStudent.n, label = javaTwoCountDf$NoOfStudent.n,
title = 'No of student on basics of grade',
xlab = 'Grade', ylab = 'Total No of Student')
# find top 5 student
javaTwoTopFive <- findTopTen(javaTwoDf, javaTwoDf$Grade.Point, top = 5)
# Sort on basics of grade point
javaTwoTopFive <- sortInAscendingOrder(javaTwoTopFive, javaTwoTopFive$Grade.Point)
# Total number of student attending exam
totalStudentInJavaTwo <- javaTwoCountDf %>% select(NoOfStudent.n) %>% sum()
print(totalStudentInJavaTwo)
# Genearate summary
summary(javaTwoDf$Total)
# Find out standard deviation
sd(javaTwoDf$Total)
# load csv file into data frame
javaWebDf <- read.csv('data-sets/JavaWebProgramming.csv')
colnames(javaWebDf)
javaWebDf <- javaWebDf %>% rename(
Mid.Term.Full.Marks = Full.Marks, Mid.Term.Marks.Obtained = Score, Mid.Term.Twenty.Percent.Marks = Marks.Obtained..20..,
Individual.Assignment.Full.Marks = Full.Marks.1, Individual.Assignment.Marks.Obtained = Assignment.Score..20.,
Group.Assignment.Full.Marks = Full.Marks.2, Group.Assignment.Marks.Obtained = Group.Proj..Score..20.,
Internal.Marks = Internal.Marks..60..,
Final.Exam.Full.Marks = Full.Marks.3, Final.Exam.Marks.Obtained = Score.1, Final.Exam.FortyPercent.Marks=Marks.Obtained..40..,
Total = Total..100..,
Grade = Grade.after.40..rule
)
colnames(javaWebDf)
# filter unique grade
uniqueGradeJavaWeb <- getUniqueAttribute(javaWebDf$Grade)
print(uniqueGradeJavaWeb)
javaWebCountDf <- data.frame()
str(javaWebDf)
for (u in uniqueGradeJavaWeb) {
javaWebCountDf <- rbind(javaWebCountDf,
data.frame(
NoOfStudent = javaWebDf %>% filter(Grade == u) %>% count(Grade)
))
}
#Sort the count in ascending order on basics of grade
javaWebCountDf <- sortInAscendingOrder(javaWebCountDf, javaWebCountDf$NoOfStudent.n)
plotBarGraph(df = javaWebCountDf, x = javaWebCountDf$NoOfStudent.Grade,
y = javaWebCountDf$NoOfStudent.n, label = javaWebCountDf$NoOfStudent.n,
title = 'No of student on basics of grade',
xlab = 'Grade', ylab = 'Total No of Student')
# find top 5 student
javaWebTopFive <- findTopTen(javaWebDf, javaWebDf$Grade.Point, top = 5)
# Sort on basics of grade point
javaWebTopFive <- sortInAscendingOrder(javaWebTopFive, javaWebTopFive$Grade.Point)
# Total number of student attending exam
totalStudentInJavaWeb <- javaWebCountDf %>% select(NoOfStudent.n) %>% sum()
print(totalStudentInJavaWeb)
# Genearate summary
summary(javaWebDf$Total)
# Find out standard deviation
sd(javaWebDf$Total) | /work-related-learning/data-sets/work-related-learning.R | no_license | nischalshakya15/nischalshakya15.github.io | R | false | false | 7,317 | r | remove(list = ls())
library('dplyr')
library('ggplot2')
source('./utils.R')
setwd('/home/nischal/nischalshakya15.github.io/work-related-learning')
# R code for Java Programming One
javaProgrammingOneDf <- read.csv('data-sets/JavaProgrammingOne.csv')
colnames(javaProgrammingOneDf)
# Give meaning ful row names
javaProgrammingOneDf <- javaProgrammingOneDf %>% rename(
Tutorial.Full.Marks = Full.Marks, Tutorial.Marks.Obtained = Marks.Obtained,
Group.Project.Full.Marks = Full.Marks.1, Group.Project.Obtained.Marks = Marks.Obtained.1,
Assignment.Full.Marks = Full.Marks.2, Assignment.Marks.Obtained = Marks.Obtained.2,
Mid.Term.Full.Marks = Full.Marks.3, Mid.Term.Marks.Obtained = Marks.Obtained.3, Mid.Term.FifteenPercent.Marks = Marks.Obtained.4,
Lab.Test.Full.Marks = Full.Marks.4, Lab.Test.Marks.Obtained = Marks.Obtained.5, Lab.Test.FifteenPercent.Marks = Marks.Obtained.6,
FinalExam.Full.Marks = Full.Marks.5, Final.Exam.Marks.Obtained = Marks.Obtained.7, Final.Exam.FortyPercent.Marks = Marks.Obtained..40.)
colnames(javaProgrammingOneDf)
# Since, tutorial marks and group project marks is not used for final calculation of internal marks we can drop it from dataframe
javaProgrammingOneDf <- select(javaProgrammingOneDf, -c(Tutorial.Full.Marks, Tutorial.Marks.Obtained, Group.Project.Full.Marks, Group.Project.Obtained.Marks))
# filter unique grade
uniqueGradeJavaProgrammingOne <- getUniqueAttribute(javaProgrammingOneDf$Grade)
print(uniqueGradeJavaProgrammingOne)
javaProgrammingOneCountDf <- data.frame()
str(javaProgrammingOneDf)
for (u in uniqueGradeJavaProgrammingOne) {
javaProgrammingOneCountDf <- rbind(javaProgrammingOneCountDf,
data.frame(
NoOfStudent = javaProgrammingOneDf %>% filter(Grade == u) %>% count(Grade)
))
}
#Sort the count in ascending order on basics of grade
javaProgrammingOneCountDf <- sortInAscendingOrder(javaProgrammingOneCountDf, javaProgrammingOneCountDf$NoOfStudent.n)
plotBarGraph(df = javaProgrammingOneCountDf, x = javaProgrammingOneCountDf$NoOfStudent.Grade,
y = javaProgrammingOneCountDf$NoOfStudent.n, label = javaProgrammingOneCountDf$NoOfStudent.n,
title = 'No of student on basics of grade',
xlab = 'Grade', ylab = 'Total No of Student')
# find top 5 student
javaProgrammingOneTopFive <- findTopTen(javaProgrammingOneDf, javaProgrammingOneDf$Grade.Point, top = 5)
# Sort on basics of grade point
javaProgrammingOneTopFive <- sortInAscendingOrder(javaProgrammingOneTopFive, javaProgrammingOneTopFive$Grade.Point)
# Total number of student attending exam
totalStudentInJavaOne <- javaProgrammingOneCountDf %>% select(NoOfStudent.n) %>% sum()
print(totalStudentInJavaOne)
# Genearate summary
summary(javaProgrammingOneDf$Total)
# Find out standard deviation
sd(javaProgrammingOneDf$Total)
# R code for java programming one ends
# R code for java programming two
javaTwoDf <- read.csv('data-sets/JavaProgrammingTwo.csv')
colnames(javaTwoDf)
javaTwoDf <- javaTwoDf %>% rename(
Mid.Term.Full.Marks = Full.Marks, Mid.Term.Marks.Obtained = Score, Mid.Term.Fifteen.Percent.Marks = Marks.Obtained..15..,
Lab.Test.Full.Marks = Full.Marks.1, Lab.Test.Marks.Obtained = Marks.Obtained..15...1,
Assignment.Full.Marks = Full.Marks.2, Assignment.Marks.Obtained = Marks.Obtained..25..,
Tutorial.Full.Marks = Full.Marks.3, Tutorial.Marks.Obtained = Marks.Obtained..5..,
Internal.Marks = Internal.Marks..60..,
Final.Exam.Full.Marks = Full.Marks.4, Final.Exam.Marks.Obtained = Score.1, Final.Exam.FortyPercent.Marks = Marks.Obtained..40..,
Total = Total..100..,
Grade = Grade.after.40..rule
)
colnames(javaTwoDf)
# filter unique grade
uniqueGradeJavaTwo <- getUniqueAttribute(javaTwoDf$Grade)
print(uniqueGradeJavaTwo)
javaTwoCountDf <- data.frame()
str(javaTwoDf)
for (u in uniqueGradeJavaTwo) {
javaTwoCountDf <- rbind(javaTwoCountDf,
data.frame(
NoOfStudent = javaTwoDf %>% filter(Grade == u) %>% count(Grade)
))
}
#Sort the count in ascending order on basics of grade
javaTwoCountDf <- sortInAscendingOrder(javaTwoCountDf, javaTwoCountDf$NoOfStudent.n)
plotBarGraph(df = javaTwoCountDf, x = javaTwoCountDf$NoOfStudent.Grade,
y = javaTwoCountDf$NoOfStudent.n, label = javaTwoCountDf$NoOfStudent.n,
title = 'No of student on basics of grade',
xlab = 'Grade', ylab = 'Total No of Student')
# find top 5 student
javaTwoTopFive <- findTopTen(javaTwoDf, javaTwoDf$Grade.Point, top = 5)
# Sort on basics of grade point
javaTwoTopFive <- sortInAscendingOrder(javaTwoTopFive, javaTwoTopFive$Grade.Point)
# Total number of student attending exam
totalStudentInJavaTwo <- javaTwoCountDf %>% select(NoOfStudent.n) %>% sum()
print(totalStudentInJavaTwo)
# Genearate summary
summary(javaTwoDf$Total)
# Find out standard deviation
sd(javaTwoDf$Total)
# load csv file into data frame
javaWebDf <- read.csv('data-sets/JavaWebProgramming.csv')
colnames(javaWebDf)
javaWebDf <- javaWebDf %>% rename(
Mid.Term.Full.Marks = Full.Marks, Mid.Term.Marks.Obtained = Score, Mid.Term.Twenty.Percent.Marks = Marks.Obtained..20..,
Individual.Assignment.Full.Marks = Full.Marks.1, Individual.Assignment.Marks.Obtained = Assignment.Score..20.,
Group.Assignment.Full.Marks = Full.Marks.2, Group.Assignment.Marks.Obtained = Group.Proj..Score..20.,
Internal.Marks = Internal.Marks..60..,
Final.Exam.Full.Marks = Full.Marks.3, Final.Exam.Marks.Obtained = Score.1, Final.Exam.FortyPercent.Marks=Marks.Obtained..40..,
Total = Total..100..,
Grade = Grade.after.40..rule
)
colnames(javaWebDf)
# filter unique grade
uniqueGradeJavaWeb <- getUniqueAttribute(javaWebDf$Grade)
print(uniqueGradeJavaWeb)
javaWebCountDf <- data.frame()
str(javaWebDf)
for (u in uniqueGradeJavaWeb) {
javaWebCountDf <- rbind(javaWebCountDf,
data.frame(
NoOfStudent = javaWebDf %>% filter(Grade == u) %>% count(Grade)
))
}
#Sort the count in ascending order on basics of grade
javaWebCountDf <- sortInAscendingOrder(javaWebCountDf, javaWebCountDf$NoOfStudent.n)
plotBarGraph(df = javaWebCountDf, x = javaWebCountDf$NoOfStudent.Grade,
y = javaWebCountDf$NoOfStudent.n, label = javaWebCountDf$NoOfStudent.n,
title = 'No of student on basics of grade',
xlab = 'Grade', ylab = 'Total No of Student')
# find top 5 student
javaWebTopFive <- findTopTen(javaWebDf, javaWebDf$Grade.Point, top = 5)
# Sort on basics of grade point
javaWebTopFive <- sortInAscendingOrder(javaWebTopFive, javaWebTopFive$Grade.Point)
# Total number of student attending exam
totalStudentInJavaWeb <- javaWebCountDf %>% select(NoOfStudent.n) %>% sum()
print(totalStudentInJavaWeb)
# Genearate summary
summary(javaWebDf$Total)
# Find out standard deviation
sd(javaWebDf$Total) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/suggest_cosmic_signatures_to_remove.R
\name{suggest_cosmic_signatures_to_remove}
\alias{suggest_cosmic_signatures_to_remove}
\title{Identify mutational signatures to exclude from analysis}
\usage{
suggest_cosmic_signatures_to_remove(
cancer_type = NULL,
treatment_naive = NULL,
quiet = FALSE
)
}
\arguments{
\item{cancer_type}{See chart on website for possible cancer type labels}
\item{treatment_naive}{give TRUE if samples were taken pre-treatment; FALSE or leave NULL otherwise}
\item{quiet}{(default false) for non-interactive use, suppress explanations and advice}
}
\value{
a string of signatures to feed to signatures_to_remove
}
\description{
Get suggestions on signatures_to_remove for trinuc_mutation_rates for COSMIC v3/v3.1 signatures.
For details, see \code{vignette("cosmic_cancer_type_note")}.
}
| /man/suggest_cosmic_signatures_to_remove.Rd | no_license | sbidochko/cancereffectsizeR | R | false | true | 897 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/suggest_cosmic_signatures_to_remove.R
\name{suggest_cosmic_signatures_to_remove}
\alias{suggest_cosmic_signatures_to_remove}
\title{Identify mutational signatures to exclude from analysis}
\usage{
suggest_cosmic_signatures_to_remove(
cancer_type = NULL,
treatment_naive = NULL,
quiet = FALSE
)
}
\arguments{
\item{cancer_type}{See chart on website for possible cancer type labels}
\item{treatment_naive}{give TRUE if samples were taken pre-treatment; FALSE or leave NULL otherwise}
\item{quiet}{(default false) for non-interactive use, suppress explanations and advice}
}
\value{
a string of signatures to feed to signatures_to_remove
}
\description{
Get suggestions on signatures_to_remove for trinuc_mutation_rates for COSMIC v3/v3.1 signatures.
For details, see \code{vignette("cosmic_cancer_type_note")}.
}
|
#!/usr/bin/Rscript
# a utility to convert from csv to rds
# written by Eric Bridgeford
# usage:
# ./csv2rds.R csvfile.csv rdsfile.rds
readcsv <- function(filename) {
data <- readLines(file(filename, 'r'))
return(matrix(as.numeric(unlist(strsplit(data, split="\\s+"))), nrow=length(data), byrow=TRUE))
}
args <- commandArgs(trailingOnly = TRUE)
csvpath <- args[[1]]
rdspath <- args[[2]]
csvobj <- readcsv(csvpath)
saveRDS(csvobj, rdspath)
| /data_processing/csv2rds.R | permissive | NeuroDataDesign/fngs-archive | R | false | false | 452 | r | #!/usr/bin/Rscript
# a utility to convert from csv to rds
# written by Eric Bridgeford
# usage:
# ./csv2rds.R csvfile.csv rdsfile.rds
readcsv <- function(filename) {
data <- readLines(file(filename, 'r'))
return(matrix(as.numeric(unlist(strsplit(data, split="\\s+"))), nrow=length(data), byrow=TRUE))
}
args <- commandArgs(trailingOnly = TRUE)
csvpath <- args[[1]]
rdspath <- args[[2]]
csvobj <- readcsv(csvpath)
saveRDS(csvobj, rdspath)
|
devtools::install_github("AllenInstitute/scrattch.hicat", ref = "dev")
library(scrattch.hicat)
library(scrattch.io)
library(dplyr)
library(purrr)
library(Matrix)
options(stringsAsFactors = F)
load("../common/tss_regions.rda")
tss_regions$start <- tss_regions$start + 2e4
tss_regions$end <- tss_regions$end - 2e4
tss_regions <- tss_regions[,-5]
tome <- "//allen/programs/celltypes/workgroups/rnaseqanalysis/shiny/tomes/facs/mouse_V1_ALM_20180520/transcrip.tome"
data <- read_tome_dgCMatrix(tome,"/data/exon")
data <- Matrix::t(data)
sample_names <- read_tome_sample_names(tome)
gene_names <- read_tome_gene_names(tome)
colnames(data) <- sample_names
rownames(data) <- gene_names
anno <- read_tome_anno(tome)
anno <- anno %>%
filter(cluster_id < 134) %>%
filter(!grepl("ALM",cluster_label))
data <- data[,anno$sample_name]
norm_data <- log2(data + 1)
# group-level comparisons
grouping <- read.csv("../common/cluster_grouping_for_tracks.csv")
names(grouping) <- sub("pred_","",names(grouping))
anno <- left_join(anno, grouping)
group_anno <- anno %>%
select(group_id, group_label) %>%
unique() %>%
arrange(group_id)
group_cl <- as.factor(anno$group_id)
names(group_cl) <- anno$sample_name
group_markers <- map(group_anno$group_id,
function(x) {
print(paste("group",x))
group_specific_markers(x,
norm_data,
group_cl,
de.param = de_param(),
n.markers = 20)
})
names(group_markers) <- group_anno$group_label
group_markers_df <- map_dfr(1:length(group_markers),
function(x) {
df <- group_markers[[x]]
df$group_label <- names(group_markers)[x]
names(df)[1] <- "gene_name"
df %>%
left_join(tss_regions,
by = c("gene_name" = "name")) %>%
select(group_label,
gene_name, chr, start, end, strand,
everything())
})
write.csv(group_markers_df, "../common/rnaseq_group_markers.csv", row.names = F)
# subclass-level
subclass_anno <- anno %>%
select(subclass_id, subclass_label) %>%
unique() %>%
arrange(subclass_id)
subclass_cl <- as.factor(anno$subclass_id)
names(subclass_cl) <- anno$sample_name
possible_markers <- possibly(group_specific_markers, NULL)
subclass_markers <- map(subclass_anno$subclass_id,
function(x) {
print(paste("subclass",x))
possible_markers(x,
norm_data,
subclass_cl,
de.param = de_param(),
n.markers = 20)
})
names(subclass_markers) <- subclass_anno$subclass_label
subclass_markers_df <- map_dfr(1:length(subclass_markers),
function(x) {
df <- subclass_markers[[x]]
if(!is.null(df)) {
df$subclass_label <- names(subclass_markers)[x]
names(df)[1] <- "gene_name"
df %>%
left_join(tss_regions,
by = c("gene_name" = "name")) %>%
select(subclass_label,
gene_name, chr, start, end, strand,
everything())
}
})
write.csv(subclass_markers_df, "../common/rnaseq_subclass_markers.csv", row.names = F)
# cluster-level
cluster_anno <- anno %>%
select(cluster_id, cluster_label) %>%
unique() %>%
arrange(cluster_id)
cluster_cl <- as.factor(anno$cluster_id)
names(cluster_cl) <- anno$sample_name
cluster_markers <- map(cluster_anno$cluster_id,
function(x) {
print(paste("cluster",x))
possible_markers(x,
norm_data,
cluster_cl,
de.param = de_param(),
n.markers = 20)
})
names(cluster_markers) <- cluster_anno$cluster_label
cluster_markers_df <- map_dfr(1:length(cluster_markers),
function(x) {
df <- cluster_markers[[x]]
if(!is.null(df)) {
df$cluster_label <- names(cluster_markers)[x]
names(df)[1] <- "gene_name"
df %>%
left_join(tss_regions,
by = c("gene_name" = "name")) %>%
select(cluster_label,
gene_name, chr, start, end, strand,
everything())
}
})
write.csv(cluster_markers_df, "../common/rnaseq_cluster_markers.csv", row.names = F)
| /gb_analysis/20_generate_RNAseq_markers.R | permissive | adrisede/graybuck2019analysis | R | false | false | 5,750 | r | devtools::install_github("AllenInstitute/scrattch.hicat", ref = "dev")
library(scrattch.hicat)
library(scrattch.io)
library(dplyr)
library(purrr)
library(Matrix)
options(stringsAsFactors = F)
load("../common/tss_regions.rda")
tss_regions$start <- tss_regions$start + 2e4
tss_regions$end <- tss_regions$end - 2e4
tss_regions <- tss_regions[,-5]
tome <- "//allen/programs/celltypes/workgroups/rnaseqanalysis/shiny/tomes/facs/mouse_V1_ALM_20180520/transcrip.tome"
data <- read_tome_dgCMatrix(tome,"/data/exon")
data <- Matrix::t(data)
sample_names <- read_tome_sample_names(tome)
gene_names <- read_tome_gene_names(tome)
colnames(data) <- sample_names
rownames(data) <- gene_names
anno <- read_tome_anno(tome)
anno <- anno %>%
filter(cluster_id < 134) %>%
filter(!grepl("ALM",cluster_label))
data <- data[,anno$sample_name]
norm_data <- log2(data + 1)
# group-level comparisons
grouping <- read.csv("../common/cluster_grouping_for_tracks.csv")
names(grouping) <- sub("pred_","",names(grouping))
anno <- left_join(anno, grouping)
group_anno <- anno %>%
select(group_id, group_label) %>%
unique() %>%
arrange(group_id)
group_cl <- as.factor(anno$group_id)
names(group_cl) <- anno$sample_name
group_markers <- map(group_anno$group_id,
function(x) {
print(paste("group",x))
group_specific_markers(x,
norm_data,
group_cl,
de.param = de_param(),
n.markers = 20)
})
names(group_markers) <- group_anno$group_label
group_markers_df <- map_dfr(1:length(group_markers),
function(x) {
df <- group_markers[[x]]
df$group_label <- names(group_markers)[x]
names(df)[1] <- "gene_name"
df %>%
left_join(tss_regions,
by = c("gene_name" = "name")) %>%
select(group_label,
gene_name, chr, start, end, strand,
everything())
})
write.csv(group_markers_df, "../common/rnaseq_group_markers.csv", row.names = F)
# subclass-level
subclass_anno <- anno %>%
select(subclass_id, subclass_label) %>%
unique() %>%
arrange(subclass_id)
subclass_cl <- as.factor(anno$subclass_id)
names(subclass_cl) <- anno$sample_name
possible_markers <- possibly(group_specific_markers, NULL)
subclass_markers <- map(subclass_anno$subclass_id,
function(x) {
print(paste("subclass",x))
possible_markers(x,
norm_data,
subclass_cl,
de.param = de_param(),
n.markers = 20)
})
names(subclass_markers) <- subclass_anno$subclass_label
subclass_markers_df <- map_dfr(1:length(subclass_markers),
function(x) {
df <- subclass_markers[[x]]
if(!is.null(df)) {
df$subclass_label <- names(subclass_markers)[x]
names(df)[1] <- "gene_name"
df %>%
left_join(tss_regions,
by = c("gene_name" = "name")) %>%
select(subclass_label,
gene_name, chr, start, end, strand,
everything())
}
})
write.csv(subclass_markers_df, "../common/rnaseq_subclass_markers.csv", row.names = F)
# cluster-level
cluster_anno <- anno %>%
select(cluster_id, cluster_label) %>%
unique() %>%
arrange(cluster_id)
cluster_cl <- as.factor(anno$cluster_id)
names(cluster_cl) <- anno$sample_name
cluster_markers <- map(cluster_anno$cluster_id,
function(x) {
print(paste("cluster",x))
possible_markers(x,
norm_data,
cluster_cl,
de.param = de_param(),
n.markers = 20)
})
names(cluster_markers) <- cluster_anno$cluster_label
cluster_markers_df <- map_dfr(1:length(cluster_markers),
function(x) {
df <- cluster_markers[[x]]
if(!is.null(df)) {
df$cluster_label <- names(cluster_markers)[x]
names(df)[1] <- "gene_name"
df %>%
left_join(tss_regions,
by = c("gene_name" = "name")) %>%
select(cluster_label,
gene_name, chr, start, end, strand,
everything())
}
})
write.csv(cluster_markers_df, "../common/rnaseq_cluster_markers.csv", row.names = F)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/numberFormatters.R
\name{num_to_word}
\alias{num_to_word}
\title{num_to_words}
\usage{
num_to_word(x)
}
\arguments{
\item{x}{a number to convert to a string}
}
\value{
a string
}
\examples{
num_to_word(3)
}
| /man/num_to_word.Rd | permissive | murraycadzow/phdFunctions | R | false | true | 285 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/numberFormatters.R
\name{num_to_word}
\alias{num_to_word}
\title{num_to_words}
\usage{
num_to_word(x)
}
\arguments{
\item{x}{a number to convert to a string}
}
\value{
a string
}
\examples{
num_to_word(3)
}
|
## Question 3.
## Of the four types of sources indicated by the type (point, nonpoint, onroad,
## nonroad) variable, which of these four sources have seen decreases in emissions
## from 1999–2008 for Baltimore City? Which have seen increases in emissions from
## 1999–2008?
## Use the ggplot2 plotting system to make a plot to answer this question.
library(plyr)
library(ggplot2)
NEI <- readRDS("../summarySCC_PM25.rds")
SCC <- readRDS("../Source_Classification_Code.rds")
## Baltimore NEI dataset
bNEI <- subset(NEI, fips == "24510")
## Summarised NEI: Total emissions from 1999 to 2008 in Baltimore City
sNEI <- ddply(bNEI, .(year, type), summarise, total = sum(Emissions))
## plot graph to png file
png(filename = "plot3.png", width = 960, height = 480)
qplot(year, total, data = sNEI, geom = c('point','line'), facets = . ~ type,
xlab = "Year", ylab = "PM2.5 Emissions",
main = "Baltimore City - Total emissions (1999 to 2008)")
dev.off()
| /plot3.R | no_license | wunzeco/exdata-project2 | R | false | false | 970 | r | ## Question 3.
## Of the four types of sources indicated by the type (point, nonpoint, onroad,
## nonroad) variable, which of these four sources have seen decreases in emissions
## from 1999–2008 for Baltimore City? Which have seen increases in emissions from
## 1999–2008?
## Use the ggplot2 plotting system to make a plot to answer this question.
library(plyr)
library(ggplot2)
NEI <- readRDS("../summarySCC_PM25.rds")
SCC <- readRDS("../Source_Classification_Code.rds")
## Baltimore NEI dataset
bNEI <- subset(NEI, fips == "24510")
## Summarised NEI: Total emissions from 1999 to 2008 in Baltimore City
sNEI <- ddply(bNEI, .(year, type), summarise, total = sum(Emissions))
## plot graph to png file
png(filename = "plot3.png", width = 960, height = 480)
qplot(year, total, data = sNEI, geom = c('point','line'), facets = . ~ type,
xlab = "Year", ylab = "PM2.5 Emissions",
main = "Baltimore City - Total emissions (1999 to 2008)")
dev.off()
|
'
Author: Rory Pulvino
Date: Jan. 20, 2017
Title: Twitter Sports Sentiment Analysis
'
################################################################
############ Packages ##############
################################################################
pkgs <- c("purrr", "data.table", "dplyr", "tidyr", "readr", "lubridate", "stringr",
"ggplot2", "igraph", "tidytext", "FactoMineR", "openNLP", "fpc", "wordcloud")
allLoaded <- sapply(pkgs, require, character.only = TRUE)
################################################################
############ Data ##############
################################################################
df <- read.csv("~/Dropbox/Python/jupyter-blog/content/Twitter_soccer/Tweets_sports.csv", header = TRUE)
################################################################
############ Tidying data ##############
################################################################
'Website guidance: http://varianceexplained.org/r/trump-tweets/
'
reg <- "([^A-Za-z\\d#@']|'(?![A-Za-z\\d#@]))"
# Creating a tokenized data frame where each row is a word from a tweet
tweet_words <- df %>%
filter(!str_detect(Text, '^"') & !str_detect(Text, '^RT')) %>% # Removes tweets that are quotes or retweets as these are not the users' own words
mutate(Text = str_replace_all(Text, "https://t.co/[A-Za-z\\d]+|&", "")) %>% # Removing links
mutate(Text = str_replace_all(Text, "http://t.co/[A-Za-z\\d]+|&", "")) %>% # Removing links
unnest_tokens(word, Text, token = "regex", pattern = reg, to_lower = TRUE) %>%
filter(!word %in% stop_words$word,
str_detect(word, "[a-z]")) # Removing 'stop words'
# Creating a dataframe of tweets without RT and links
tweets_cleaned <- df %>%
filter(!str_detect(Text, '^"') & !str_detect(Text, '^RT')) %>% # Removes tweets that are quotes or retweets as these are not the users' own words
mutate(Text = str_replace_all(Text, "https://t.co/[A-Za-z\\d]+|&", "")) %>% # Removing links
mutate(Text = str_replace_all(Text, "http://t.co/[A-Za-z\\d]+|&", "")) # Removing links
# Pulling a random sample of full tweets to train data set on for later clustering use
sample <- tweets_cleaned[sample(nrow(tweets_cleaned), 5000), ]
write.csv(sample, file = "~/Dropbox/Python/jupyter-blog/content/Twitter_soccer/sample.csv", row.names = FALSE)
'From this sample, two new columns are added - "Athletes" and
"Female". The "Athletes" column will be a dummy variable
indicating whether the tweet is about an athelete or team. The
"Female" column will be a dummy variable indicating whether the
tweet is about female athletes or teams.'
################################################################
############ Exploring data ##############
################################################################
women_top_words <- tweet_words %>% # Dataset of top words from female tweeters
filter(Sex == 'F') %>%
count(word, sort = TRUE)
men_top_words <- tweet_words %>% # Dataset of top words from male tweeters
filter(Sex == 'M') %>%
count(word, sort = TRUE)
# Graphing top words by gender
ggplot(data = subset(men_top_words, n > 1000))+
geom_bar(aes(factor(word), y = n), stat = 'identity', fill = 'darkorange', colour = 'dodgerblue')+
coord_flip(ylim = c(0,3200))+
labs(title = "Top Words Used by Male Tweeters",
y = "Number of Instances", x = "Word")
ggplot(data = subset(women_top_words, n > 730))+
geom_bar(aes(factor(word), y = n), stat = 'identity', fill = 'darkorange', colour = 'dodgerblue')+
coord_flip(ylim = c(0,2400))+
labs(title = "Top Words Used by Female Tweeters",
y = "Number of Instances", x = "Word")
################################################################
############ Analyzing data ##############
################################################################
'We only want tweets pertaining to sports for our analysis since
we are interested in asking how sportscasters talk about sports.
Separating out the tweets about sports seems pretty difficult,
but the plan is to use some form of clustering to group the
tweets into two bins that hopefully separate out between sports
and non-sports tweets.
After separating out the tweets, we will move on to the sentiment
analysis. We may attempt another clustering to group texts based
on whether they are about women or men sports as well.'
# Clustering
'Website guidance: http://www.rdatamining.com/docs/twitter-analysis-with-r
This includes guidance for LDA clustering'
| /Twitter_soccer/twitter_sports_analysis.R | no_license | adp04c/Blog_Content | R | false | false | 4,625 | r | '
Author: Rory Pulvino
Date: Jan. 20, 2017
Title: Twitter Sports Sentiment Analysis
'
################################################################
############ Packages ##############
################################################################
pkgs <- c("purrr", "data.table", "dplyr", "tidyr", "readr", "lubridate", "stringr",
"ggplot2", "igraph", "tidytext", "FactoMineR", "openNLP", "fpc", "wordcloud")
allLoaded <- sapply(pkgs, require, character.only = TRUE)
################################################################
############ Data ##############
################################################################
df <- read.csv("~/Dropbox/Python/jupyter-blog/content/Twitter_soccer/Tweets_sports.csv", header = TRUE)
################################################################
############ Tidying data ##############
################################################################
'Website guidance: http://varianceexplained.org/r/trump-tweets/
'
reg <- "([^A-Za-z\\d#@']|'(?![A-Za-z\\d#@]))"
# Creating a tokenized data frame where each row is a word from a tweet
tweet_words <- df %>%
filter(!str_detect(Text, '^"') & !str_detect(Text, '^RT')) %>% # Removes tweets that are quotes or retweets as these are not the users' own words
mutate(Text = str_replace_all(Text, "https://t.co/[A-Za-z\\d]+|&", "")) %>% # Removing links
mutate(Text = str_replace_all(Text, "http://t.co/[A-Za-z\\d]+|&", "")) %>% # Removing links
unnest_tokens(word, Text, token = "regex", pattern = reg, to_lower = TRUE) %>%
filter(!word %in% stop_words$word,
str_detect(word, "[a-z]")) # Removing 'stop words'
# Creating a dataframe of tweets without RT and links
tweets_cleaned <- df %>%
filter(!str_detect(Text, '^"') & !str_detect(Text, '^RT')) %>% # Removes tweets that are quotes or retweets as these are not the users' own words
mutate(Text = str_replace_all(Text, "https://t.co/[A-Za-z\\d]+|&", "")) %>% # Removing links
mutate(Text = str_replace_all(Text, "http://t.co/[A-Za-z\\d]+|&", "")) # Removing links
# Pulling a random sample of full tweets to train data set on for later clustering use
sample <- tweets_cleaned[sample(nrow(tweets_cleaned), 5000), ]
write.csv(sample, file = "~/Dropbox/Python/jupyter-blog/content/Twitter_soccer/sample.csv", row.names = FALSE)
'From this sample, two new columns are added - "Athletes" and
"Female". The "Athletes" column will be a dummy variable
indicating whether the tweet is about an athelete or team. The
"Female" column will be a dummy variable indicating whether the
tweet is about female athletes or teams.'
################################################################
############ Exploring data ##############
################################################################
women_top_words <- tweet_words %>% # Dataset of top words from female tweeters
filter(Sex == 'F') %>%
count(word, sort = TRUE)
men_top_words <- tweet_words %>% # Dataset of top words from male tweeters
filter(Sex == 'M') %>%
count(word, sort = TRUE)
# Graphing top words by gender
ggplot(data = subset(men_top_words, n > 1000))+
geom_bar(aes(factor(word), y = n), stat = 'identity', fill = 'darkorange', colour = 'dodgerblue')+
coord_flip(ylim = c(0,3200))+
labs(title = "Top Words Used by Male Tweeters",
y = "Number of Instances", x = "Word")
ggplot(data = subset(women_top_words, n > 730))+
geom_bar(aes(factor(word), y = n), stat = 'identity', fill = 'darkorange', colour = 'dodgerblue')+
coord_flip(ylim = c(0,2400))+
labs(title = "Top Words Used by Female Tweeters",
y = "Number of Instances", x = "Word")
################################################################
############ Analyzing data ##############
################################################################
'We only want tweets pertaining to sports for our analysis since
we are interested in asking how sportscasters talk about sports.
Separating out the tweets about sports seems pretty difficult,
but the plan is to use some form of clustering to group the
tweets into two bins that hopefully separate out between sports
and non-sports tweets.
After separating out the tweets, we will move on to the sentiment
analysis. We may attempt another clustering to group texts based
on whether they are about women or men sports as well.'
# Clustering
'Website guidance: http://www.rdatamining.com/docs/twitter-analysis-with-r
This includes guidance for LDA clustering'
|
/3_Forcing_TMAX.R | no_license | DaDaDaDaDaLi/VIC_data_process | R | false | false | 4,477 | r | ||
##rCharts
##Gathers data and computes Share Price for a stuck, outputs a plot
SPsolo <- function(Ticker,months=6){
library(ggplot2)
library(dplyr)
library(grid)
library(quantmod)
library(TTR)
library(lubridate)
library(scales)
histdata <- getSymbols.yahoo(paste(Ticker,".AX",sep = ""),env = .GlobalEnv,return.class = "data.frame",auto.assign=FALSE)
histdata[,7] <- row.names(histdata)
histdata$V7 <- as.POSIXlt(histdata$V7,format = "%Y-%m-%d")
histdata$V7 <- as.Date(histdata$V7)
rownames(histdata) <- NULL
histdata <- histdata[,c(7,4)]
colnames(histdata) <- c("Date","SharePrice")
histdata$Date <- as.Date(histdata$Date)
data <- histdata
rownames(data) <- NULL
data$MA50 <- SMA(x = data$SharePrice,n=50)
data$EMA50 <- EMA(x = data$SharePrice,n = 50)
data <- data[data$Date > Sys.Date() - months(months),]
data <- na.exclude(data)
data$min <- min(data$SharePrice)*0.8
##Share Price Chart
b <- ggplot(data = data,x = Date) +
geom_ribbon(aes(x = Date,ymin=min, ymax=SharePrice), fill="orange",alpha=0.5)+
geom_line(aes(x = Date,y = SharePrice),color = "darkorange",lwd=0.5) +
geom_line(aes(x = Date,y = MA50,color="MA50"),lwd=0.5) +
geom_line(aes(x = Date,y = EMA50,color = "EMA50"),lwd=0.5) +
scale_color_manual(values=c("MA50" = "red","EMA50"="black"))+
theme(legend.title=element_blank(),
plot.title = element_text(lineheight=1,face="bold",vjust = 0.25,hjust = 0.0),
legend.justification=c(0,0),
legend.position=c(0,0),
legend.background = element_rect(colour = 'lightgrey', fill = 'lightgrey'),
plot.margin=unit(c(0,10,1,3),"mm"))+
scale_x_date(expand=c(0,0)) +
scale_y_continuous(labels = dollar_format(largest_with_cents = 5),
limits = c(min(data$SharePrice)*0.8,max(data$SharePrice)*1.1),expand = c(0,0))+
labs(title = paste(Ticker," Share Price",sep = ""),x = NULL,y = NULL)+
geom_text(data = subset(data[nrow(data),]),aes(x = Date,y = SharePrice, label = SharePrice),hjust=1, vjust=0,size=4,colour = "darkgreen") +
geom_text(data = subset(data[nrow(data),]),aes(x = Date,y = SharePrice, label = Date),hjust=1, vjust=-1.5,size=4,colour = "darkgreen")
b
} | /rCharts.R | no_license | highandhigh/nwGitTest | R | false | false | 2,491 | r | ##rCharts
##Gathers data and computes Share Price for a stuck, outputs a plot
SPsolo <- function(Ticker,months=6){
library(ggplot2)
library(dplyr)
library(grid)
library(quantmod)
library(TTR)
library(lubridate)
library(scales)
histdata <- getSymbols.yahoo(paste(Ticker,".AX",sep = ""),env = .GlobalEnv,return.class = "data.frame",auto.assign=FALSE)
histdata[,7] <- row.names(histdata)
histdata$V7 <- as.POSIXlt(histdata$V7,format = "%Y-%m-%d")
histdata$V7 <- as.Date(histdata$V7)
rownames(histdata) <- NULL
histdata <- histdata[,c(7,4)]
colnames(histdata) <- c("Date","SharePrice")
histdata$Date <- as.Date(histdata$Date)
data <- histdata
rownames(data) <- NULL
data$MA50 <- SMA(x = data$SharePrice,n=50)
data$EMA50 <- EMA(x = data$SharePrice,n = 50)
data <- data[data$Date > Sys.Date() - months(months),]
data <- na.exclude(data)
data$min <- min(data$SharePrice)*0.8
##Share Price Chart
b <- ggplot(data = data,x = Date) +
geom_ribbon(aes(x = Date,ymin=min, ymax=SharePrice), fill="orange",alpha=0.5)+
geom_line(aes(x = Date,y = SharePrice),color = "darkorange",lwd=0.5) +
geom_line(aes(x = Date,y = MA50,color="MA50"),lwd=0.5) +
geom_line(aes(x = Date,y = EMA50,color = "EMA50"),lwd=0.5) +
scale_color_manual(values=c("MA50" = "red","EMA50"="black"))+
theme(legend.title=element_blank(),
plot.title = element_text(lineheight=1,face="bold",vjust = 0.25,hjust = 0.0),
legend.justification=c(0,0),
legend.position=c(0,0),
legend.background = element_rect(colour = 'lightgrey', fill = 'lightgrey'),
plot.margin=unit(c(0,10,1,3),"mm"))+
scale_x_date(expand=c(0,0)) +
scale_y_continuous(labels = dollar_format(largest_with_cents = 5),
limits = c(min(data$SharePrice)*0.8,max(data$SharePrice)*1.1),expand = c(0,0))+
labs(title = paste(Ticker," Share Price",sep = ""),x = NULL,y = NULL)+
geom_text(data = subset(data[nrow(data),]),aes(x = Date,y = SharePrice, label = SharePrice),hjust=1, vjust=0,size=4,colour = "darkgreen") +
geom_text(data = subset(data[nrow(data),]),aes(x = Date,y = SharePrice, label = Date),hjust=1, vjust=-1.5,size=4,colour = "darkgreen")
b
} |
library(portalr)
### Name: download_observations
### Title: Download the PortalData repo
### Aliases: download_observations
### ** Examples
## No test:
download_observations()
download_observations("~/old-data", version = "1.50.0")
## End(No test)
| /data/genthat_extracted_code/portalr/examples/download_observations.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 261 | r | library(portalr)
### Name: download_observations
### Title: Download the PortalData repo
### Aliases: download_observations
### ** Examples
## No test:
download_observations()
download_observations("~/old-data", version = "1.50.0")
## End(No test)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stats.R
\name{prune_cophylo}
\alias{prune_cophylo}
\title{Pruning a cophylogeny}
\usage{
prune_cophylo(cophy)
}
\arguments{
\item{cophy}{an object of class cophylogeny that contains a host tree with one
associated parasite tree.}
}
\description{
This function prunes a cophylogeny object to include only extant species.
The function returns the two trees and the associations of the tips of the parasite tree with the host tree.
This can then be used to plot a tanglegram, as shown in the example.
}
\examples{
coph <- rcophylo(tmax=5)
cophPruned <- prune_cophylo(coph)
plot(phytools::cophylo(cophPruned$prunedHtree, cophPruned$prunedPtree, cophPruned$tipAssociations))
}
| /man/prune_Cophylo.Rd | no_license | JanEngelstaedter/cophy | R | false | true | 750 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stats.R
\name{prune_cophylo}
\alias{prune_cophylo}
\title{Pruning a cophylogeny}
\usage{
prune_cophylo(cophy)
}
\arguments{
\item{cophy}{an object of class cophylogeny that contains a host tree with one
associated parasite tree.}
}
\description{
This function prunes a cophylogeny object to include only extant species.
The function returns the two trees and the associations of the tips of the parasite tree with the host tree.
This can then be used to plot a tanglegram, as shown in the example.
}
\examples{
coph <- rcophylo(tmax=5)
cophPruned <- prune_cophylo(coph)
plot(phytools::cophylo(cophPruned$prunedHtree, cophPruned$prunedPtree, cophPruned$tipAssociations))
}
|
library(AICcmodavg)
library(SDMTools)
library(ROCR)
library(raster)
library(rgdal)
library(lme4)
library(corrplot)
library(ggplot2)
library(dismo)
library(caret)
library(hier.part)
library(ltm)
#new eval with TSS/sens/spec/cutoff that optimizes TSS (basically same as max sens/spec)
###AOC
#setwd("E:\\FinalModels\\Predictors\\AOC")
setwd("C:\\Users\\eliza\\Documents\\UGA\\Thesis\\NicheModels_2019\\EvalEdits")
df<-read.csv('AOCfin.csv')
lr04<- glm(PA ~ pland_bh100 + I(pland_bh100^2) + ed_msh1km + I(ed_msh1km^2) + urb1kmf + ow1kmf + I(ow1kmf^2), family=binomial(link="logit"), df, na.action=na.pass)
#each coef represents the change in the log odds of being 1/0 for a unit increase in that predictor while holding all other predictors constant
#the exp of each coef
## Top model estimates, odds ratios, and 95% CI
coef.results<- cbind(coef(lr04),confint(lr04),OR = exp(coef(lr04)))
coef.results <- cbind(coef.results, exp(confint(lr04)), coef(summary(lr04))[,4])
coef.results <- round(coef.results,3)
colnames(coef.results)[1] <- "Estimate"
colnames(coef.results)[7] <- "p value"
#OR represents the odds that an outcome will occur given a particular predictor, compared to the odds of the outcome occuring without the predictor
write.csv(coef.results,'AOC_coefresultsn.csv')
df$fitted<-fitted(lr04)
( lrAUC<- auc(df$PA, df$fitted) )
#biserial correlation
bis<-cor.test(df$fitted,df$PA)
biserial<-biserial.cor(df$fitted,df$PA)*-1
biserial
#really just a measure of how well the fitted data correlates with the original PA data
pred <- prediction(df$fitted, df$PA)
perf <- performance(pred, measure="tpr", x.measure="fpr")
plot(perf, col=rainbow(10),main='ROC Curve, AOC')
opt.cut = function(perf, pred){
cut.ind = mapply(FUN=function(x, y, p){
d = (x - 0)^2 + (y-1)^2
ind = which(d == min(d))
c(sensitivity = y[[ind]], specificity = 1-x[[ind]],
cutoff = p[[ind]])
}, perf@x.values, perf@y.values, pred@cutoffs)
}
perf <- data.frame(AUC=NA,sens=NA,spec=NA,TSS=NA,cutoff=NA)
pred <- predict(lr04, df, type = "response")
pred.lab <- as.data.frame(cbind(pred,df$PA))
colnames(pred.lab) <- c("predictions","labels")
pred.lab1 <- prediction(pred.lab$predictions,pred.lab$labels)
perf.auc <- performance(prediction(pred, df$PA),"auc")
perf.roc <- performance(pred.lab1, measure = "tpr", x.measure = "fpr")
perf.sens <- performance(prediction(pred,df$PA), "sens","spec")
cutoff <- opt.cut(perf.roc, pred.lab1)
perf$AUC <- perf.auc@y.values[[1]] # AUC
perf$sens <- cutoff["sensitivity",] # sensitivity
perf$spec <- cutoff["specificity",] # specificity
perf$cutoff <- cutoff["cutoff",] # optimal cutoff
perf$TSS <- perf$sens + perf$spec - 1
write.csv(perf,'AOC_evalresults.csv')
###BASP
setwd("C:\\Users\\eliza\\Documents\\UGA\\Thesis\\NicheModels_2019\\EvalEdits")
df<-read.csv('BASP.csv')
lr04<- glm(PA ~ plandpine800 + I(plandpine800^2) + fire800 + I(fire800^2) + herbht800 + shrbht800 + can100 + I(can100^2), family=binomial(link="logit"), df, na.action=na.pass)
#each coef represents the change in the log odds of being 1/0 for a unit increase in that predictor while holding all other predictors constant
#the exp of each coef
## Top model estimates, odds ratios, and 95% CI
coef.results<- cbind(coef(lr04),confint(lr04),OR = exp(coef(lr04)))
coef.results <- cbind(coef.results, exp(confint(lr04)), coef(summary(lr04))[,4])
coef.results <- round(coef.results,3)
colnames(coef.results)[1] <- "Estimate"
colnames(coef.results)[7] <- "p value"
#OR represents the odds that an outcome will occur given a particular predictor, compared to the odds of the outcome occuring without the predictor
write.csv(coef.results,'BASP_coefresultsn.csv')
df$fitted<-fitted(lr04)
( lrAUC<- auc(df$PA, df$fitted) )
#biserial correlation
bis<-cor.test(df$fitted,df$PA)
biserial<-biserial.cor(df$fitted,df$PA)*-1
biserial
#really just a measure of how well the fitted data correlates with the original PA data
pred <- prediction(df$fitted, df$PA)
perf <- performance(pred, measure="tpr", x.measure="fpr")
plot(perf, col=rainbow(10),main='ROC Curve, AOC')
opt.cut = function(perf, pred){
cut.ind = mapply(FUN=function(x, y, p){
d = (x - 0)^2 + (y-1)^2
ind = which(d == min(d))
c(sensitivity = y[[ind]], specificity = 1-x[[ind]],
cutoff = p[[ind]])
}, perf@x.values, perf@y.values, pred@cutoffs)
}
perf <- data.frame(AUC=NA,sens=NA,spec=NA,TSS=NA,cutoff=NA)
pred <- predict(lr04, df, type = "response")
pred.lab <- as.data.frame(cbind(pred,df$PA))
colnames(pred.lab) <- c("predictions","labels")
pred.lab1 <- prediction(pred.lab$predictions,pred.lab$labels)
perf.auc <- performance(prediction(pred, df$PA),"auc")
perf.roc <- performance(pred.lab1, measure = "tpr", x.measure = "fpr")
perf.sens <- performance(prediction(pred,df$PA), "sens","spec")
cutoff <- opt.cut(perf.roc, pred.lab1)
perf$AUC <- perf.auc@y.values[[1]] # AUC
perf$sens <- cutoff["sensitivity",] # sensitivity
perf$spec <- cutoff["specificity",] # specificity
perf$cutoff <- cutoff["cutoff",] # optimal cutoff
perf$TSS <- perf$sens + perf$spec - 1
write.csv(perf,'BASP_evalresults.csv')
###DT
setwd("C:\\Users\\eliza\\Documents\\UGA\\Thesis\\NicheModels_2019\\EvalEdits")
df<-read.csv('DT.csv')
lr01<- glm(PA ~ marsh500 + landco_800 + I(landco_800^2) + urb_800 + I(urb_800^2) + elev500, family=binomial(link="logit"), df, na.action=na.pass)
summary(lr01)
#each coef represents the change in the log odds of being 1/0 for a unit increase in that predictor while holding all other predictors constant
#the exp of each coef
## Top model estimates, odds ratios, and 95% CI
coef.results<- cbind(coef(lr01),confint(lr01),OR = exp(coef(lr01)))
coef.results <- cbind(coef.results, exp(confint(lr01)), coef(summary(lr01))[,4])
colnames(coef.results)[1] <- "Estimate"
colnames(coef.results)[7] <- "p value"
#OR represents the odds that an outcome will occur given a particular predictor, compared to the odds of the outcome occuring without the predictor
write.csv(coef.results,'DT_coefresults.csv')
df$fitted<-fitted(lr01)
( lrAUC<- auc(df$PA, df$fitted) )
#biserial correlation
bis<-cor.test(df$fitted,df$PA)
biserial<-biserial.cor(df$fitted,df$PA)*-1
biserial
#really just a measure of how well the fitted data correlates with the original PA data
pred <- prediction(df$fitted, df$PA)
#look at ROC curve
perf <- performance(pred, measure="tpr", x.measure="fpr")
plot(perf, col=rainbow(10),main='ROC Curve, AOC')
opt.cut = function(perf, pred){
cut.ind = mapply(FUN=function(x, y, p){
d = (x - 0)^2 + (y-1)^2
ind = which(d == min(d))
c(sensitivity = y[[ind]], specificity = 1-x[[ind]],
cutoff = p[[ind]])
}, perf@x.values, perf@y.values, pred@cutoffs)
}
perf <- data.frame(AUC=NA,sens=NA,spec=NA,TSS=NA,cutoff=NA)
pred <- predict(lr01, df, type = "response")
pred.lab <- as.data.frame(cbind(pred,df$PA))
colnames(pred.lab) <- c("predictions","labels")
pred.lab1 <- prediction(pred.lab$predictions,pred.lab$labels)
perf.auc <- performance(prediction(pred, df$PA),"auc")
perf.roc <- performance(pred.lab1, measure = "tpr", x.measure = "fpr")
perf.sens <- performance(prediction(pred,df$PA), "sens","spec")
cutoff <- opt.cut(perf.roc, pred.lab1)
perf$AUC <- perf.auc@y.values[[1]] # AUC
perf$sens <- cutoff["sensitivity",] # sensitivity
perf$spec <- cutoff["specificity",] # specificity
perf$cutoff <- cutoff["cutoff",] # optimal cutoff
perf$TSS <- perf$sens + perf$spec - 1
write.csv(perf,'DT_evalresults.csv')
###EDR
setwd("C:\\Users\\eliza\\Documents\\UGA\\Thesis\\NicheModels_2019\\EvalEdits")
df<-read.csv('EDR.csv')
lr01<- glm(PA ~ can900 + dran250 + fire900 + landco250 + tpi_raw +urb_250 + precip_raw + I(precip_raw^2), family=binomial(link="logit"), df, na.action=na.pass)
summary(lr01)
#each coef represents the change in the log odds of being 1/0 for a unit increase in that predictor while holding all other predictors constant
#the exp of each coef
## Top model estimates, odds ratios, and 95% CI
coef.results<- cbind(coef(lr01),confint(lr01),OR = exp(coef(lr01)))
coef.results <- cbind(coef.results, exp(confint(lr01)), coef(summary(lr01))[,4])
colnames(coef.results)[1] <- "Estimate"
colnames(coef.results)[7] <- "p value"
#OR represents the odds that an outcome will occur given a particular predictor, compared to the odds of the outcome occuring without the predictor
write.csv(coef.results,'EDR_coefresults.csv')
df$fitted<-fitted(lr01)
( lrAUC<- auc(df$PA, df$fitted) )
#biserial correlation
bis<-cor.test(df$fitted,df$PA)
biserial<-biserial.cor(df$fitted,df$PA)*-1
biserial
#really just a measure of how well the fitted data correlates with the original PA data
pred <- prediction(df$fitted, df$PA)
#look at ROC curve
perf <- performance(pred, measure="tpr", x.measure="fpr")
plot(perf, col=rainbow(10),main='ROC Curve, AOC')
opt.cut = function(perf, pred){
cut.ind = mapply(FUN=function(x, y, p){
d = (x - 0)^2 + (y-1)^2
ind = which(d == min(d))
c(sensitivity = y[[ind]], specificity = 1-x[[ind]],
cutoff = p[[ind]])
}, perf@x.values, perf@y.values, pred@cutoffs)
}
perf <- data.frame(AUC=NA,sens=NA,spec=NA,TSS=NA,cutoff=NA)
pred <- predict(lr01, df, type = "response")
pred.lab <- as.data.frame(cbind(pred,df$PA))
colnames(pred.lab) <- c("predictions","labels")
pred.lab1 <- prediction(pred.lab$predictions,pred.lab$labels)
perf.auc <- performance(prediction(pred, df$PA),"auc")
perf.roc <- performance(pred.lab1, measure = "tpr", x.measure = "fpr")
perf.sens <- performance(prediction(pred,df$PA), "sens","spec")
cutoff <- opt.cut(perf.roc, pred.lab1)
perf$AUC <- perf.auc@y.values[[1]] # AUC
perf$sens <- cutoff["sensitivity",] # sensitivity
perf$spec <- cutoff["specificity",] # specificity
perf$cutoff <- cutoff["cutoff",] # optimal cutoff
perf$TSS <- perf$sens + perf$spec - 1
write.csv(perf,'EDR_evalresults.csv')
###EIS
setwd("C:\\Users\\eliza\\Documents\\UGA\\Thesis\\NicheModels_2019\\EvalEdits")
df<-read.csv('EIS.csv')
lr01<- glm(PA ~ rip_900 + can900 + pine900 + dran250 + landco250 + precip_raw + I(precip_raw^2) +urb_250 +hist900, family=binomial(link="logit"), df, na.action=na.pass)
summary(lr01)
#each coef represents the change in the log odds of being 1/0 for a unit increase in that predictor while holding all other predictors constant
#the exp of each coef
## Top model estimates, odds ratios, and 95% CI
coef.results<- cbind(coef(lr01),confint(lr01),OR = exp(coef(lr01)))
coef.results <- cbind(coef.results, exp(confint(lr01)), coef(summary(lr01))[,4])
colnames(coef.results)[1] <- "Estimate"
colnames(coef.results)[7] <- "p value"
#OR represents the odds that an outcome will occur given a particular predictor, compared to the odds of the outcome occuring without the predictor
write.csv(coef.results,'EIS_coefresults.csv')
df$fitted<-fitted(lr01)
( lrAUC<- auc(df$PA, df$fitted) )
#biserial correlation
bis<-cor.test(df$fitted,df$PA)
biserial<-biserial.cor(df$fitted,df$PA)*-1
biserial
#really just a measure of how well the fitted data correlates with the original PA data
pred <- prediction(df$fitted, df$PA)
#look at ROC curve
perf <- performance(pred, measure="tpr", x.measure="fpr")
plot(perf, col=rainbow(10),main='ROC Curve, AOC')
opt.cut = function(perf, pred){
cut.ind = mapply(FUN=function(x, y, p){
d = (x - 0)^2 + (y-1)^2
ind = which(d == min(d))
c(sensitivity = y[[ind]], specificity = 1-x[[ind]],
cutoff = p[[ind]])
}, perf@x.values, perf@y.values, pred@cutoffs)
}
perf <- data.frame(AUC=NA,sens=NA,spec=NA,TSS=NA,cutoff=NA)
pred <- predict(lr01, df, type = "response")
pred.lab <- as.data.frame(cbind(pred,df$PA))
colnames(pred.lab) <- c("predictions","labels")
pred.lab1 <- prediction(pred.lab$predictions,pred.lab$labels)
perf.auc <- performance(prediction(pred, df$PA),"auc")
perf.roc <- performance(pred.lab1, measure = "tpr", x.measure = "fpr")
perf.sens <- performance(prediction(pred,df$PA), "sens","spec")
cutoff <- opt.cut(perf.roc, pred.lab1)
perf$AUC <- perf.auc@y.values[[1]] # AUC
perf$sens <- cutoff["sensitivity",] # sensitivity
perf$spec <- cutoff["specificity",] # specificity
perf$cutoff <- cutoff["cutoff",] # optimal cutoff
perf$TSS <- perf$sens + perf$spec - 1
write.csv(perf,'EIS_evalresults.csv')
###PB
setwd("C:\\Users\\eliza\\Documents\\UGA\\Thesis\\NicheModels_2019\\EvalEdits")
df<-read.csv('PBfinedits.csv')
lr01<- glm(PA ~ patmshb700 + shrbht700 + edmar700 + can700 + I(can700^2), family=binomial(link="logit"), df, na.action=na.pass)
summary(lr01)
#each coef represents the change in the log odds of being 1/0 for a unit increase in that predictor while holding all other predictors constant
#the exp of each coef
## Top model estimates, odds ratios, and 95% CI
coef.results<- cbind(coef(lr01),confint(lr01),OR = exp(coef(lr01)))
coef.results <- cbind(coef.results, exp(confint(lr01)), coef(summary(lr01))[,4])
colnames(coef.results)[1] <- "Estimate"
colnames(coef.results)[7] <- "p value"
#OR represents the odds that an outcome will occur given a particular predictor, compared to the odds of the outcome occuring without the predictor
write.csv(coef.results,'PB_coefresults.csv')
df$fitted<-fitted(lr01)
( lrAUC<- auc(df$PA, df$fitted) )
#biserial correlation
bis<-cor.test(df$fitted,df$PA)
biserial<-biserial.cor(df$fitted,df$PA)*-1
biserial
#really just a measure of how well the fitted data correlates with the original PA data
pred <- prediction(df$fitted, df$PA)
#look at ROC curve
perf <- performance(pred, measure="tpr", x.measure="fpr")
plot(perf, col=rainbow(10),main='ROC Curve, AOC')
opt.cut = function(perf, pred){
cut.ind = mapply(FUN=function(x, y, p){
d = (x - 0)^2 + (y-1)^2
ind = which(d == min(d))
c(sensitivity = y[[ind]], specificity = 1-x[[ind]],
cutoff = p[[ind]])
}, perf@x.values, perf@y.values, pred@cutoffs)
}
perf <- data.frame(AUC=NA,sens=NA,spec=NA,TSS=NA,cutoff=NA)
pred <- predict(lr01, df, type = "response")
pred.lab <- as.data.frame(cbind(pred,df$PA))
colnames(pred.lab) <- c("predictions","labels")
pred.lab1 <- prediction(pred.lab$predictions,pred.lab$labels)
perf.auc <- performance(prediction(pred, df$PA),"auc")
perf.roc <- performance(pred.lab1, measure = "tpr", x.measure = "fpr")
perf.sens <- performance(prediction(pred,df$PA), "sens","spec")
cutoff <- opt.cut(perf.roc, pred.lab1)
perf$AUC <- perf.auc@y.values[[1]] # AUC
perf$sens <- cutoff["sensitivity",] # sensitivity
perf$spec <- cutoff["specificity",] # specificity
perf$cutoff <- cutoff["cutoff",] # optimal cutoff
perf$TSS <- perf$sens + perf$spec - 1
write.csv(perf,'PB_evalresults.csv')
###RCW
df<-read.csv('RCW.csv')
lr01<- glm(PA ~ plandpine800 + I(plandpine800^2) + fire800 + I(fire800^2) + herbht800 + shrbht800 + can800, family=binomial(link="logit"), df, na.action=na.pass)
summary(lr01)
## Top model estimates, odds ratios, and 95% CI
coef.results<- cbind(coef(lr01),confint(lr01),OR = exp(coef(lr01)))
coef.results <- cbind(coef.results, exp(confint(lr01)), coef(summary(lr01))[,4])
colnames(coef.results)[1] <- "Estimate"
colnames(coef.results)[7] <- "p value"
#OR represents the odds that an outcome will occur given a particular predictor, compared to the odds of the outcome occuring without the predictor
write.csv(coef.results,'RCW_coefresults.csv')
df$fitted<-fitted(lr01)
( lrAUC<- auc(df$PA, df$fitted) )
#biserial correlation
bis<-cor.test(df$fitted,df$PA)
biserial<-biserial.cor(df$fitted,df$PA)*-1
biserial
#really just a measure of how well the fitted data correlates with the original PA data
pred <- prediction(df$fitted, df$PA)
#look at ROC curve
perf <- performance(pred, measure="tpr", x.measure="fpr")
plot(perf, col=rainbow(10),main='ROC Curve, AOC')
opt.cut = function(perf, pred){
cut.ind = mapply(FUN=function(x, y, p){
d = (x - 0)^2 + (y-1)^2
ind = which(d == min(d))
c(sensitivity = y[[ind]], specificity = 1-x[[ind]],
cutoff = p[[ind]])
}, perf@x.values, perf@y.values, pred@cutoffs)
}
perf <- data.frame(AUC=NA,sens=NA,spec=NA,TSS=NA,cutoff=NA)
pred <- predict(lr01, df, type = "response")
pred.lab <- as.data.frame(cbind(pred,df$PA))
colnames(pred.lab) <- c("predictions","labels")
pred.lab1 <- prediction(pred.lab$predictions,pred.lab$labels)
perf.auc <- performance(prediction(pred, df$PA),"auc")
perf.roc <- performance(pred.lab1, measure = "tpr", x.measure = "fpr")
perf.sens <- performance(prediction(pred,df$PA), "sens","spec")
cutoff <- opt.cut(perf.roc, pred.lab1)
perf$AUC <- perf.auc@y.values[[1]] # AUC
perf$sens <- cutoff["sensitivity",] # sensitivity
perf$spec <- cutoff["specificity",] # specificity
perf$cutoff <- cutoff["cutoff",] # optimal cutoff
perf$TSS <- perf$sens + perf$spec - 1
write.csv(perf,'RCW_evalresults.csv')
###SSS
# df<-read.csv('SSS.csv')
#
# lr01<- glm(PA ~ ed_msh200 + elev200f + brack200 +urb_200 +for_200, family=binomial(link="logit"), df, na.action=na.pass)
# summary(lr01)
#
# ## Top model estimates, odds ratios, and 95% CI
# coef.results<- cbind(coef(lr01),confint(lr01),OR = exp(coef(lr01)))
# coef.results <- cbind(coef.results, exp(confint(lr01)), coef(summary(lr01))[,4])
# colnames(coef.results)[1] <- "Estimate"
# colnames(coef.results)[7] <- "p value"
# #OR represents the odds that an outcome will occur given a particular predictor, compared to the odds of the outcome occuring without the predictor
# write.csv(coef.results,'SSS_coefresults.csv')
#
# df$fitted<-fitted(lr01)
# ( lrAUC<- auc(df$PA, df$fitted) )
# #biserial correlation
# bis<-cor.test(df$fitted,df$PA)
# biserial<-biserial.cor(df$fitted,df$PA)*-1
# biserial
# #really just a measure of how well the fitted data correlates with the original PA data
# pred <- prediction(df$fitted, df$PA)
# #look at ROC curve
# perf <- performance(pred, measure="tpr", x.measure="fpr")
# plot(perf, col=rainbow(10),main='ROC Curve, AOC')
#
#
# opt.cut = function(perf, pred){
# cut.ind = mapply(FUN=function(x, y, p){
# d = (x - 0)^2 + (y-1)^2
# ind = which(d == min(d))
# c(sensitivity = y[[ind]], specificity = 1-x[[ind]],
# cutoff = p[[ind]])
# }, perf@x.values, perf@y.values, pred@cutoffs)
# }
#
# perf <- data.frame(AUC=NA,sens=NA,spec=NA,TSS=NA,cutoff=NA)
# pred <- predict(lr01, df, type = "response")
# pred.lab <- as.data.frame(cbind(pred,df$PA))
# colnames(pred.lab) <- c("predictions","labels")
# pred.lab1 <- prediction(pred.lab$predictions,pred.lab$labels)
# perf.auc <- performance(prediction(pred, df$PA),"auc")
# perf.roc <- performance(pred.lab1, measure = "tpr", x.measure = "fpr")
# perf.sens <- performance(prediction(pred,df$PA), "sens","spec")
# cutoff <- opt.cut(perf.roc, pred.lab1)
# perf$AUC <- perf.auc@y.values[[1]] # AUC
# perf$sens <- cutoff["sensitivity",] # sensitivity
# perf$spec <- cutoff["specificity",] # specificity
# perf$cutoff <- cutoff["cutoff",] # optimal cutoff
# perf$TSS <- perf$sens + perf$spec - 1
#
# write.csv(perf,'SSS_evalresults.csv')
#WP ----
# Fit the top logistic-regression habitat model for WP, export coefficient
# estimates / odds ratios with 95% CIs, then evaluate the model: AUC,
# biserial correlation, and the cutoff that optimizes TSS together with its
# sensitivity and specificity.
#setwd("E:\\FinalModels\\Predictors\\WS")
# NOTE: the original path had a single backslash before "Documents" ("\D"),
# which is an unrecognized escape sequence and a parse error in R; fixed to "\\".
setwd("C:\\Users\\eliza\\Documents\\UGA\\Thesis\\NicheModels_2019\\EvalEdits")
df<-read.csv('WPnewedits.csv')
lr01<- glm(PA ~ pland_bh100f + ed_msh100 + landco100 + urb1kmf + elev1kmf, family=binomial(link="logit"), df, na.action=na.pass)
# Each coefficient is the change in the log odds of presence for a unit
# increase in that predictor, holding the other predictors constant.
## Top model estimates, odds ratios, and 95% CI
coef.results<- cbind(coef(lr01),confint(lr01),OR = exp(coef(lr01)))
coef.results <- cbind(coef.results, exp(confint(lr01)), coef(summary(lr01))[,4])
colnames(coef.results)[1] <- "Estimate"
colnames(coef.results)[7] <- "p value"
# OR: the odds of the outcome given the predictor relative to the odds of the
# outcome occurring without it.
write.csv(coef.results,'WP_coefresults.csv')
df$fitted<-fitted(lr01)
( lrAUC<- auc(df$PA, df$fitted) )
# Biserial correlation: how well the fitted probabilities track the observed
# presence/absence data.
bis<-cor.test(df$fitted,df$PA)
biserial<-biserial.cor(df$fitted,df$PA)*-1
biserial
pred <- prediction(df$fitted, df$PA)
#look at ROC curve
perf <- performance(pred, measure="tpr", x.measure="fpr")
plot(perf, col=rainbow(10),main='ROC Curve, WP')  # title was mislabeled "AOC" (copy-paste)
# Cutoff closest to the ideal ROC corner (fpr = 0, tpr = 1). which.min()
# returns a single index, so ties in d no longer crash y[[ind]].
opt.cut = function(perf, pred){
  cut.ind = mapply(FUN=function(x, y, p){
    d = (x - 0)^2 + (y-1)^2
    ind = which.min(d)
    c(sensitivity = y[[ind]], specificity = 1-x[[ind]],
      cutoff = p[[ind]])
  }, perf@x.values, perf@y.values, pred@cutoffs)
}
perf <- data.frame(AUC=NA,sens=NA,spec=NA,TSS=NA,cutoff=NA)  # reuse name as the results table
pred <- predict(lr01, df, type = "response")
pred.lab <- as.data.frame(cbind(pred,df$PA))
colnames(pred.lab) <- c("predictions","labels")
pred.lab1 <- prediction(pred.lab$predictions,pred.lab$labels)
perf.auc <- performance(prediction(pred, df$PA),"auc")
perf.roc <- performance(pred.lab1, measure = "tpr", x.measure = "fpr")
perf.sens <- performance(prediction(pred,df$PA), "sens","spec")
cutoff <- opt.cut(perf.roc, pred.lab1)
perf$AUC <- perf.auc@y.values[[1]]    # AUC
perf$sens <- cutoff["sensitivity",]   # sensitivity at the optimal cutoff
perf$spec <- cutoff["specificity",]   # specificity at the optimal cutoff
perf$cutoff <- cutoff["cutoff",]      # optimal cutoff
perf$TSS <- perf$sens + perf$spec - 1 # true skill statistic = sens + spec - 1
write.csv(perf,'WP_evalresults.csv')
#WS ----
# Fit the top logistic-regression habitat model for WS, export coefficient
# estimates / odds ratios with 95% CIs, then evaluate the model: AUC,
# biserial correlation, and the TSS-optimal cutoff with its
# sensitivity/specificity.
#setwd("E:\\FinalModels\\Predictors\\WS")
# NOTE: the original path had a single backslash before "Documents" ("\D"),
# which is an unrecognized escape sequence and a parse error in R; fixed to "\\".
setwd("C:\\Users\\eliza\\Documents\\UGA\\Thesis\\NicheModels_2019\\EvalEdits")
df<-read.csv('WSfinfin1010.csv')
lr01 <- glm(PA ~ nwi2000 + wat2000 + I(wat2000^2) + nwifwd_2000 + nhd_2000 + landco2000 + I(landco2000^2) + can2000, family=binomial(link="logit"), df, na.action=na.pass)
summary(lr01)
# Each coefficient is the change in the log odds of presence for a unit
# increase in that predictor, holding the other predictors constant.
## Top model estimates, odds ratios, and 95% CI
coef.results<- cbind(coef(lr01),confint(lr01),OR = exp(coef(lr01)))
coef.results <- cbind(coef.results, exp(confint(lr01)), coef(summary(lr01))[,4])
colnames(coef.results)[1] <- "Estimate"
colnames(coef.results)[7] <- "p value"
# OR: the odds of the outcome given the predictor relative to the odds of the
# outcome occurring without it.
write.csv(coef.results,'WS_coefresults.csv')
df$fitted<-fitted(lr01)
( lrAUC<- auc(df$PA, df$fitted) )
# Biserial correlation: how well the fitted probabilities track the observed
# presence/absence data.
bis<-cor.test(df$fitted,df$PA)
biserial<-biserial.cor(df$fitted,df$PA)*-1
biserial
pred <- prediction(df$fitted, df$PA)
#look at ROC curve
perf <- performance(pred, measure="tpr", x.measure="fpr")
plot(perf, col=rainbow(10),main='ROC Curve, WS')  # title was mislabeled "AOC" (copy-paste)
# Cutoff closest to the ideal ROC corner (fpr = 0, tpr = 1). which.min()
# returns a single index, so ties in d no longer crash y[[ind]].
opt.cut = function(perf, pred){
  cut.ind = mapply(FUN=function(x, y, p){
    d = (x - 0)^2 + (y-1)^2
    ind = which.min(d)
    c(sensitivity = y[[ind]], specificity = 1-x[[ind]],
      cutoff = p[[ind]])
  }, perf@x.values, perf@y.values, pred@cutoffs)
}
perf <- data.frame(AUC=NA,sens=NA,spec=NA,TSS=NA,cutoff=NA)  # reuse name as the results table
pred <- predict(lr01, df, type = "response")
pred.lab <- as.data.frame(cbind(pred,df$PA))
colnames(pred.lab) <- c("predictions","labels")
pred.lab1 <- prediction(pred.lab$predictions,pred.lab$labels)
perf.auc <- performance(prediction(pred, df$PA),"auc")
perf.roc <- performance(pred.lab1, measure = "tpr", x.measure = "fpr")
perf.sens <- performance(prediction(pred,df$PA), "sens","spec")
cutoff <- opt.cut(perf.roc, pred.lab1)
perf$AUC <- perf.auc@y.values[[1]]    # AUC
perf$sens <- cutoff["sensitivity",]   # sensitivity at the optimal cutoff
perf$spec <- cutoff["specificity",]   # specificity at the optimal cutoff
perf$cutoff <- cutoff["cutoff",]      # optimal cutoff
perf$TSS <- perf$sens + perf$spec - 1 # true skill statistic = sens + spec - 1
write.csv(perf,'WS_evalresults.csv')
| /model_eval.R | no_license | epaulukonis/vulnerability_coastal_ga | R | false | false | 24,222 | r |
library(AICcmodavg)
library(SDMTools)
library(ROCR)
library(raster)
library(rgdal)
library(lme4)
library(corrplot)
library(ggplot2)
library(dismo)
library(caret)
library(hier.part)
library(ltm)
# New evaluation workflow: reports TSS, sensitivity, specificity, and the
# cutoff that optimizes TSS (equivalent to maximizing sens + spec).
###AOC ----
# Fit the top AOC logistic-regression model and export its coefficient table.
#setwd("E:\\FinalModels\\Predictors\\AOC")
setwd("C:\\Users\\eliza\\Documents\\UGA\\Thesis\\NicheModels_2019\\EvalEdits")
df<-read.csv('AOCfin.csv')
lr04<- glm(PA ~ pland_bh100 + I(pland_bh100^2) + ed_msh1km + I(ed_msh1km^2) + urb1kmf + ow1kmf + I(ow1kmf^2), family=binomial(link="logit"), df, na.action=na.pass)
# Each coefficient is the change in the log odds of presence for a unit
# increase in that predictor, holding the other predictors constant; exp()
# of a coefficient gives the corresponding odds ratio.
## Top model estimates, odds ratios, and 95% CI
coef.results<- cbind(coef(lr04),confint(lr04),OR = exp(coef(lr04)))
coef.results <- cbind(coef.results, exp(confint(lr04)), coef(summary(lr04))[,4])
coef.results <- round(coef.results,3)
colnames(coef.results)[1] <- "Estimate"
colnames(coef.results)[7] <- "p value"
# OR: the odds of the outcome given the predictor relative to the odds of
# the outcome occurring without it.
write.csv(coef.results,'AOC_coefresultsn.csv')
df$fitted<-fitted(lr04)
( lrAUC<- auc(df$PA, df$fitted) )
# Biserial correlation: how well the fitted probabilities track the
# observed presence/absence data (sign-flipped to report as positive).
bis<-cor.test(df$fitted,df$PA)
biserial<-biserial.cor(df$fitted,df$PA)*-1
biserial
# Build the ROC curve from fitted values vs. observed PA and plot it.
pred <- prediction(df$fitted, df$PA)
perf <- performance(pred, measure="tpr", x.measure="fpr")
plot(perf, col=rainbow(10),main='ROC Curve, AOC')
# Find the ROC cutoff closest to the ideal point (fpr = 0, tpr = 1), i.e. the
# threshold that (approximately) maximizes sensitivity + specificity (TSS).
#
# perf: ROCR performance object with fpr in @x.values and tpr in @y.values.
# pred: the matching ROCR prediction object (supplies @cutoffs).
# Returns a matrix with rows "sensitivity", "specificity", and "cutoff",
# one column per prediction set stored in the ROCR objects.
opt.cut <- function(perf, pred) {
  mapply(FUN = function(x, y, p) {
    # Squared distance of each ROC point from the top-left corner (0, 1).
    d <- (x - 0)^2 + (y - 1)^2
    # which.min() always yields a single index; the original
    # which(d == min(d)) returns multiple indices on ties, which makes
    # y[[ind]] error out.
    ind <- which.min(d)
    c(sensitivity = y[[ind]], specificity = 1 - x[[ind]],
      cutoff = p[[ind]])
  }, perf@x.values, perf@y.values, pred@cutoffs)
}
# Evaluate the AOC model: AUC plus sensitivity/specificity/TSS at the
# optimal cutoff found by opt.cut(). Note `perf` is reused here as the
# results table, shadowing the ROCR performance object above.
perf <- data.frame(AUC=NA,sens=NA,spec=NA,TSS=NA,cutoff=NA)
pred <- predict(lr04, df, type = "response")
pred.lab <- as.data.frame(cbind(pred,df$PA))
colnames(pred.lab) <- c("predictions","labels")
pred.lab1 <- prediction(pred.lab$predictions,pred.lab$labels)
perf.auc <- performance(prediction(pred, df$PA),"auc")
perf.roc <- performance(pred.lab1, measure = "tpr", x.measure = "fpr")
perf.sens <- performance(prediction(pred,df$PA), "sens","spec")
cutoff <- opt.cut(perf.roc, pred.lab1)
perf$AUC <- perf.auc@y.values[[1]] # AUC
perf$sens <- cutoff["sensitivity",] # sensitivity at the optimal cutoff
perf$spec <- cutoff["specificity",] # specificity at the optimal cutoff
perf$cutoff <- cutoff["cutoff",] # optimal cutoff
perf$TSS <- perf$sens + perf$spec - 1 # true skill statistic = sens + spec - 1
write.csv(perf,'AOC_evalresults.csv')
###BASP ----
# Fit the top logistic-regression habitat model for BASP, export coefficient
# estimates / odds ratios with 95% CIs, then evaluate it (AUC, biserial
# correlation, and the TSS-optimal cutoff with its sensitivity/specificity).
setwd("C:\\Users\\eliza\\Documents\\UGA\\Thesis\\NicheModels_2019\\EvalEdits")
df<-read.csv('BASP.csv')
lr04<- glm(PA ~ plandpine800 + I(plandpine800^2) + fire800 + I(fire800^2) + herbht800 + shrbht800 + can100 + I(can100^2), family=binomial(link="logit"), df, na.action=na.pass)
# Each coefficient is the change in the log odds of presence for a unit
# increase in that predictor, holding the other predictors constant.
## Top model estimates, odds ratios, and 95% CI
coef.results<- cbind(coef(lr04),confint(lr04),OR = exp(coef(lr04)))
coef.results <- cbind(coef.results, exp(confint(lr04)), coef(summary(lr04))[,4])
coef.results <- round(coef.results,3)
colnames(coef.results)[1] <- "Estimate"
colnames(coef.results)[7] <- "p value"
# OR: the odds of the outcome given the predictor relative to the odds of the
# outcome occurring without it.
write.csv(coef.results,'BASP_coefresultsn.csv')
df$fitted<-fitted(lr04)
( lrAUC<- auc(df$PA, df$fitted) )
# Biserial correlation: how well the fitted probabilities track the observed
# presence/absence data.
bis<-cor.test(df$fitted,df$PA)
biserial<-biserial.cor(df$fitted,df$PA)*-1
biserial
pred <- prediction(df$fitted, df$PA)
perf <- performance(pred, measure="tpr", x.measure="fpr")
plot(perf, col=rainbow(10),main='ROC Curve, BASP')  # title was mislabeled "AOC" (copy-paste)
# Cutoff closest to the ideal ROC corner (fpr = 0, tpr = 1). which.min()
# returns a single index, so ties in d no longer crash y[[ind]].
opt.cut = function(perf, pred){
  cut.ind = mapply(FUN=function(x, y, p){
    d = (x - 0)^2 + (y-1)^2
    ind = which.min(d)
    c(sensitivity = y[[ind]], specificity = 1-x[[ind]],
      cutoff = p[[ind]])
  }, perf@x.values, perf@y.values, pred@cutoffs)
}
perf <- data.frame(AUC=NA,sens=NA,spec=NA,TSS=NA,cutoff=NA)  # reuse name as the results table
pred <- predict(lr04, df, type = "response")
pred.lab <- as.data.frame(cbind(pred,df$PA))
colnames(pred.lab) <- c("predictions","labels")
pred.lab1 <- prediction(pred.lab$predictions,pred.lab$labels)
perf.auc <- performance(prediction(pred, df$PA),"auc")
perf.roc <- performance(pred.lab1, measure = "tpr", x.measure = "fpr")
perf.sens <- performance(prediction(pred,df$PA), "sens","spec")
cutoff <- opt.cut(perf.roc, pred.lab1)
perf$AUC <- perf.auc@y.values[[1]]    # AUC
perf$sens <- cutoff["sensitivity",]   # sensitivity at the optimal cutoff
perf$spec <- cutoff["specificity",]   # specificity at the optimal cutoff
perf$cutoff <- cutoff["cutoff",]      # optimal cutoff
perf$TSS <- perf$sens + perf$spec - 1 # true skill statistic = sens + spec - 1
write.csv(perf,'BASP_evalresults.csv')
###DT ----
# Fit the top logistic-regression habitat model for DT, export coefficient
# estimates / odds ratios with 95% CIs, then evaluate it (AUC, biserial
# correlation, and the TSS-optimal cutoff with its sensitivity/specificity).
setwd("C:\\Users\\eliza\\Documents\\UGA\\Thesis\\NicheModels_2019\\EvalEdits")
df<-read.csv('DT.csv')
lr01<- glm(PA ~ marsh500 + landco_800 + I(landco_800^2) + urb_800 + I(urb_800^2) + elev500, family=binomial(link="logit"), df, na.action=na.pass)
summary(lr01)
# Each coefficient is the change in the log odds of presence for a unit
# increase in that predictor, holding the other predictors constant.
## Top model estimates, odds ratios, and 95% CI
coef.results<- cbind(coef(lr01),confint(lr01),OR = exp(coef(lr01)))
coef.results <- cbind(coef.results, exp(confint(lr01)), coef(summary(lr01))[,4])
colnames(coef.results)[1] <- "Estimate"
colnames(coef.results)[7] <- "p value"
# OR: the odds of the outcome given the predictor relative to the odds of the
# outcome occurring without it.
write.csv(coef.results,'DT_coefresults.csv')
df$fitted<-fitted(lr01)
( lrAUC<- auc(df$PA, df$fitted) )
# Biserial correlation: how well the fitted probabilities track the observed
# presence/absence data.
bis<-cor.test(df$fitted,df$PA)
biserial<-biserial.cor(df$fitted,df$PA)*-1
biserial
pred <- prediction(df$fitted, df$PA)
#look at ROC curve
perf <- performance(pred, measure="tpr", x.measure="fpr")
plot(perf, col=rainbow(10),main='ROC Curve, DT')  # title was mislabeled "AOC" (copy-paste)
# Cutoff closest to the ideal ROC corner (fpr = 0, tpr = 1). which.min()
# returns a single index, so ties in d no longer crash y[[ind]].
opt.cut = function(perf, pred){
  cut.ind = mapply(FUN=function(x, y, p){
    d = (x - 0)^2 + (y-1)^2
    ind = which.min(d)
    c(sensitivity = y[[ind]], specificity = 1-x[[ind]],
      cutoff = p[[ind]])
  }, perf@x.values, perf@y.values, pred@cutoffs)
}
perf <- data.frame(AUC=NA,sens=NA,spec=NA,TSS=NA,cutoff=NA)  # reuse name as the results table
pred <- predict(lr01, df, type = "response")
pred.lab <- as.data.frame(cbind(pred,df$PA))
colnames(pred.lab) <- c("predictions","labels")
pred.lab1 <- prediction(pred.lab$predictions,pred.lab$labels)
perf.auc <- performance(prediction(pred, df$PA),"auc")
perf.roc <- performance(pred.lab1, measure = "tpr", x.measure = "fpr")
perf.sens <- performance(prediction(pred,df$PA), "sens","spec")
cutoff <- opt.cut(perf.roc, pred.lab1)
perf$AUC <- perf.auc@y.values[[1]]    # AUC
perf$sens <- cutoff["sensitivity",]   # sensitivity at the optimal cutoff
perf$spec <- cutoff["specificity",]   # specificity at the optimal cutoff
perf$cutoff <- cutoff["cutoff",]      # optimal cutoff
perf$TSS <- perf$sens + perf$spec - 1 # true skill statistic = sens + spec - 1
write.csv(perf,'DT_evalresults.csv')
###EDR
## Species EDR: fit the top presence/absence logistic regression, export the
## coefficient / odds-ratio table, then build ROC diagnostics for evaluation.
setwd("C:\\Users\\eliza\\Documents\\UGA\\Thesis\\NicheModels_2019\\EvalEdits")
df<-read.csv('EDR.csv')
lr01<- glm(PA ~ can900 + dran250 + fire900 + landco250 + tpi_raw +urb_250 + precip_raw + I(precip_raw^2), family=binomial(link="logit"), df, na.action=na.pass)
summary(lr01)
#each coef represents the change in the log odds of being 1/0 for a unit increase in that predictor while holding all other predictors constant
#the exp of each coef
## Top model estimates, odds ratios, and 95% CI
coef.results<- cbind(coef(lr01),confint(lr01),OR = exp(coef(lr01)))
## Append the CI on the odds-ratio scale and the p-values (4th column of the
## coefficient table from summary()).
coef.results <- cbind(coef.results, exp(confint(lr01)), coef(summary(lr01))[,4])
colnames(coef.results)[1] <- "Estimate"
colnames(coef.results)[7] <- "p value"
#OR represents the odds that an outcome will occur given a particular predictor, compared to the odds of the outcome occurring without the predictor
write.csv(coef.results,'EDR_coefresults.csv')
df$fitted<-fitted(lr01)
## In-sample AUC of the fitted values (outer parentheses print the result).
( lrAUC<- auc(df$PA, df$fitted) )
#biserial correlation
bis<-cor.test(df$fitted,df$PA)
## NOTE(review): the *-1 flips the sign of biserial.cor()'s output --
## presumably to match the level ordering of PA; confirm against the data.
biserial<-biserial.cor(df$fitted,df$PA)*-1
biserial
#really just a measure of how well the fitted data correlates with the original PA data
pred <- prediction(df$fitted, df$PA)
#look at ROC curve
perf <- performance(pred, measure="tpr", x.measure="fpr")
## NOTE(review): the title says 'AOC' here and in every section -- looks like
## a typo for 'AUC' or the species code; left unchanged.
plot(perf, col=rainbow(10),main='ROC Curve, AOC')
## Find the ROC cutoff whose (fpr, tpr) point lies closest to the perfect
## classifier corner (0, 1). `perf` is a ROCR performance object with fpr in
## @x.values and tpr in @y.values; `pred` is the matching ROCR prediction
## object. Returns a matrix with rows "sensitivity", "specificity" and
## "cutoff" (one column per run in the prediction object).
opt.cut = function(perf, pred){
    cut.ind = mapply(FUN=function(x, y, p){
        d = (x - 0)^2 + (y-1)^2
        ## BUG FIX: which(d == min(d)) can return several indices when two
        ## ROC points tie for the minimum distance, making y[[ind]] error;
        ## which.min() always returns exactly one index.
        ind = which.min(d)
        c(sensitivity = y[[ind]], specificity = 1-x[[ind]],
          cutoff = p[[ind]])
    }, perf@x.values, perf@y.values, pred@cutoffs)
}
## Threshold-dependent evaluation summary for the EDR model: AUC, plus
## sensitivity/specificity at the optimal ROC cutoff and the TSS.
perf <- data.frame(AUC=NA,sens=NA,spec=NA,TSS=NA,cutoff=NA)
pred <- predict(lr01, df, type = "response")
pred.lab <- as.data.frame(cbind(pred,df$PA))
colnames(pred.lab) <- c("predictions","labels")
pred.lab1 <- prediction(pred.lab$predictions,pred.lab$labels)
perf.auc <- performance(prediction(pred, df$PA),"auc")
perf.roc <- performance(pred.lab1, measure = "tpr", x.measure = "fpr")
## NOTE(review): perf.sens is computed but never used below.
perf.sens <- performance(prediction(pred,df$PA), "sens","spec")
cutoff <- opt.cut(perf.roc, pred.lab1)
perf$AUC <- perf.auc@y.values[[1]] # AUC
perf$sens <- cutoff["sensitivity",] # sensitivity
perf$spec <- cutoff["specificity",] # specificity
perf$cutoff <- cutoff["cutoff",] # optimal cutoff
## True Skill Statistic = sensitivity + specificity - 1.
perf$TSS <- perf$sens + perf$spec - 1
write.csv(perf,'EDR_evalresults.csv')
###EIS
## Species EIS: fit the top presence/absence GLM, export the coefficient /
## odds-ratio table, and compute evaluation metrics (AUC, sensitivity,
## specificity, TSS) at the optimal ROC cutoff.
setwd("C:\\Users\\eliza\\Documents\\UGA\\Thesis\\NicheModels_2019\\EvalEdits")
df<-read.csv('EIS.csv')
lr01<- glm(PA ~ rip_900 + can900 + pine900 + dran250 + landco250 + precip_raw + I(precip_raw^2) +urb_250 +hist900, family=binomial(link="logit"), df, na.action=na.pass)
summary(lr01)
#each coef represents the change in the log odds of being 1/0 for a unit increase in that predictor while holding all other predictors constant
#the exp of each coef
## Top model estimates, odds ratios, and 95% CI
coef.results<- cbind(coef(lr01),confint(lr01),OR = exp(coef(lr01)))
coef.results <- cbind(coef.results, exp(confint(lr01)), coef(summary(lr01))[,4])
colnames(coef.results)[1] <- "Estimate"
colnames(coef.results)[7] <- "p value"
#OR represents the odds that an outcome will occur given a particular predictor, compared to the odds of the outcome occurring without the predictor
write.csv(coef.results,'EIS_coefresults.csv')
df$fitted<-fitted(lr01)
( lrAUC<- auc(df$PA, df$fitted) )
#biserial correlation
bis<-cor.test(df$fitted,df$PA)
biserial<-biserial.cor(df$fitted,df$PA)*-1
biserial
#really just a measure of how well the fitted data correlates with the original PA data
pred <- prediction(df$fitted, df$PA)
#look at ROC curve
perf <- performance(pred, measure="tpr", x.measure="fpr")
plot(perf, col=rainbow(10),main='ROC Curve, AOC')
## Cutoff on the ROC curve closest to the perfect-classifier corner (0, 1).
## BUG FIX: which(d == min(d)) can return several indices when points tie,
## which makes y[[ind]] error; which.min() always returns a single index.
opt.cut = function(perf, pred){
    cut.ind = mapply(FUN=function(x, y, p){
        d = (x - 0)^2 + (y-1)^2
        ind = which.min(d)
        c(sensitivity = y[[ind]], specificity = 1-x[[ind]],
          cutoff = p[[ind]])
    }, perf@x.values, perf@y.values, pred@cutoffs)
}
perf <- data.frame(AUC=NA,sens=NA,spec=NA,TSS=NA,cutoff=NA)
pred <- predict(lr01, df, type = "response")
pred.lab <- as.data.frame(cbind(pred,df$PA))
colnames(pred.lab) <- c("predictions","labels")
pred.lab1 <- prediction(pred.lab$predictions,pred.lab$labels)
perf.auc <- performance(prediction(pred, df$PA),"auc")
perf.roc <- performance(pred.lab1, measure = "tpr", x.measure = "fpr")
perf.sens <- performance(prediction(pred,df$PA), "sens","spec")
cutoff <- opt.cut(perf.roc, pred.lab1)
perf$AUC <- perf.auc@y.values[[1]] # AUC
perf$sens <- cutoff["sensitivity",] # sensitivity
perf$spec <- cutoff["specificity",] # specificity
perf$cutoff <- cutoff["cutoff",] # optimal cutoff
perf$TSS <- perf$sens + perf$spec - 1 # True Skill Statistic
write.csv(perf,'EIS_evalresults.csv')
###PB
## Species PB: fit the top presence/absence GLM (quadratic canopy term),
## export the coefficient / odds-ratio table, and compute evaluation metrics.
setwd("C:\\Users\\eliza\\Documents\\UGA\\Thesis\\NicheModels_2019\\EvalEdits")
df<-read.csv('PBfinedits.csv')
lr01<- glm(PA ~ patmshb700 + shrbht700 + edmar700 + can700 + I(can700^2), family=binomial(link="logit"), df, na.action=na.pass)
summary(lr01)
#each coef represents the change in the log odds of being 1/0 for a unit increase in that predictor while holding all other predictors constant
#the exp of each coef
## Top model estimates, odds ratios, and 95% CI
coef.results<- cbind(coef(lr01),confint(lr01),OR = exp(coef(lr01)))
coef.results <- cbind(coef.results, exp(confint(lr01)), coef(summary(lr01))[,4])
colnames(coef.results)[1] <- "Estimate"
colnames(coef.results)[7] <- "p value"
#OR represents the odds that an outcome will occur given a particular predictor, compared to the odds of the outcome occurring without the predictor
write.csv(coef.results,'PB_coefresults.csv')
df$fitted<-fitted(lr01)
( lrAUC<- auc(df$PA, df$fitted) )
#biserial correlation
bis<-cor.test(df$fitted,df$PA)
biserial<-biserial.cor(df$fitted,df$PA)*-1
biserial
#really just a measure of how well the fitted data correlates with the original PA data
pred <- prediction(df$fitted, df$PA)
#look at ROC curve
perf <- performance(pred, measure="tpr", x.measure="fpr")
plot(perf, col=rainbow(10),main='ROC Curve, AOC')
## Cutoff on the ROC curve closest to the perfect-classifier corner (0, 1).
## BUG FIX: which(d == min(d)) can return several indices when points tie,
## which makes y[[ind]] error; which.min() always returns a single index.
opt.cut = function(perf, pred){
    cut.ind = mapply(FUN=function(x, y, p){
        d = (x - 0)^2 + (y-1)^2
        ind = which.min(d)
        c(sensitivity = y[[ind]], specificity = 1-x[[ind]],
          cutoff = p[[ind]])
    }, perf@x.values, perf@y.values, pred@cutoffs)
}
perf <- data.frame(AUC=NA,sens=NA,spec=NA,TSS=NA,cutoff=NA)
pred <- predict(lr01, df, type = "response")
pred.lab <- as.data.frame(cbind(pred,df$PA))
colnames(pred.lab) <- c("predictions","labels")
pred.lab1 <- prediction(pred.lab$predictions,pred.lab$labels)
perf.auc <- performance(prediction(pred, df$PA),"auc")
perf.roc <- performance(pred.lab1, measure = "tpr", x.measure = "fpr")
perf.sens <- performance(prediction(pred,df$PA), "sens","spec")
cutoff <- opt.cut(perf.roc, pred.lab1)
perf$AUC <- perf.auc@y.values[[1]] # AUC
perf$sens <- cutoff["sensitivity",] # sensitivity
perf$spec <- cutoff["specificity",] # specificity
perf$cutoff <- cutoff["cutoff",] # optimal cutoff
perf$TSS <- perf$sens + perf$spec - 1 # True Skill Statistic
write.csv(perf,'PB_evalresults.csv')
###RCW
## Species RCW: fit the top presence/absence GLM (quadratic pine and fire
## terms), export coefficients / odds ratios, and compute evaluation metrics.
## NOTE(review): no setwd() here -- this section relies on the working
## directory set by the previous (PB) section.
df<-read.csv('RCW.csv')
lr01<- glm(PA ~ plandpine800 + I(plandpine800^2) + fire800 + I(fire800^2) + herbht800 + shrbht800 + can800, family=binomial(link="logit"), df, na.action=na.pass)
summary(lr01)
## Top model estimates, odds ratios, and 95% CI
coef.results<- cbind(coef(lr01),confint(lr01),OR = exp(coef(lr01)))
coef.results <- cbind(coef.results, exp(confint(lr01)), coef(summary(lr01))[,4])
colnames(coef.results)[1] <- "Estimate"
colnames(coef.results)[7] <- "p value"
#OR represents the odds that an outcome will occur given a particular predictor, compared to the odds of the outcome occurring without the predictor
write.csv(coef.results,'RCW_coefresults.csv')
df$fitted<-fitted(lr01)
( lrAUC<- auc(df$PA, df$fitted) )
#biserial correlation
bis<-cor.test(df$fitted,df$PA)
biserial<-biserial.cor(df$fitted,df$PA)*-1
biserial
#really just a measure of how well the fitted data correlates with the original PA data
pred <- prediction(df$fitted, df$PA)
#look at ROC curve
perf <- performance(pred, measure="tpr", x.measure="fpr")
plot(perf, col=rainbow(10),main='ROC Curve, AOC')
## Cutoff on the ROC curve closest to the perfect-classifier corner (0, 1).
## BUG FIX: which(d == min(d)) can return several indices when points tie,
## which makes y[[ind]] error; which.min() always returns a single index.
opt.cut = function(perf, pred){
    cut.ind = mapply(FUN=function(x, y, p){
        d = (x - 0)^2 + (y-1)^2
        ind = which.min(d)
        c(sensitivity = y[[ind]], specificity = 1-x[[ind]],
          cutoff = p[[ind]])
    }, perf@x.values, perf@y.values, pred@cutoffs)
}
perf <- data.frame(AUC=NA,sens=NA,spec=NA,TSS=NA,cutoff=NA)
pred <- predict(lr01, df, type = "response")
pred.lab <- as.data.frame(cbind(pred,df$PA))
colnames(pred.lab) <- c("predictions","labels")
pred.lab1 <- prediction(pred.lab$predictions,pred.lab$labels)
perf.auc <- performance(prediction(pred, df$PA),"auc")
perf.roc <- performance(pred.lab1, measure = "tpr", x.measure = "fpr")
perf.sens <- performance(prediction(pred,df$PA), "sens","spec")
cutoff <- opt.cut(perf.roc, pred.lab1)
perf$AUC <- perf.auc@y.values[[1]] # AUC
perf$sens <- cutoff["sensitivity",] # sensitivity
perf$spec <- cutoff["specificity",] # specificity
perf$cutoff <- cutoff["cutoff",] # optimal cutoff
perf$TSS <- perf$sens + perf$spec - 1 # True Skill Statistic
write.csv(perf,'RCW_evalresults.csv')
###SSS
# df<-read.csv('SSS.csv')
#
# lr01<- glm(PA ~ ed_msh200 + elev200f + brack200 +urb_200 +for_200, family=binomial(link="logit"), df, na.action=na.pass)
# summary(lr01)
#
# ## Top model estimates, odds ratios, and 95% CI
# coef.results<- cbind(coef(lr01),confint(lr01),OR = exp(coef(lr01)))
# coef.results <- cbind(coef.results, exp(confint(lr01)), coef(summary(lr01))[,4])
# colnames(coef.results)[1] <- "Estimate"
# colnames(coef.results)[7] <- "p value"
# #OR represents the odds that an outcome will occur given a particular predictor, compared to the odds of the outcome occuring without the predictor
# write.csv(coef.results,'SSS_coefresults.csv')
#
# df$fitted<-fitted(lr01)
# ( lrAUC<- auc(df$PA, df$fitted) )
# #biserial correlation
# bis<-cor.test(df$fitted,df$PA)
# biserial<-biserial.cor(df$fitted,df$PA)*-1
# biserial
# #really just a measure of how well the fitted data correlates with the original PA data
# pred <- prediction(df$fitted, df$PA)
# #look at ROC curve
# perf <- performance(pred, measure="tpr", x.measure="fpr")
# plot(perf, col=rainbow(10),main='ROC Curve, AOC')
#
#
# opt.cut = function(perf, pred){
# cut.ind = mapply(FUN=function(x, y, p){
# d = (x - 0)^2 + (y-1)^2
# ind = which(d == min(d))
# c(sensitivity = y[[ind]], specificity = 1-x[[ind]],
# cutoff = p[[ind]])
# }, perf@x.values, perf@y.values, pred@cutoffs)
# }
#
# perf <- data.frame(AUC=NA,sens=NA,spec=NA,TSS=NA,cutoff=NA)
# pred <- predict(lr01, df, type = "response")
# pred.lab <- as.data.frame(cbind(pred,df$PA))
# colnames(pred.lab) <- c("predictions","labels")
# pred.lab1 <- prediction(pred.lab$predictions,pred.lab$labels)
# perf.auc <- performance(prediction(pred, df$PA),"auc")
# perf.roc <- performance(pred.lab1, measure = "tpr", x.measure = "fpr")
# perf.sens <- performance(prediction(pred,df$PA), "sens","spec")
# cutoff <- opt.cut(perf.roc, pred.lab1)
# perf$AUC <- perf.auc@y.values[[1]] # AUC
# perf$sens <- cutoff["sensitivity",] # sensitivity
# perf$spec <- cutoff["specificity",] # specificity
# perf$cutoff <- cutoff["cutoff",] # optimal cutoff
# perf$TSS <- perf$sens + perf$spec - 1
#
# write.csv(perf,'SSS_evalresults.csv')
#WP
#setwd("E:\\FinalModels\\Predictors\\WS")
## BUG FIX: the path previously read "...eliza\Documents..." -- the single
## backslash makes "\D" an invalid escape sequence, which is a parse error
## in R. Doubled to "\\Documents".
setwd("C:\\Users\\eliza\\Documents\\UGA\\Thesis\\NicheModels_2019\\EvalEdits")
df<-read.csv('WPnewedits.csv')
## Top presence/absence GLM for WP.
## NOTE(review): unlike the other sections, summary(lr01) is not printed here.
lr01<- glm(PA ~ pland_bh100f + ed_msh100 + landco100 + urb1kmf + elev1kmf, family=binomial(link="logit"), df, na.action=na.pass)
#each coef represents the change in the log odds of being 1/0 for a unit increase in that predictor while holding all other predictors constant
#the exp of each coef
## Top model estimates, odds ratios, and 95% CI
coef.results<- cbind(coef(lr01),confint(lr01),OR = exp(coef(lr01)))
coef.results <- cbind(coef.results, exp(confint(lr01)), coef(summary(lr01))[,4])
colnames(coef.results)[1] <- "Estimate"
colnames(coef.results)[7] <- "p value"
#OR represents the odds that an outcome will occur given a particular predictor, compared to the odds of the outcome occurring without the predictor
write.csv(coef.results,'WP_coefresults.csv')
df$fitted<-fitted(lr01)
( lrAUC<- auc(df$PA, df$fitted) )
#biserial correlation
bis<-cor.test(df$fitted,df$PA)
biserial<-biserial.cor(df$fitted,df$PA)*-1
biserial
#really just a measure of how well the fitted data correlates with the original PA data
pred <- prediction(df$fitted, df$PA)
#look at ROC curve
perf <- performance(pred, measure="tpr", x.measure="fpr")
plot(perf, col=rainbow(10),main='ROC Curve, AOC')
## Cutoff on the ROC curve closest to the perfect-classifier corner (0, 1).
## BUG FIX: which(d == min(d)) can return several indices when points tie,
## which makes y[[ind]] error; which.min() always returns a single index.
opt.cut = function(perf, pred){
    cut.ind = mapply(FUN=function(x, y, p){
        d = (x - 0)^2 + (y-1)^2
        ind = which.min(d)
        c(sensitivity = y[[ind]], specificity = 1-x[[ind]],
          cutoff = p[[ind]])
    }, perf@x.values, perf@y.values, pred@cutoffs)
}
perf <- data.frame(AUC=NA,sens=NA,spec=NA,TSS=NA,cutoff=NA)
pred <- predict(lr01, df, type = "response")
pred.lab <- as.data.frame(cbind(pred,df$PA))
colnames(pred.lab) <- c("predictions","labels")
pred.lab1 <- prediction(pred.lab$predictions,pred.lab$labels)
perf.auc <- performance(prediction(pred, df$PA),"auc")
perf.roc <- performance(pred.lab1, measure = "tpr", x.measure = "fpr")
perf.sens <- performance(prediction(pred,df$PA), "sens","spec")
cutoff <- opt.cut(perf.roc, pred.lab1)
perf$AUC <- perf.auc@y.values[[1]] # AUC
perf$sens <- cutoff["sensitivity",] # sensitivity
perf$spec <- cutoff["specificity",] # specificity
perf$cutoff <- cutoff["cutoff",] # optimal cutoff
perf$TSS <- perf$sens + perf$spec - 1 # True Skill Statistic
write.csv(perf,'WP_evalresults.csv')
#WS
#setwd("E:\\FinalModels\\Predictors\\WS")
## BUG FIX: the path previously read "...eliza\Documents..." -- the single
## backslash makes "\D" an invalid escape sequence, which is a parse error
## in R. Doubled to "\\Documents".
setwd("C:\\Users\\eliza\\Documents\\UGA\\Thesis\\NicheModels_2019\\EvalEdits")
df<-read.csv('WSfinfin1010.csv')
lr01 <- glm(PA ~ nwi2000 + wat2000 + I(wat2000^2) + nwifwd_2000 + nhd_2000 + landco2000 + I(landco2000^2) + can2000, family=binomial(link="logit"), df, na.action=na.pass)
summary(lr01)
#each coef represents the change in the log odds of being 1/0 for a unit increase in that predictor while holding all other predictors constant
#the exp of each coef
## Top model estimates, odds ratios, and 95% CI
coef.results<- cbind(coef(lr01),confint(lr01),OR = exp(coef(lr01)))
coef.results <- cbind(coef.results, exp(confint(lr01)), coef(summary(lr01))[,4])
colnames(coef.results)[1] <- "Estimate"
colnames(coef.results)[7] <- "p value"
#OR represents the odds that an outcome will occur given a particular predictor, compared to the odds of the outcome occurring without the predictor
write.csv(coef.results,'WS_coefresults.csv')
df$fitted<-fitted(lr01)
( lrAUC<- auc(df$PA, df$fitted) )
#biserial correlation
bis<-cor.test(df$fitted,df$PA)
biserial<-biserial.cor(df$fitted,df$PA)*-1
biserial
#really just a measure of how well the fitted data correlates with the original PA data
pred <- prediction(df$fitted, df$PA)
#look at ROC curve
perf <- performance(pred, measure="tpr", x.measure="fpr")
plot(perf, col=rainbow(10),main='ROC Curve, AOC')
## Cutoff on the ROC curve closest to the perfect-classifier corner (0, 1).
## BUG FIX: which(d == min(d)) can return several indices when points tie,
## which makes y[[ind]] error; which.min() always returns a single index.
opt.cut = function(perf, pred){
    cut.ind = mapply(FUN=function(x, y, p){
        d = (x - 0)^2 + (y-1)^2
        ind = which.min(d)
        c(sensitivity = y[[ind]], specificity = 1-x[[ind]],
          cutoff = p[[ind]])
    }, perf@x.values, perf@y.values, pred@cutoffs)
}
perf <- data.frame(AUC=NA,sens=NA,spec=NA,TSS=NA,cutoff=NA)
pred <- predict(lr01, df, type = "response")
pred.lab <- as.data.frame(cbind(pred,df$PA))
colnames(pred.lab) <- c("predictions","labels")
pred.lab1 <- prediction(pred.lab$predictions,pred.lab$labels)
perf.auc <- performance(prediction(pred, df$PA),"auc")
perf.roc <- performance(pred.lab1, measure = "tpr", x.measure = "fpr")
perf.sens <- performance(prediction(pred,df$PA), "sens","spec")
cutoff <- opt.cut(perf.roc, pred.lab1)
perf$AUC <- perf.auc@y.values[[1]] # AUC
perf$sens <- cutoff["sensitivity",] # sensitivity
perf$spec <- cutoff["specificity",] # specificity
perf$cutoff <- cutoff["cutoff",] # optimal cutoff
perf$TSS <- perf$sens + perf$spec - 1 # True Skill Statistic
write.csv(perf,'WS_evalresults.csv')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{unfactorn}
\alias{unfactorn}
\title{unfactor shortcut for numeric factors}
\usage{
unfactorn(f)
}
\arguments{
\item{f}{factor}
}
\description{
unfactor shortcut for numeric factors
}
| /rrdkit/man/unfactorn.Rd | no_license | YonghuiDong/RRDKit | R | false | true | 276 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{unfactorn}
\alias{unfactorn}
\title{unfactor shortcut for numeric factors}
\usage{
unfactorn(f)
}
\arguments{
\item{f}{factor}
}
\description{
unfactor shortcut for numeric factors
}
|
npuniden.boundary <- function(X=NULL,
Y=NULL,
h=NULL,
a=min(X),
b=max(X),
bwmethod=c("cv.ls","cv.ml"),
cv=c("grid-hybrid","numeric"),
grid=NULL,
kertype=c("gaussian1","gaussian2",
"beta1","beta2",
"fb","fbl","fbu",
"rigaussian","gamma"),
nmulti=5,
proper=FALSE) {
kertype <- match.arg(kertype)
cv <- match.arg(cv)
bwmethod <- match.arg(bwmethod)
if(!is.null(grid) && any(grid<=0)) stop(" the grid vector must contain positive values")
if(is.null(X)) stop("you must pass a vector X")
if(kertype=="gamma" || kertype=="rigaussian") b <- Inf
if(kertype=="fbl") b <- Inf
if(kertype=="fbu") a <- -Inf
if(a>=b) stop("a must be less than b")
if(any(X<a)) stop("X must be >= a")
if(any(X>b)) stop("X must be <= b")
if(!is.null(Y) && any(Y<a)) stop("Y must be >= a")
if(!is.null(Y) && any(Y>b)) stop("Y must be <= b")
if(is.null(Y)) Y <- X
if(!is.null(h) && h <= 0) stop("bandwidth h must be positive")
if(nmulti < 1) stop("number of multistarts nmulti must be positive")
if(!is.logical(proper)) stop("proper must be either TRUE or FALSE")
if(kertype=="gaussian2" && (!is.finite(a) || !is.finite(b))) stop("finite bounds are required for kertype gaussian2")
h.opt <- NULL
if(kertype=="gaussian1") {
## Gaussian reweighted boundary kernel function (bias of O(h))
kernel <- function(x,X,h,a=0,b=1) {
dnorm((x-X)/h)/(h*(pnorm((b-x)/h)-pnorm((a-x)/h)))
}
} else if(kertype=="gaussian2") {
## Gaussian reweighted second-order boundary kernel function
## (bias of O(h^2)). Instability surfaces for extremely large
## bandwidths relative to range of the data, so we shrink to
## the uniform when h exceeds 10,000 times the range (b-a)
kernel <- function(x,X,h,a=0,b=1) {
z <- (x-X)/h
z.a <- (a-x)/h
z.b <- (b-x)/h
pnorm.zb.m.pnorm.za <- (pnorm(z.b)-pnorm(z.a))
mu.1 <- (dnorm(z.a)-dnorm(z.b))/(pnorm.zb.m.pnorm.za)
mu.2 <- 1+(z.a*dnorm(z.a)-z.b*dnorm(z.b))/(pnorm.zb.m.pnorm.za)
mu.3 <- ((z.a**2+2)*dnorm(z.a)-(z.b**2+2)*dnorm(z.b))/(pnorm.zb.m.pnorm.za)
aa <- mu.3/(mu.3-mu.1*mu.2)
bb <- -mu.1/(mu.3-mu.1*mu.2)
if((b-a)/h > 1e-04) {
(aa+bb*z**2)*dnorm(z)/(h*pnorm.zb.m.pnorm.za)
} else {
rep(1/(b-a),length(X))
}
}
} else if(kertype=="beta1") {
## Chen (1999), Beta 1 kernel function (bias of O(h), function
## of f' and f'', no division by h), need to rescale to
## integrate to 1 on [a,b]
kernel <- function(x,X,h,a=0,b=1) {
X <- (X-a)/(b-a)
x <- (x-a)/(b-a)
dbeta(X,x/h+1,(1-x)/h+1)/(b-a)
}
} else if(kertype=="beta2") {
## Chen (1999), Beta 2 kernel function (bias of O(h), function
## of f'' only, no division by h), need to rescale to
## integrate to 1 on [a,b]
rho <- function(x,h) {2*h**2+2.5-sqrt(4*h**4+6*h**2+2.25-x**2-x/h)}
kernel <- function(x,X,h,a=0,b=1) {
X <- (X-a)/(b-a)
x <- (x-a)/(b-a)
if(x < 2*h && h < (b-a)) {
dbeta(X,rho(x,h),(1-x)/h)/(b-a)
} else if((2*h <= x && x <= 1-2*h) || h >= (b-a)) {
dbeta(X,x/h,(1-x)/h)/(b-a)
} else if(x > 1-2*h && h < (b-a)) {
dbeta(X,x/h,rho(1-x,h))/(b-a)
}
}
} else if(kertype=="gamma") {
## Gamma kernel function for x in [a,Inf]
kernel <- function(x,X,h,a=0,b=1) {
## No division by h, rescale to lie in [0,Inf], b is a
## dummy, not used but needed to avoid warning about
## function kernel having different named arguments
X <- X-a
x <- x-a
dgamma(X,x/h+1,1/h)
}
} else if(kertype=="rigaussian") {
## Reverse inverse Gaussian for x in [a,Inf]
kernel <- function(x,X,h,a=0,b=1) {
## No division by h, rescale to lie in [0,Inf], b is a
## dummy, not used but needed to avoid warning about
## function kernel having different named arguments
X <- X - a
x <- x - a
x.res <- sqrt(x**2+h*x)
k <- exp(-x.res/(2*h)*(X/x.res+x.res/X-2))/sqrt(2*pi*h*X)
k[is.nan(k)] <- 0
k
}
} else if(kertype=="fb") {
## Floating boundary kernel (Scott (1992), Page 46), left and
## right bound, truncated biweight in interior
kernel <- function(x,X,h,a=0,b=1) {
t <- (X-x)/h
if(x < a+h && h < (b-a)) {
c <- (a-x)/h
ifelse(c <= t & t <= 2+c,.75*(c+1-1.25*(1+2*c)*(t-c)^2)*(t-(c+2))^2,0)/h
} else if((a+h <= x && x <= b-h) || h >= (b-a)) {
z.a <- (a-x)/h
z.b <- (b-x)/h
rw <- (3*(z.b^5-z.a^5)-10*(z.b^3-z.a^3)+15*(z.b-z.a))/16
rw[rw>1] <- 1
ifelse(abs(t)<1,(15/16)*(1-t**2)**2/(h*rw),0)
} else if(x > b-h && h < (b-a)) {
c <- (b-x)/h
ifelse(c-2 <= t & t <= c,.75*(1-c+1.25*(-1+2*c)*(t-c)^2)*(t-(c-2))^2,0)/h
}
}
} else if(kertype=="fbl") {
## Floating boundary kernel (Scott (1992), Page 46), left bound
kernel <- function(x,X,h,a=0,b=1) {
t <- (X-x)/h
if(x < a+h) {
c <- (a-x)/h
ifelse(c <= t & t <= 2+c,.75*(c+1-1.25*(1+2*c)*(t-c)^2)*(t-(c+2))^2,0)/h
} else {
ifelse(abs(t)<1,(15/16)*(1-t**2)**2/h,0)
}
}
} else if(kertype=="fbu") {
kernel <- function(x,X,h,a=0,b=1) {
## Floating boundary kernel (Scott (1992), Page 46), right bound
t <- (X-x)/h
if(x <= b-h) {
ifelse(abs(t)<1,(15/16)*(1-t**2)**2/h,0)
} else {
c <- (b-x)/h
ifelse(c-2 <= t & t <= c,.75*(1-c+1.25*(-1+2*c)*(t-c)^2)*(t-(c-2))^2,0)/h
}
}
}
int.kernel.squared <- function(X,h,a=a,b=b) {
## Use numeric integration to compute Kappa, the integral of
## the square of the kernel function needed for the asymptotic
## standard error of the density estimate seq(a,b) will barf
## on -Inf or Inf, trap these cases and use extendrange
if(is.finite(a) && is.finite(b)) X.seq <- seq(a,b,length=1000)
if(is.finite(a) && !is.finite(b)) X.seq <- seq(a,extendrange(X,f=10)[2],length=1000)
if(!is.finite(a) && is.finite(b)) X.seq <- seq(extendrange(X,f=10)[1],b,length=1000)
if(!is.finite(a) && !is.finite(b)) X.seq <- seq(extendrange(X,f=10)[1],extendrange(X,f=10)[2],length=1000)
sapply(1:length(X),function(i){integrate.trapezoidal(X.seq,h*kernel(X[i],X.seq,h,a,b)**2)[length(X.seq)]})
}
    ## Kernel density estimate of f at the evaluation points Y, using the
    ## sample X and the boundary kernel selected above. With proper=TRUE the
    ## estimate is shifted to be nonnegative (boundary kernels can go
    ## negative) and renormalized to integrate to one over the support,
    ## using a 1000-point grid (infinite bounds replaced by an extended
    ## data range).
    fhat <- function(X,Y,h,a=0,b=1,proper=FALSE) {
        f <- sapply(1:length(Y),function(i){mean(kernel(Y[i],X,h,a,b))})
        if(proper) {
            if(is.finite(a) && is.finite(b)) X.seq <- seq(a,b,length=1000)
            if(is.finite(a) && !is.finite(b)) X.seq <- seq(a,extendrange(X,f=10)[2],length=1000)
            if(!is.finite(a) && is.finite(b)) X.seq <- seq(extendrange(X,f=10)[1],b,length=1000)
            if(!is.finite(a) && !is.finite(b)) X.seq <- seq(extendrange(X,f=10)[1],extendrange(X,f=10)[2],length=1000)
            f.seq <- sapply(1:length(X.seq),function(i){mean(kernel(X.seq[i],X,h,a,b))})
            if(any(f.seq<0)) {
                ## Shift both the grid and evaluation-point estimates up by
                ## the most negative grid value so the density is nonnegative.
                f <- f - min(f.seq)
                f.seq <- f.seq - min(f.seq)
            }
            ## Renormalize so the density integrates to one on the grid.
            int.f.seq <- integrate.trapezoidal(X.seq,f.seq)[length(X.seq)]
            f <- f/int.f.seq
        }
        return(f)
    }
Fhat <- function(Y,f,a,b,proper=FALSE) {
## Numerical integration of f, check for aberrant values, if
## on range of data ensure F\in[0,1], if not make sure value
## is proper (negative boundary kernel functions can cause
## unwanted artifacts)
f[is.na(f)] <- 0
F <- integrate.trapezoidal(Y,f)
if(proper) {
if(min(Y)==a && max(Y)==b) {
F <- (F-min(F))/(max(F)-min(F))
} else {
if(min(F)<0) F <- F+min(F)
if(max(F)>1) F <- F/max(F)
}
}
F
}
fhat.loo <- function(X,h,a=0,b=1) {
sapply(1:length(X),function(i){mean(kernel(X[i],X[-i],h,a,b))})
}
    ## Build the cross-validation objective and the optim() fnscale that
    ## makes L-BFGS-B maximize (cv.ml) or minimize (cv.ls) it.
    if(bwmethod=="cv.ml") {
        ## Likelihood cross-validation function (maximizing)
        fnscale <- list(fnscale = -1)
        cv.function <- function(h,X,a=0,b=1) {
            f.loo <- fhat.loo(X,h,a,b)
            ## Guard nonpositive / nonfinite leave-one-out densities with the
            ## smallest positive double so log() stays finite.
            return(sum(log(ifelse(f.loo > 0 & is.finite(f.loo), f.loo, .Machine$double.xmin))))
        }
    } else {
        ## Least-squares cross-validation function (minimizing)
        fnscale <- list(fnscale = 1)
        cv.function <- function(h,X,a=0,b=1) {
            cv.ls <- (integrate.trapezoidal(X,fhat(X,X,h,a,b)**2)[order(X)])[length(X)]-2*mean(fhat.loo(X,h,a,b))
            ## Nonfinite criterion values get a huge finite penalty so the
            ## optimizer steers away from them.
            ifelse(is.finite(cv.ls),cv.ls,sqrt(sqrt(.Machine$double.xmax)))
        }
    }
## Grid search and then numeric optimization search (no
## multistarting, but sound starting point always used for
## subsequent refinement by optim)
if(is.null(h) && cv == "grid-hybrid") {
## First establish a sound starting value using grid search,
## then use that starting value for numeric search
if(is.null(grid)) {
rob.spread <- c(sd(X),IQR(X)/1.349)
rob.spread <- min(rob.spread[rob.spread>0])
constant <- rob.spread*length(X)**(-0.2)
h.vec <- c(seq(0.25,1.75,length=10),2^(1:25))*constant
cv.vec <- sapply(1:length(h.vec),function(i){cv.function(h.vec[i],X,a,b)})
foo <- optim(h.vec[ifelse(bwmethod=="cv.ml",which.max(cv.vec),which.min(cv.vec))],
cv.function,
method="L-BFGS-B",
lower=sqrt(.Machine$double.eps),
upper=ifelse(kertype=="beta2",(b-a)/4,Inf),
control = fnscale,
X=X,
a=a,
b=b)
h.opt <- foo$par
cv.opt <- foo$value
} else {
cv.vec <- sapply(1:length(grid),function(i){cv.function(grid[i],X,a,b)})
foo <- optim(grid[ifelse(bwmethod=="cv.ml",which.max(cv.vec),which.min(cv.vec))],
cv.function,
method="L-BFGS-B",
lower=sqrt(.Machine$double.eps),
upper=ifelse(kertype=="beta2",(b-a)/4,Inf),
control = fnscale,
X=X,
a=a,
b=b)
h.opt <- foo$par
cv.opt <- foo$value
}
}
if(is.null(h.opt)) {
## Manual inputted bandwidth
f <- fhat(X,Y,h,a,b,proper=proper)
## Numerical integration via the trapezoidal rule
F <- Fhat(Y,f,a,b,proper=proper)
return(list(f=f,
F=F,
sd.f=sqrt(abs(f*int.kernel.squared(Y,h,a,b)/(h*length(f)))),
sd.F=sqrt(abs(F*(1-F)/length(F))),
h=h))
} else {
## Search bandwidth
f <- fhat(X,Y,h.opt,a,b,proper=proper)
## Numerical integration via the trapezoidal rule
F <- Fhat(Y,f,a,b,proper=proper)
return(list(f=f,
F=F,
sd.f=sqrt(abs(f*int.kernel.squared(Y,h.opt,a,b)/(h.opt*length(f)))),
sd.F=sqrt(abs(F*(1-F)/length(F))),
h=h.opt,
nmulti=nmulti,
cv.opt=cv.opt))
}
}
| /R/npuniden.boundary.R | no_license | JeffreyRacine/R-Package-np | R | false | false | 12,357 | r | npuniden.boundary <- function(X=NULL,
Y=NULL,
h=NULL,
a=min(X),
b=max(X),
bwmethod=c("cv.ls","cv.ml"),
cv=c("grid-hybrid","numeric"),
grid=NULL,
kertype=c("gaussian1","gaussian2",
"beta1","beta2",
"fb","fbl","fbu",
"rigaussian","gamma"),
nmulti=5,
proper=FALSE) {
kertype <- match.arg(kertype)
cv <- match.arg(cv)
bwmethod <- match.arg(bwmethod)
if(!is.null(grid) && any(grid<=0)) stop(" the grid vector must contain positive values")
if(is.null(X)) stop("you must pass a vector X")
if(kertype=="gamma" || kertype=="rigaussian") b <- Inf
if(kertype=="fbl") b <- Inf
if(kertype=="fbu") a <- -Inf
if(a>=b) stop("a must be less than b")
if(any(X<a)) stop("X must be >= a")
if(any(X>b)) stop("X must be <= b")
if(!is.null(Y) && any(Y<a)) stop("Y must be >= a")
if(!is.null(Y) && any(Y>b)) stop("Y must be <= b")
if(is.null(Y)) Y <- X
if(!is.null(h) && h <= 0) stop("bandwidth h must be positive")
if(nmulti < 1) stop("number of multistarts nmulti must be positive")
if(!is.logical(proper)) stop("proper must be either TRUE or FALSE")
if(kertype=="gaussian2" && (!is.finite(a) || !is.finite(b))) stop("finite bounds are required for kertype gaussian2")
h.opt <- NULL
if(kertype=="gaussian1") {
## Gaussian reweighted boundary kernel function (bias of O(h))
kernel <- function(x,X,h,a=0,b=1) {
dnorm((x-X)/h)/(h*(pnorm((b-x)/h)-pnorm((a-x)/h)))
}
} else if(kertype=="gaussian2") {
## Gaussian reweighted second-order boundary kernel function
## (bias of O(h^2)). Instability surfaces for extremely large
## bandwidths relative to range of the data, so we shrink to
## the uniform when h exceeds 10,000 times the range (b-a)
kernel <- function(x,X,h,a=0,b=1) {
z <- (x-X)/h
z.a <- (a-x)/h
z.b <- (b-x)/h
pnorm.zb.m.pnorm.za <- (pnorm(z.b)-pnorm(z.a))
mu.1 <- (dnorm(z.a)-dnorm(z.b))/(pnorm.zb.m.pnorm.za)
mu.2 <- 1+(z.a*dnorm(z.a)-z.b*dnorm(z.b))/(pnorm.zb.m.pnorm.za)
mu.3 <- ((z.a**2+2)*dnorm(z.a)-(z.b**2+2)*dnorm(z.b))/(pnorm.zb.m.pnorm.za)
aa <- mu.3/(mu.3-mu.1*mu.2)
bb <- -mu.1/(mu.3-mu.1*mu.2)
if((b-a)/h > 1e-04) {
(aa+bb*z**2)*dnorm(z)/(h*pnorm.zb.m.pnorm.za)
} else {
rep(1/(b-a),length(X))
}
}
} else if(kertype=="beta1") {
## Chen (1999), Beta 1 kernel function (bias of O(h), function
## of f' and f'', no division by h), need to rescale to
## integrate to 1 on [a,b]
kernel <- function(x,X,h,a=0,b=1) {
X <- (X-a)/(b-a)
x <- (x-a)/(b-a)
dbeta(X,x/h+1,(1-x)/h+1)/(b-a)
}
} else if(kertype=="beta2") {
## Chen (1999), Beta 2 kernel function (bias of O(h), function
## of f'' only, no division by h), need to rescale to
## integrate to 1 on [a,b]
rho <- function(x,h) {2*h**2+2.5-sqrt(4*h**4+6*h**2+2.25-x**2-x/h)}
kernel <- function(x,X,h,a=0,b=1) {
X <- (X-a)/(b-a)
x <- (x-a)/(b-a)
if(x < 2*h && h < (b-a)) {
dbeta(X,rho(x,h),(1-x)/h)/(b-a)
} else if((2*h <= x && x <= 1-2*h) || h >= (b-a)) {
dbeta(X,x/h,(1-x)/h)/(b-a)
} else if(x > 1-2*h && h < (b-a)) {
dbeta(X,x/h,rho(1-x,h))/(b-a)
}
}
} else if(kertype=="gamma") {
## Gamma kernel function for x in [a,Inf]
kernel <- function(x,X,h,a=0,b=1) {
## No division by h, rescale to lie in [0,Inf], b is a
## dummy, not used but needed to avoid warning about
## function kernel having different named arguments
X <- X-a
x <- x-a
dgamma(X,x/h+1,1/h)
}
} else if(kertype=="rigaussian") {
## Reverse inverse Gaussian for x in [a,Inf]
kernel <- function(x,X,h,a=0,b=1) {
## No division by h, rescale to lie in [0,Inf], b is a
## dummy, not used but needed to avoid warning about
## function kernel having different named arguments
X <- X - a
x <- x - a
x.res <- sqrt(x**2+h*x)
k <- exp(-x.res/(2*h)*(X/x.res+x.res/X-2))/sqrt(2*pi*h*X)
k[is.nan(k)] <- 0
k
}
} else if(kertype=="fb") {
## Floating boundary kernel (Scott (1992), Page 46), left and
## right bound, truncated biweight in interior
kernel <- function(x,X,h,a=0,b=1) {
t <- (X-x)/h
if(x < a+h && h < (b-a)) {
c <- (a-x)/h
ifelse(c <= t & t <= 2+c,.75*(c+1-1.25*(1+2*c)*(t-c)^2)*(t-(c+2))^2,0)/h
} else if((a+h <= x && x <= b-h) || h >= (b-a)) {
z.a <- (a-x)/h
z.b <- (b-x)/h
rw <- (3*(z.b^5-z.a^5)-10*(z.b^3-z.a^3)+15*(z.b-z.a))/16
rw[rw>1] <- 1
ifelse(abs(t)<1,(15/16)*(1-t**2)**2/(h*rw),0)
} else if(x > b-h && h < (b-a)) {
c <- (b-x)/h
ifelse(c-2 <= t & t <= c,.75*(1-c+1.25*(-1+2*c)*(t-c)^2)*(t-(c-2))^2,0)/h
}
}
} else if(kertype=="fbl") {
## Floating boundary kernel (Scott (1992), Page 46), left bound
kernel <- function(x,X,h,a=0,b=1) {
t <- (X-x)/h
if(x < a+h) {
c <- (a-x)/h
ifelse(c <= t & t <= 2+c,.75*(c+1-1.25*(1+2*c)*(t-c)^2)*(t-(c+2))^2,0)/h
} else {
ifelse(abs(t)<1,(15/16)*(1-t**2)**2/h,0)
}
}
} else if(kertype=="fbu") {
kernel <- function(x,X,h,a=0,b=1) {
## Floating boundary kernel (Scott (1992), Page 46), right bound
t <- (X-x)/h
if(x <= b-h) {
ifelse(abs(t)<1,(15/16)*(1-t**2)**2/h,0)
} else {
c <- (b-x)/h
ifelse(c-2 <= t & t <= c,.75*(1-c+1.25*(-1+2*c)*(t-c)^2)*(t-(c-2))^2,0)/h
}
}
}
int.kernel.squared <- function(X,h,a=a,b=b) {
## Use numeric integration to compute Kappa, the integral of
## the square of the kernel function needed for the asymptotic
## standard error of the density estimate seq(a,b) will barf
## on -Inf or Inf, trap these cases and use extendrange
if(is.finite(a) && is.finite(b)) X.seq <- seq(a,b,length=1000)
if(is.finite(a) && !is.finite(b)) X.seq <- seq(a,extendrange(X,f=10)[2],length=1000)
if(!is.finite(a) && is.finite(b)) X.seq <- seq(extendrange(X,f=10)[1],b,length=1000)
if(!is.finite(a) && !is.finite(b)) X.seq <- seq(extendrange(X,f=10)[1],extendrange(X,f=10)[2],length=1000)
sapply(1:length(X),function(i){integrate.trapezoidal(X.seq,h*kernel(X[i],X.seq,h,a,b)**2)[length(X.seq)]})
}
fhat <- function(X,Y,h,a=0,b=1,proper=FALSE) {
f <- sapply(1:length(Y),function(i){mean(kernel(Y[i],X,h,a,b))})
if(proper) {
if(is.finite(a) && is.finite(b)) X.seq <- seq(a,b,length=1000)
if(is.finite(a) && !is.finite(b)) X.seq <- seq(a,extendrange(X,f=10)[2],length=1000)
if(!is.finite(a) && is.finite(b)) X.seq <- seq(extendrange(X,f=10)[1],b,length=1000)
if(!is.finite(a) && !is.finite(b)) X.seq <- seq(extendrange(X,f=10)[1],extendrange(X,f=10)[2],length=1000)
f.seq <- sapply(1:length(X.seq),function(i){mean(kernel(X.seq[i],X,h,a,b))})
if(any(f.seq<0)) {
f <- f - min(f.seq)
f.seq <- f.seq - min(f.seq)
}
int.f.seq <- integrate.trapezoidal(X.seq,f.seq)[length(X.seq)]
f <- f/int.f.seq
}
return(f)
}
Fhat <- function(Y,f,a,b,proper=FALSE) {
## Numerical integration of f, check for aberrant values, if
## on range of data ensure F\in[0,1], if not make sure value
## is proper (negative boundary kernel functions can cause
## unwanted artifacts)
f[is.na(f)] <- 0
F <- integrate.trapezoidal(Y,f)
if(proper) {
if(min(Y)==a && max(Y)==b) {
F <- (F-min(F))/(max(F)-min(F))
} else {
if(min(F)<0) F <- F+min(F)
if(max(F)>1) F <- F/max(F)
}
}
F
}
fhat.loo <- function(X,h,a=0,b=1) {
sapply(1:length(X),function(i){mean(kernel(X[i],X[-i],h,a,b))})
}
if(bwmethod=="cv.ml") {
## Likelihood cross-validation function (maximizing)
fnscale <- list(fnscale = -1)
cv.function <- function(h,X,a=0,b=1) {
f.loo <- fhat.loo(X,h,a,b)
return(sum(log(ifelse(f.loo > 0 & is.finite(f.loo), f.loo, .Machine$double.xmin))))
}
} else {
## Least-squares cross-validation function (minimizing)
fnscale <- list(fnscale = 1)
cv.function <- function(h,X,a=0,b=1) {
cv.ls <- (integrate.trapezoidal(X,fhat(X,X,h,a,b)**2)[order(X)])[length(X)]-2*mean(fhat.loo(X,h,a,b))
ifelse(is.finite(cv.ls),cv.ls,sqrt(sqrt(.Machine$double.xmax)))
}
}
## Grid search and then numeric optimization search (no
## multistarting, but sound starting point always used for
## subsequent refinement by optim)
if(is.null(h) && cv == "grid-hybrid") {
## First establish a sound starting value using grid search,
## then use that starting value for numeric search
if(is.null(grid)) {
rob.spread <- c(sd(X),IQR(X)/1.349)
rob.spread <- min(rob.spread[rob.spread>0])
constant <- rob.spread*length(X)**(-0.2)
h.vec <- c(seq(0.25,1.75,length=10),2^(1:25))*constant
cv.vec <- sapply(1:length(h.vec),function(i){cv.function(h.vec[i],X,a,b)})
foo <- optim(h.vec[ifelse(bwmethod=="cv.ml",which.max(cv.vec),which.min(cv.vec))],
cv.function,
method="L-BFGS-B",
lower=sqrt(.Machine$double.eps),
upper=ifelse(kertype=="beta2",(b-a)/4,Inf),
control = fnscale,
X=X,
a=a,
b=b)
h.opt <- foo$par
cv.opt <- foo$value
} else {
cv.vec <- sapply(1:length(grid),function(i){cv.function(grid[i],X,a,b)})
foo <- optim(grid[ifelse(bwmethod=="cv.ml",which.max(cv.vec),which.min(cv.vec))],
cv.function,
method="L-BFGS-B",
lower=sqrt(.Machine$double.eps),
upper=ifelse(kertype=="beta2",(b-a)/4,Inf),
control = fnscale,
X=X,
a=a,
b=b)
h.opt <- foo$par
cv.opt <- foo$value
}
}
if(is.null(h.opt)) {
## Manual inputted bandwidth
f <- fhat(X,Y,h,a,b,proper=proper)
## Numerical integration via the trapezoidal rule
F <- Fhat(Y,f,a,b,proper=proper)
return(list(f=f,
F=F,
sd.f=sqrt(abs(f*int.kernel.squared(Y,h,a,b)/(h*length(f)))),
sd.F=sqrt(abs(F*(1-F)/length(F))),
h=h))
} else {
## Search bandwidth
f <- fhat(X,Y,h.opt,a,b,proper=proper)
## Numerical integration via the trapezoidal rule
F <- Fhat(Y,f,a,b,proper=proper)
return(list(f=f,
F=F,
sd.f=sqrt(abs(f*int.kernel.squared(Y,h.opt,a,b)/(h.opt*length(f)))),
sd.F=sqrt(abs(F*(1-F)/length(F))),
h=h.opt,
nmulti=nmulti,
cv.opt=cv.opt))
}
}
|
## Simulate 50,000 Exp(lambda) draws, inspect the empirical CDF/PDF of the
## first few 100-draw rows, then illustrate the Central Limit Theorem on the
## 500 row means. Interactive: pauses for a keypress between plots.
x11()
lambda <- 0.2
vec <- rexp(50000, lambda)
plot(sort(vec), main="Scatter Plot")
cat("Press any key to continue\n")
x <- readLines("stdin", 1)
## 500 vectors of 100 observations each (column-major fill of vec).
mat <- matrix(vec, nrow = 500, ncol = 100)
for (i in 1:5)
{
    plot(ecdf(mat[i,]), ylab="F(X)", main=paste("CDF for vector", toString(i)))
    cat("Press any key to continue")
    x <- readLines("stdin", 1)
    ## Empirical PMF of the rounded values 0..99; pdata[v + 1] holds the
    ## relative frequency of the value v.
    pdata <- rep(0, 100)
    for (j in 1:100)
    {
        val <- round(mat[i, j], 0)
        ## BUG FIX: the original indexed pdata[val], which silently dropped
        ## val == 0 (assignment to index 0 is a no-op in R) and shifted
        ## every other value one bin left of the 0..99 x-axis used below.
        if (val >= 0 && val <= 99)
            pdata[val + 1] <- pdata[val + 1] + 1/100
    }
    xcols <- c(0:99)
    pdf <- predict(smooth.spline(xcols, pdata, spar=0.2))
    plot(pdf, pch=19, xlab="x", ylab="f(X)", main=paste("PDF for vector", toString(i)))
    lines(pdf)
    #plot(density(mat[i,]), main=paste("PDF for vector", toString(i)))
    cat("Press any key to continue")
    x <- readLines("stdin", 1)
    ## sd() is the idiomatic (and numerically identical) form of sqrt(var()).
    cat("Vector ", i, ", Mean = ", mean(mat[i,]), ", Standard deviation = ", sd(mat[i,]), "\n", sep = "")
}
means <- apply(mat, 1, mean)
plot(table(round(means)), main="Frequency for means")
cat("Press any key to continue\n")
x <- readLines("stdin", 1)
plot(ecdf(means), main="CDF for means")
cat("Press any key to continue\n")
x <- readLines("stdin", 1)
plot(density(means), main="PDF for means")
cat("Press any key to continue\n")
x <- readLines("stdin", 1)
cat("For means, Mean = ", mean(means), ", Standard deviation = ", sd(means), "\n", sep = "")
## CLT: the mean of 100 iid Exp(lambda) draws has mean 1/lambda and standard
## deviation (1/lambda)/sqrt(100) = 0.1/lambda.
cat("According to CLT, Expected Mean = ", 1/lambda, ", Expected Standard deviation = ", 0.1/lambda, "\n", sep = "")
| /160552_a10/160552.r | no_license | rharish101/cs251 | R | false | false | 1,524 | r | x11()
lambda <- 0.2
vec <- rexp(50000, lambda)
plot(sort(vec), main="Scatter Plot")
cat("Press any key to continue\n")
x <- readLines("stdin", 1)
mat = matrix(vec, nrow = 500, ncol = 100)
for (i in 1:5)
{
plot(ecdf(mat[i,]), ylab="F(X)", main=paste("CDF for vector", toString(i)))
cat("Press any key to continue")
x <- readLines("stdin", 1)
pdata <- rep(0, 100);
for(j in 1:100)
{
val=round(mat[i,j], 0);
if(val <= 100)
pdata[val] = pdata[val] + 1/100;
}
xcols <- c(0:99)
pdf <- predict(smooth.spline(xcols, pdata, spar=0.2))
plot(pdf, pch=19, xlab="x", ylab="f(X)", main=paste("PDF for vector", toString(i)))
lines(pdf)
#plot(density(mat[i,]), main=paste("PDF for vector", toString(i)))
cat("Press any key to continue")
x <- readLines("stdin", 1)
cat("Vector ", i, ", Mean = ", mean(mat[i,]), ", Standard deviation = ", sqrt(var(mat[i,])), "\n", sep = "")
}
means = apply(mat, 1, mean)
plot(table(round(means)), main="Frequency for means")
cat("Press any key to continue\n")
x <- readLines("stdin", 1)
plot(ecdf(means), main="CDF for means")
cat("Press any key to continue\n")
x <- readLines("stdin", 1)
plot(density(means), main="PDF for means")
cat("Press any key to continue\n")
x <- readLines("stdin", 1)
cat("For means, Mean = ", mean(means), ", Standard deviation = ", sqrt(var(means)), "\n", sep = "")
cat("According to CLT, Expected Mean = ", 1/lambda, ", Expected Standard deviation = ", 0.1/lambda, "\n", sep = "")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PlotTessellation.R
\name{PlotTessellation}
\alias{PlotTessellation}
\title{PlotTessellation}
\usage{
PlotTessellation(coordinates, path.mcmc, printit, path)
}
\arguments{
\item{coordinates}{Spatial coordinates of individuals. A matrix with 2
columns and one line per individual.}
\item{path.mcmc}{Character: path to the output files directory.}
\item{printit}{Logical: if TRUE, figures are also printed.}
\item{path}{Character: path to the directory where figures should be
printed.}
}
\description{
Plots maps of posterior probabilities of population
membership for each population
}
| /man/PlotTessellation.Rd | no_license | gilles-guillot/Geneland | R | false | true | 655 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PlotTessellation.R
\name{PlotTessellation}
\alias{PlotTessellation}
\title{PlotTessellation}
\usage{
PlotTessellation(coordinates, path.mcmc,printit,path)
@param coordinates Spatial coordinates of individuals. A matrix with 2
columns and one line per individual.
@param path.mcmc Character : Path to output files directory
@param printit Logical : if TRUE, figures are also printed
@param path Character : Path to directory where figures
should be printed
}
\description{
Plots maps of posterior probabilities of population
membership for each population
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/upload.R
\name{wndr_upload}
\alias{wndr_upload}
\title{Upload API}
\usage{
wndr_upload(filepath)
}
\arguments{
\item{filepath}{Path to a file}
}
\description{
Upload a file to use it via File API.
}
\seealso{
\url{https://developer.wunderlist.com/documentation/endpoints/upload}
}
| /man/wndr_upload.Rd | no_license | yutannihilation/wunderlistr | R | false | true | 360 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/upload.R
\name{wndr_upload}
\alias{wndr_upload}
\title{Upload API}
\usage{
wndr_upload(filepath)
}
\arguments{
\item{filepath}{Path to a file}
}
\description{
Upload a file to use it via File API.
}
\seealso{
\url{https://developer.wunderlist.com/documentation/endpoints/upload}
}
|
llik.coxph <- function(x,y,gam) {
  # Cox proportional-hazards partial log-likelihood (no tie handling).
  #
  # x   : n x p covariate matrix
  # y   : n x 2 matrix; column 1 = times, column 2 = event indicator
  #       (1 = event observed, 0 = right-censored)
  # gam : p-vector of regression coefficients
  #
  # log PL = sum_{i: event} [ x_i' gam - log sum_{j in R(t_i)} exp(x_j' gam) ]
  tmp <- llik.coxph.int(x,y,gam)
  observed <- tmp$observed
  y.obs <- tmp$y.obs
  y <- tmp$y
  no.obs <- tmp$no.obs
  # Log of the risk-set denominator at one observed event time u.
  logdenom <- function(u) {
    K <- (y >= u)
    # drop = FALSE keeps the matrix shape even when the risk set has a
    # single subject.
    x. <- x[K, , drop = FALSE]
    log(sum(exp(x. %*% gam)))
  }
  # BUG FIX 1: the linear-predictor part sums over observed events only.
  # The original summed x %*% gam over ALL subjects, censored ones
  # included (inconsistent with llik.coxph.der, which uses x[observed,h]).
  partone <- sum(x[observed, , drop = FALSE] %*% gam)
  # BUG FIX 2: -log(prod(...)) underflows for moderate n; sum logs instead.
  parttwo <- -sum(sapply(y.obs, logdenom))
  partone + parttwo
}
llik.coxph.der <- function(x,y,gam) {
  # Gradient (score vector) of the Cox partial log-likelihood.
  #
  # For coordinate h:
  #   d logPL / d gam_h = sum_{i: event} x_ih
  #     - sum_{event times u} [ sum_{j in R(u)} x_jh e^{x_j'gam}
  #                             / sum_{j in R(u)} e^{x_j'gam} ]
  tmp <- llik.coxph.int(x,y,gam)
  observed <- tmp$observed
  y.obs <- tmp$y.obs
  y <- tmp$y
  no.obs <- tmp$no.obs
  gen.der <- function(h) {
    # Score contribution of coordinate h only.
    partone <- sum(x[observed,h])
    foo <- function(u) {
      K=(y>=u)
      # BUG FIX: drop = FALSE keeps x. a matrix when the risk set contains
      # a single subject; the original x[K,] collapsed to a vector there,
      # and x.[,h] then failed with "incorrect number of dimensions".
      x. = x[K, , drop = FALSE]
      a <- x.[,h]
      b <- exp(x.%*%gam)
      ab <- sum(a*b)
      d <- sum(b)
      ab/d
    }
    parttwo <- sum(sapply(y.obs,foo))
    partone - parttwo
  }
  # One gradient element per coefficient.
  outvec <- sapply(seq_along(gam), gen.der)
  outvec
}
llik.coxph.int <- function(x,y,gam) {
  # Unpack the survival data shared by the likelihood and gradient routines.
  #
  # y   : n x 2 matrix; column 1 = times, column 2 = event indicator
  #       (1 = time observed, 0 = right-censored)
  # gam : unused here; kept so all three llik.coxph.* share one signature.
  #
  # Returns a list with the event mask ('observed'), the observed event
  # times ('y.obs'), all times ('y'), and the event count ('no.obs').
  y <- as.matrix(y)
  x <- as.matrix(x)
  event <- as.logical(y[, 2])
  times <- y[, 1]
  list(observed = event,
       y.obs    = as.vector(times[event]),
       y        = times,
       no.obs   = sum(event))
}
| /llik.coxph.R | no_license | xdavio/path | R | false | false | 1,525 | r | llik.coxph <- function(x,y,gam) {
#gam <- true.beta #debug only
tmp <- llik.coxph.int(x,y,gam)
observed <- tmp$observed
y.obs <- tmp$y.obs
y <- tmp$y
no.obs <- tmp$no.obs
#foo begins from each y.obs, moving ultimately to a construction of the groups of indices greater than each y.obs
foo <- function(u) {
K=(y>=u)
x.=x[K,]
foo <- sum(exp(x.%*%gam))
foo
}
parttwo <- -log(prod(sapply(y.obs,foo)))
partone <- sum(x%*%gam)
out <- partone + parttwo
out
}
llik.coxph.der <- function(x,y,gam) {
#gam <- true.beta #debug only
tmp <- llik.coxph.int(x,y,gam)
observed <- tmp$observed
y.obs <- tmp$y.obs
y <- tmp$y
no.obs <- tmp$no.obs
gen.der <- function(h) {
#only generates for one dimension
partone <- sum(x[observed,h])
foo <- function(u) {
K=(y>=u)
x.=x[K,]
a <- x.[,h]
b <- exp(x.%*%gam)
ab <- sum(a*b)
d <- sum(b)
ab/d
}
parttwo <- sum(sapply(y.obs,foo))
out <- partone-parttwo
out
}
outvec <- sapply(1:length(gam),gen.der) #split for all elem of gradient
outvec
}
llik.coxph.int <- function(x,y,gam) {
y <- as.matrix(y)
x <- as.matrix(x)
#y should be an nx2 matrix
# column 1 is the times
# column 2 is 1 when the time was observed; 0 when the obs is the result of censor
observed <- as.logical(y[,2])
y.obs <- as.vector(y[observed,1])
y <- y[,1]
no.obs <- sum(observed)
foo <- list(observed=observed,y.obs=y.obs,y=y,no.obs=no.obs)
foo
}
|
#### Download GO terms for P. relictum from PlasmoDB website
#### Vincenzo A. Ellis
#### 25 September 2018
#' Download the GO-term annotations for Plasmodium relictum from PlasmoDB.
#'
#' Reads the GAF (GO Annotation File) from the PlasmoDB download area and
#' returns it as a data frame whose columns follow the GAF 2.x layout.
#'
#' @param url Location of the GAF file. Defaults to the P. relictum
#'   SGS1-like annotation in the "Current_Release" directory (file name
#'   pinned to PlasmoDB-39, as in the original script).
#' @return A data.frame with one row per annotation and the 17 GAF columns.
prel_go <- function(url = "http://plasmodb.org/common/downloads/Current_Release/PrelictumSGS1-like/gaf/PlasmoDB-39_PrelictumSGS1-like_GO.gaf"){
  # Column names defined by the GAF 2.x specification.
  gaf.colnames <- c("DB", "DB Object ID", "DB Object Symbol", "Qualifier", "GO_ID",
                    "DB_Reference", "Evidence_Code", "With_or_From", "Aspect", "DB_Object_Name",
                    "DB_Object_Synonym", "DB_Object_Type", "Taxon", "Date", "Assigned_By",
                    "Annotation_Extension", "Gene_Product_Form_ID")
  # BUG FIX: GAF files carry no header row, so the previous call
  # (skip = 1 with the default header = TRUE) silently consumed the first
  # annotation record as column names.
  # comment.char = "!" skips ALL '!'-prefixed metadata lines, not just the
  # first one as skip = 1 did; quote = "" prevents stray quote characters
  # in annotation text from merging fields.
  x <- read.delim(url,
                  header = FALSE,
                  comment.char = "!",
                  quote = "",
                  col.names = gaf.colnames,
                  stringsAsFactors = FALSE)
  return(x)
}
| /download_Prelictum_GOterms.R | no_license | vincenzoaellis/sequence_capture_code | R | false | false | 683 | r | #### Download GO terms for P. relictum from PlasmoDB website
#### Vincenzo A. Ellis
#### 25 September 2018
prel_go <- function(){
gaf.colnames <- c("DB", "DB Object ID", "DB Object Symbol", "Qualifier", "GO_ID",
"DB_Reference", "Evidence_Code", "With_or_From", "Aspect", "DB_Object_Name",
"DB_Object_Synonym", "DB_Object_Type", "Taxon", "Date", "Assigned_By",
"Annotation_Extension", "Gene_Product_Form_ID")
x <- read.delim("http://plasmodb.org/common/downloads/Current_Release/PrelictumSGS1-like/gaf/PlasmoDB-39_PrelictumSGS1-like_GO.gaf",
skip = 1,
col.names = gaf.colnames)
return(x)
}
|
library(ape)
# Compute, for one binary genomic site, the posterior probability that the
# mutation arose on each branch of the cell phylogeny.
#
# Arguments:
#   alpha             false-positive rate of the sequencing-error model
#   beta              false-negative rate of the sequencing-error model
#   unit_lambda       per-unit-branch-length mutation rate (exponential model)
#   number_br         number of branches in the full tree; the returned
#                     vector is zero-padded to this length
#   number_cell       number of cells/tips in the full tree
#   normal_genotype   character code of the normal (unmutated) state
#   mutation_genotype character code of the mutant state
#   initial_obs       1-row data frame of observed states per cell; "-"
#                     marks missing data, any other non-normal/non-mutant
#                     value is treated as ambiguous
#   ts                phylo object (ape): the full cell tree
#
# Returns a numeric vector of length number_br: per-branch posterior
# probabilities for the subtree's branches, padded with zeros.
generate_prob_binary <- function(alpha,beta,unit_lambda,number_br,number_cell,normal_genotype,mutation_genotype,initial_obs,ts){
  ############################################
  # generating sub tree
  # for this gene or location, variable singletip_exclude are those sample equal '-'
  single_tip_exclude=c()
  # for this gene or location, variable tip_exclude are all those equal '-'
  tip_exclude=c()
  # for this gene or location, obs_colnam are those sample names, excluding the '-'
  obs_colnam=c()
  #
  obs_genotype=c()
  # assign the mutation status for sample j of gene i
  for (j in c(1:(number_cell))){
    # exclude those tips with missing gene status
    if (initial_obs[j]==c("-")) { single_tip_exclude=colnames(initial_obs)[j] # single_tip_exclude=colnames(initial_obs)[1,j]
    tip_exclude=c(tip_exclude,single_tip_exclude)
    }
    # value is 0 if gene status is same as normal
    else if (as.character(initial_obs[1,j])==as.character(normal_genotype)) {
      obs_genotype=c(obs_genotype,0)
    }
    # value is 1 if gene status is same as mutant
    else if (as.character(initial_obs[1,j])==as.character(mutation_genotype)) {
      obs_genotype=c(obs_genotype,1)
    }
    # value is m if gene status is ambguity
    else {obs_genotype=c(obs_genotype,"m")}
  }
  # for this gene or location, exclude the sample with missing gene status
  subtree=drop.tip(ts, tip=tip_exclude)
  # branch_time_list is the branch length of sub tree
  branch_time_list= subtree$edge.length
  ##################################################
  #for gene i, find the prob on each branch
  #probability of mutation on each branch
  #prob_Bj_ind=c()
  #for (l in 1:length(branch_time_list)){
  #assuming the exponential distribution on each branch,prob_Bj_ind[l] is the prob of mutation on branch l
  #prob_Bj_ind[l]=1-(exp(-unit_lambda*branch_time_list[l]))
  #}
  # create a matrix. In each row, diagnonal is the prob on that branch, and off-branch is the mutation prob that not on that branch
  # probability matrix when mutation exist on only one branch, the matrix is first created with same rows, and each row is the prob of no mutation
  #prob_Bj=matrix(rep(1-prob_Bj_ind,length(branch_time_list)),nrow=length(branch_time_list),byrow=TRUE)
  # diagonal is replaced with prob of mutation
  #diag(prob_Bj) <- prob_Bj_ind
  #for (br in 1:length(branch_time_list)){
  # prob_Bj[br,c(descendant_branches[[i]][[br]])]=1
  #}
  ####################################################
  #take the product of each row to find the prob of mutation on branch but not on other branches
  #first create a matrix of one column and each row represent the final prob
  #prob_Bj_final=matrix(,nrow=length(branch_time_list),ncol=1)# probability of mutation on branch and other branch has no mutation
  #for (l in 1:length(branch_time_list)){
  # prob_Bj_final[l,]=tail(cumprod(prob_Bj[l,]),1)
  #}
  # assign the final mutation prob of gene i into the list the all genes
  #allprob_Bj_final[[i]]=prob_Bj_final
  ####################################################
  #generate the obs matrix
  # obs_colnam are those with observations
  obs_colnam=setdiff(colnames(initial_obs),tip_exclude)
  # consider the ambguity status as missing
  #obs_genotype_mat=matrix(,nrow=max(1,2^(count(obs_genotype=="m")[2,2])),ncol=length(obs_colnam))
  #obs_genotype_mat=matrix(,nrow=1,ncol=length(obs_colnam))
  # One row per possible resolution of the ambiguous ("m") entries:
  # 2^(#ambiguous sites) rows, one column per retained tip.
  obs_genotype_mat=matrix(,nrow=2^length(grep("m",obs_genotype)),ncol=length(obs_colnam))
  colnames(obs_genotype_mat)=obs_colnam
  # find the index of each gene status
  ambiguity_index=which(obs_genotype=="m")
  normal_index=which(obs_genotype=="0")
  allele_index=which(obs_genotype=="1")
  #create all possible situations for ambguity
  inupt_list <- rep(list(0:1), length(ambiguity_index))
  input_ambiguity=expand.grid(inupt_list)
  # put the possible status into the matrix for gene i, each row represent one possible situation
  obs_genotype_mat[,as.numeric(ambiguity_index)]=as.matrix(input_ambiguity)
  obs_genotype_mat[,normal_index]=rep(0,dim(obs_genotype_mat)[1])
  obs_genotype_mat[,allele_index]=rep(1,dim(obs_genotype_mat)[1])
  # for each of the possible situation, assign weight to them, here, I use equal weights
  ambiguity_weight=matrix(rep(1/dim(obs_genotype_mat)[1],dim(obs_genotype_mat)[1],nrow=1))
  # put the weight of gene i into the allweight list
  #length(allsubtree)
  #length(allobs)
  #length(allweight)
  ####################################################################################################
  ####################################################################################################
  #extract the tree, if mutation is on one branch, then the corresponding tips will have mutation
  ####################################################################################################
  ####################################################################################################
  # NOTE(review): left_right and build_tree are defined but never called
  # below; they appear to be leftovers.
  left_right <- function(edge,parent){
    child = c()
    for (i in 1:nrow(edge)) {
      if (edge[i,1] == parent) {
        child = c(child,edge[i,2])
      }
    }
    return(child)
  }
  build_tree <- function(edge,branch){
    child_node = left_right(edge,branch[length(branch)])
    new_branch=matrix(c(branch,child_node[1],branch,child_node[2]),nrow=2,byrow = TRUE)
    return(new_branch)
  }
  #####################################modify begin################################
  # find node parent
  find_ancestor <- function(edge,node){
    parent = 0
    for (i in 1:nrow(edge)) {
      if (edge[i,2] == node) {
        parent = edge[i,1]
      }
    }
    return(parent)
  }
  # get all unique nodes in the tree
  get_all_nodes <- function(edge)
  {
    all_nodes = integer(length(edge))
    for (i in 1:nrow(edge))
    {
      all_nodes[(i-1)*2+1] = edge[i,1]
      all_nodes[(i-1)*2+2] = edge[i,2]
    }
    all_nodes = unique(all_nodes)
    return(all_nodes)
  }
  # find root node
  # NOTE(review): find_root has no explicit return; its last expression is
  # a for loop, so it returns NULL.  Its only caller assigns the result to
  # an unused variable, so this is currently harmless.
  find_root <- function(edge)
  {
    all_nodes = get_all_nodes(edge)
    for (i in 1:length(all_nodes))
    {
      parent = find_ancestor(edge, all_nodes[i])
      if (parent == 0)
      {
        root_node = all_nodes[i]
        break
      }
    }
  }
  # find two child branches and nodes if they exist. Otherwise all zeros matrix output
  find_child_branches_and_nodes <- function(edge, parent_node){
    child_branches_and_nodes = matrix(0, 2, 2)
    child_id = 1
    # first row are two nodes, second row are two branches
    for (i in 1:nrow(edge))
    {
      if (edge[i,1] == parent_node) {
        child_branches_and_nodes[1,child_id] = edge[i,2]
        child_branches_and_nodes[2,child_id] = i
        child_id = child_id + 1
      }
    }
    return(child_branches_and_nodes)
  }
  # find all child branch for current branch
  find_child_branches <- function(edge, current_edge, child_branches)
  {
    id = length(child_branches)
    right_node = edge[current_edge, 2]
    child_branches_and_nodes = find_child_branches_and_nodes(edge, right_node)
    if (child_branches_and_nodes[1,1] != 0)
    {
      # if not leaf node
      left_node = child_branches_and_nodes[1,1]
      right_node = child_branches_and_nodes[1,2]
      left_branch = child_branches_and_nodes[2,1]
      right_branch = child_branches_and_nodes[2,2]
      id = id + 1
      child_branches[id] = left_branch
      id = id + 1
      child_branches[id] = right_branch
      child_branches = find_child_branches(edge, left_branch, child_branches)
      child_branches = find_child_branches(edge, right_branch, child_branches)
      return(child_branches)
    }
    else
    {
      return(child_branches)
    }
  }
  # find all child branch for all branches
  find_all_child_branches <- function(edge){
    # get root node
    root_node = find_root(edge)
    all_child_branches = rep(list(list()), nrow(edge))
    for (i in 1:nrow(edge))
    {
      current_edge = i
      # iterative find all its child branches
      child_branches = integer(0)
      all_child_branches[[i]] = find_child_branches(edge, current_edge, child_branches)
    }
    return(all_child_branches)
  }
  #child_branches=find_all_child_branches(ts$edge)
  #####################################modify end################################
  # save final stage of each tree on condition that the event occurs in one branch
  num_rows = nrow(subtree$edge)
  num_cols = length(subtree$tip.label)
  # build the branch tree sturcture from each tip to the root
  branch_trees = rep( list(list()),num_cols )
  num_parent = 0
  for (tip_i in 1:num_cols) {
    branch_trees[[tip_i]] = tip_i
    parent = find_ancestor(subtree$edge,tip_i)
    branch_trees[[tip_i]][num_parent+2] = parent
    num_parent=num_parent+1
    while (parent != num_cols+1) {
      tip_node = parent
      parent = find_ancestor(subtree$edge,tip_node)
      branch_trees[[tip_i]][num_parent+2] = parent
      num_parent=num_parent+1
    }
    num_parent = 0
  }
  # loop over all the branches, and find the possible final stage
  # if the event occurs in that branch
  # possible_true_genotype[k, i] = 1 iff tip i descends from branch k,
  # i.e. the tip carries the mutation when the mutation occurs on branch k.
  possible_true_genotype = matrix(rep(0,num_rows*num_cols),nrow=num_rows,ncol = num_cols)
  for (branch_i in 1:num_rows) {
    branch_edge = subtree$edge[branch_i,]
    if (branch_edge[2] <= num_cols) {
      possible_true_genotype[branch_i,branch_edge[2]] = 1
    }else {
      for (i in 1:num_cols) {
        list_branch = branch_trees[[i]]
        if (is.na(match(branch_edge[2],list_branch)) == FALSE) {
          possible_true_genotype[branch_i,i] = 1
          colnames(possible_true_genotype)=subtree$tip.label
        }
      }
    }
  }
  # NOTE(review): bare expression; inside a function this has no effect.
  possible_true_genotype
  descendant_branches=find_all_child_branches(subtree$edge)
  # NOTE(review): possible_true_genotype_with_2 (the two-mutation
  # enumeration below) is computed but never used afterwards; it appears
  # to be dead code kept from an extended model.
  num_of_cases = (num_rows+1+2)*num_rows/2
  possible_true_genotype_with_2 = matrix(rep(0,num_of_cases*(num_cols+2)),nrow=num_of_cases,ncol = num_cols+2)
  colnames(possible_true_genotype_with_2)= c("First_branch", "Second_branch", subtree$tip.label)
  # a matrix is created, where first two columns are the places for the mutation occuring.
  # if it's NA, it means that no mutation occurs to stand for situation like 0, 1, 2
  id_row = 1
  for (branch_i in 1:num_rows) {
    branch_edge = subtree$edge[branch_i,]
    # if only this branch has one mutation
    possible_true_genotype_with_2[id_row,1] = branch_i
    possible_true_genotype_with_2[id_row,1:num_cols+2] = possible_true_genotype[branch_i, ]
    id_row = id_row+1
    # if this branch has one mutation, and other branch has another
    for (branch_j in branch_i:num_rows) {
      possible_true_genotype_with_2[id_row,1] = branch_i
      possible_true_genotype_with_2[id_row,2] = branch_j
      possible_true_genotype_with_2[id_row,1:num_cols+2] = possible_true_genotype[branch_i, ] + possible_true_genotype[branch_j, ]
      id_row = id_row+1
    }
  }
  ###################################################################################
  ###################################################################################
  #Mutation model:find the prob of mutation on each branch, but not on other branches
  ###################################################################################
  ###################################################################################
  # branch_time_list is the branch length of sub tree
  branch_time_list= subtree$edge.length
  ##################################################
  #for gene i, find the prob on each branch
  #for this gene or location, find the probability of mutation on each branch by function
  find_mutation_prob_Bj <- function(branch_time_list, unit_lambda){
    prob_Bj_ind=c()
    for (l in 1:length(branch_time_list)){
      #assuming the exponential distribution on each branch,prob_Bj_ind[l] is the prob of mutation on branch l
      prob_Bj_ind[l]=1-(exp(-unit_lambda*branch_time_list[l]))
    }
    # create a matrix. In each row, diagnonal is the prob on that branch, and off-branch is the mutation prob that not on that branch
    # probability matrix when mutation exist on only one branch, the matrix is first created with same rows, and each row is the prob of no mutation
    prob_Bj_mat=matrix(rep(1-prob_Bj_ind,length(branch_time_list)),nrow=length(branch_time_list),byrow=TRUE)
    # diagonal is replaced with prob of mutation
    diag(prob_Bj_mat) <- prob_Bj_ind
    # descendant branches carry the mutation so the probability is 1
    # (reads descendant_branches from the enclosing environment)
    for (l in 1:length(branch_time_list)){ prob_Bj_mat[l,c(descendant_branches[[l]])]=1}
    #take the product of each row to find the prob of mutation on branch but not on other branches
    #first create a matrix of one column and each row represent the final prob
    prob_Bj_final=matrix(,nrow=length(branch_time_list),ncol=1)# probability of mutation on branch and other branch has no mutation
    for (l in 1:length(branch_time_list)){
      prob_Bj_final[l,]=tail(cumprod(prob_Bj_mat[l,]),1)
    }
    return(prob_Bj_final)
  }
  #use the above function to find the probability
  prob_Bj_final = find_mutation_prob_Bj(branch_time_list, unit_lambda)
  ####################################################
  # all_possible_true_genotype store the information of mutation of tips if mutation on a specific branch
  # each row number represent the branch number, and if mutation on that branch, the mutation status on the tips
  #################################################################
  #################################################################
  #create the error matrix
  # NOTE(review): sequencing_error_model is assigned but never used below;
  # the error probabilities are taken directly from alpha/beta instead.
  sequencing_error_model=matrix(c(1-alpha,alpha,beta,1-beta),nrow=2,byrow = TRUE)
  # dim(all_possible_true_genotype[[t]])[1] is the number of branches in the subtree
  # dim(obs_genotype_mat)[1] is the number of possible mutation situations in the data
  # dim(obs_genotype_mat)[2] is the number of tips or samples in the subtree
  # error_result_mat is a list of matrix, in the list, the number of matrix equals number of branches, and each matrix is as obs_genotype_mat, which is all possible situations
  # each matrix is for one branch
  error_result_mat=replicate(dim(possible_true_genotype)[1],
                             matrix(rep(0,dim(obs_genotype_mat)[1]*dim(obs_genotype_mat)[2]),nrow=dim(obs_genotype_mat)[1]),
                             simplify=FALSE)
  ####################################################################
  ####################################################################
  # error_result_mat[[t]][[k]] is the error prob if the gene mutation occurs on branch k
  # error_result_mat[[t]][[k]] each line correspond to the possible observed(ambiguity) in obs_genotype_mat
  # branch k (mutation on branch k)
  for (k in 1:dim(possible_true_genotype)[1]){
    # situation j (possible situation for ambguity status),dim(obs_genotype_mat)[1]=2^(# of ambiguity sites)
    for (j in 1:dim(obs_genotype_mat)[1]){
      # tip or sample i
      for (i in 1:dim(possible_true_genotype)[2]){
        # if true is 1, and observed is 1, prob is 1-beta
        if (as.matrix(possible_true_genotype)[k,i]==1 & as.matrix(obs_genotype_mat)[j,i]==1){error_result_mat[[k]][j,i]=1-beta}
        # if true is 0 and observed is 0, prob is 1-alpha
        else if (as.matrix(possible_true_genotype)[k,i]==0 & as.matrix(obs_genotype_mat)[j,i]==0){error_result_mat[[k]][j,i]=1-alpha}
        # if true is 1 and observed is 0, false negative, the prob is beta
        else if (as.matrix(possible_true_genotype)[k,i]==1 & as.matrix(obs_genotype_mat)[j,i]==0){error_result_mat[[k]][j,i]=beta}
        # if true is 0 and observed is 1, false positive, the prob is alpha
        else if (as.matrix(possible_true_genotype)[k,i]==0 & as.matrix(obs_genotype_mat)[j,i]==1){error_result_mat[[k]][j,i]=alpha}
      }
    }
  }
  # NOTE(review): bare expression; inside a function this has no effect.
  length(error_result_mat)
  ##############################################################################
  ##############################################################################
  ##########
  #
  # error_prob is a matrix of ncol= number of possible situation(ambguity), nrow= number of branches
  # error_prob, each column is one possible observed mutation, each line corresponds to the mutation branch
  error_prob=matrix(, nrow = dim(possible_true_genotype)[1], ncol = dim(obs_genotype_mat)[1])
  # branch k
  for (k in 1:dim(possible_true_genotype)[1]){
    # for situation j
    for (j in 1:dim(obs_genotype_mat)[1]){
      # find the product of the prob, error_prob[k,j] is the conditional prob of observed mutation when true mutation on branch(row) k, and ambiguity situation(column) j
      error_prob[k,j]= tail(cumprod(error_result_mat[[k]][j,]),1)
    }
  }
  ###########################################################################
  ###########################################################################
  ###########################################################################
  # weight is assigned to each possible situation(ambguity), and the total weighted prob is calculated
  # weighted_error is the probability of observed conditioning on a mutation branch, each row represents one branch
  weighted_error=error_prob%*%ambiguity_weight
  # all_weighted_error is the prob of the observed mutations status, conditioning on each branch(each row represent the conditional prob on that branch)
  ############################################################################
  ############################################################################
  # all_weighted_error is the prob of mutation condition on branch, allprob_Bj_final is the prob of mutation on branch and not on other branches
  # take the sum of the prob will return the prob of mutation status
  prob_S=t(weighted_error)%*%prob_Bj_final# probability of S vectors=sum of prob of S and X
  #############################################################################
  #############################################################################
  #for gene t
  # Posterior (Bayes): P(branch k | observed) =
  #   P(observed | branch k) * P(branch k) / P(observed)
  prob_Bj_S=c()
  #for branch k
  for (k in 1:length(weighted_error)){
    prob_Bj_S[k]=weighted_error[k]*prob_Bj_final[k]/prob_S
  }
  ###############################
  #print out the branch number that has the max value
  ##############################
  # NOTE(review): br_index is computed but not returned or printed.
  br_index=which.max(prob_Bj_S)
  # Zero-pad to number_br so results for different subtrees are comparable.
  newdata <- c(prob_Bj_S,rep(0,number_br-length(prob_Bj_S)))
  return(result=newdata)
}
# Simulation driver: for one (alpha, beta) error setting, run the
# branch-posterior computation (generate_prob_binary) on 100 simulated
# data sets and write one CSV of per-site branch probabilities each.
# All 16 alpha x beta combinations; only row 15 is processed by this job
# (the work is sharded across scripts).
parameter_setting = expand.grid(alpha=c(0.05,0.1,0.2,0.4),
                                beta =c(0.05,0.1,0.2,0.4))
for(paraInd in 15:15){
  alpha = parameter_setting[paraInd,1]
  beta = parameter_setting[paraInd,2]
  sequencing_error_model=matrix(c(1-alpha,alpha,
                                  beta,1-beta),nrow=2,byrow = TRUE)
  print(sequencing_error_model)
  # Mutation-rate components; NOTE(review): unit_mu is set but never used.
  unit_theta = 10^(-7)
  unit_gamma = 10^(-9)
  unit_mu = 10 ^(-2)
  number_br = 98
  number_cell = 50
  # Encode alpha/beta as two-digit strings for file names
  # (e.g. 0.05 -> "05", 0.2 -> "2").
  if (alpha < 0.1)
  {
    alpha_str = sprintf('0%s', alpha*100)
  } else
  {
    alpha_str = sprintf('%s', alpha*10)
  }
  if (beta < 0.1)
  {
    beta_str = sprintf('0%s', beta*100)
  } else
  {
    beta_str = sprintf('%s', beta*10)
  }
  binary_folder_form_result = sprintf('/fs/project/kubatko.2-temp/gao.957/DNA_alignment/bowtie2-2.3.4.2-linux-x86_64/reference/Simulation_Setting_LargeTree/SimulateData_EXP10_20Missing/Binary_alpha0%s_beta0%s_result',alpha_str, beta_str)
  dir.create(binary_folder_form_result)
  # One replicate per simulated random tree / observation matrix.
  for (indexn in 1:100){
    form = sprintf('/fs/project/kubatko.2-temp/gao.957/DNA_alignment/bowtie2-2.3.4.2-linux-x86_64/reference/Simulation_Setting_LargeTree/SimulateData_EXP10_20Missing/RandomTree/RandomTree_%s.tre', indexn)
    sampletr=read.tree(form)
    obs_form_0_1 = sprintf('/fs/project/kubatko.2-temp/gao.957/DNA_alignment/bowtie2-2.3.4.2-linux-x86_64/reference/Simulation_Setting_LargeTree/SimulateData_EXP10_20Missing/Binary_alpha0%s_beta0%s/binary_obs_0_1_tip_alpha_0%s_beta_0%s_matrix%s.csv', alpha_str, beta_str, alpha_str, beta_str,indexn)
    mat_obs_form_0_1 = read.csv(obs_form_0_1)
    mat_obs_form_0_1_sub=na.omit(mat_obs_form_0_1)
    # Site-wise normal/mutant codes: 0 = normal, 1 = mutant at every site.
    normal_genotype_0_1 = rep(0,dim(mat_obs_form_0_1_sub)[1])
    mutation_genotype_0_1 = rep(1,dim(mat_obs_form_0_1_sub)[1])
    initial_obs_0_1 = data.frame(mat_obs_form_0_1_sub[,-c(1,2,3)])
    # Recode observations: "-" stays missing, "2" is collapsed to "1".
    initial_obs_0_1_recode = data.frame(matrix(ncol = dim(initial_obs_0_1)[2], nrow = dim(initial_obs_0_1)[1]))
    for ( nr in 1:dim(initial_obs_0_1)[1]){
      for ( nc in 1:dim(initial_obs_0_1)[2]){
        if (initial_obs_0_1[nr,nc] == "-"){initial_obs_0_1_recode[nr,nc] ="-"}
        if (initial_obs_0_1[nr,nc] == "0"){initial_obs_0_1_recode[nr,nc] ="0"}
        if (initial_obs_0_1[nr,nc] == "1"){initial_obs_0_1_recode[nr,nc] ="1"}
        if (initial_obs_0_1[nr,nc] == "2"){initial_obs_0_1_recode[nr,nc] ="1"}
      }
    }
    colnames(initial_obs_0_1_recode)=colnames(initial_obs_0_1)
    binary_prob_matrix_all_0_1=c()
    # For each site: average branch posteriors over 3 random draws of the
    # mutation rate (gamma-distributed around unit_theta / unit_gamma).
    for (i in 1:dim(initial_obs_0_1_recode)[1]){
      print(i)
      #rd_unit_theta <- rbeta(10, (10^7)*unit_theta, (10^7)*(1-unit_theta))
      #rd_unit_gamma <- rbeta(10, (10^14)*unit_gamma, (10^14)*(1-unit_gamma))
      rd_unit_theta = rgamma(n = 3, shape = 100, scale = 0.01*unit_theta)
      rd_unit_gamma = rgamma(3, shape = 100, scale = 0.01*unit_gamma)
      generate_prob_br_all_dat=data.frame(matrix(NA, nrow = number_br, ncol = 3))
      for (j in 1:3){
        generate_prob_br <- generate_prob_binary(alpha,beta,rd_unit_theta[j]+rd_unit_gamma[j],number_br,number_cell,
                                                 normal_genotype_0_1[i],mutation_genotype_0_1[i],initial_obs_0_1_recode[i,],sampletr)
        generate_prob_br_all_single <- c(generate_prob_br,rep(0,number_br-length(generate_prob_br)))
        generate_prob_br_all_dat[,j] = generate_prob_br_all_single
      }
      generate_prob_br_all=rowMeans(generate_prob_br_all_dat, na.rm = FALSE, dims = 1)
      binary_prob_matrix_all_0_1 = rbind(binary_prob_matrix_all_0_1,generate_prob_br_all)
    }
    binary_prob_matrix_all_0_1_out = sprintf('/fs/project/kubatko.2-temp/gao.957/DNA_alignment/bowtie2-2.3.4.2-linux-x86_64/reference/Simulation_Setting_LargeTree/SimulateData_EXP10_20Missing/Binary_alpha0%s_beta0%s_result/binary_prob_matrix_all_0_1_out_alpha_0%s_beta_0%s_matrix%s.csv', alpha_str, beta_str, alpha_str, beta_str,indexn)
    binary_prob_matrix_all_0_1_rownames=cbind(mat_obs_form_0_1_sub[,c(2,3)],binary_prob_matrix_all_0_1)
    write.csv(binary_prob_matrix_all_0_1_rownames,file=binary_prob_matrix_all_0_1_out)
  }
} | /simulation/FiguresS2_S3_Scenarios3_4/Scenario4_result/MO_Binary_20missing/MO_Binary_Simulation_Study_15.R | no_license | DavidSimone/MO | R | false | false | 23,241 | r | library(ape)
generate_prob_binary <- function(alpha,beta,unit_lambda,number_br,number_cell,normal_genotype,mutation_genotype,initial_obs,ts){
############################################
# generating sub tree
# for this gene or location, variable singletip_exclude are those sample equal '-'
single_tip_exclude=c()
# for this gene or location, variable tip_exclude are all those equal '-'
tip_exclude=c()
# for this gene or location, obs_colnam are those sample names, excluding the '-'
obs_colnam=c()
#
obs_genotype=c()
# assign the mutation status for sample j of gene i
for (j in c(1:(number_cell))){
# exclude those tips with missing gene status
if (initial_obs[j]==c("-")) { single_tip_exclude=colnames(initial_obs)[j] # single_tip_exclude=colnames(initial_obs)[1,j]
tip_exclude=c(tip_exclude,single_tip_exclude)
}
# value is 0 if gene status is same as normal
else if (as.character(initial_obs[1,j])==as.character(normal_genotype)) {
obs_genotype=c(obs_genotype,0)
}
# value is 1 if gene status is same as mutant
else if (as.character(initial_obs[1,j])==as.character(mutation_genotype)) {
obs_genotype=c(obs_genotype,1)
}
# value is m if gene status is ambguity
else {obs_genotype=c(obs_genotype,"m")}
}
# for this gene or location, exclude the sample with missing gene status
subtree=drop.tip(ts, tip=tip_exclude)
# branch_time_list is the branch length of sub tree
branch_time_list= subtree$edge.length
##################################################
#for gene i, find the prob on each branch
#probability of mutation on each branch
#prob_Bj_ind=c()
#for (l in 1:length(branch_time_list)){
#assuming the exponential distribution on each branch,prob_Bj_ind[l] is the prob of mutation on branch l
#prob_Bj_ind[l]=1-(exp(-unit_lambda*branch_time_list[l]))
#}
# create a matrix. In each row, diagnonal is the prob on that branch, and off-branch is the mutation prob that not on that branch
# probability matrix when mutation exist on only one branch, the matrix is first created with same rows, and each row is the prob of no mutation
#prob_Bj=matrix(rep(1-prob_Bj_ind,length(branch_time_list)),nrow=length(branch_time_list),byrow=TRUE)
# diagonal is replaced with prob of mutation
#diag(prob_Bj) <- prob_Bj_ind
#for (br in 1:length(branch_time_list)){
# prob_Bj[br,c(descendant_branches[[i]][[br]])]=1
#}
####################################################
#take the product of each row to find the prob of mutation on branch but not on other branches
#first create a matrix of one column and each row represent the final prob
#prob_Bj_final=matrix(,nrow=length(branch_time_list),ncol=1)# probability of mutation on branch and other branch has no mutation
#for (l in 1:length(branch_time_list)){
# prob_Bj_final[l,]=tail(cumprod(prob_Bj[l,]),1)
#}
# assign the final mutation prob of gene i into the list the all genes
#allprob_Bj_final[[i]]=prob_Bj_final
####################################################
#generate the obs matrix
# obs_colnam are those with observations
obs_colnam=setdiff(colnames(initial_obs),tip_exclude)
# consider the ambguity status as missing
#obs_genotype_mat=matrix(,nrow=max(1,2^(count(obs_genotype=="m")[2,2])),ncol=length(obs_colnam))
#obs_genotype_mat=matrix(,nrow=1,ncol=length(obs_colnam))
obs_genotype_mat=matrix(,nrow=2^length(grep("m",obs_genotype)),ncol=length(obs_colnam))
colnames(obs_genotype_mat)=obs_colnam
# find the index of each gene status
ambiguity_index=which(obs_genotype=="m")
normal_index=which(obs_genotype=="0")
allele_index=which(obs_genotype=="1")
#create all possible situations for ambguity
inupt_list <- rep(list(0:1), length(ambiguity_index))
input_ambiguity=expand.grid(inupt_list)
# put the possible status into the matrix for gene i, each row represent one possible situation
obs_genotype_mat[,as.numeric(ambiguity_index)]=as.matrix(input_ambiguity)
obs_genotype_mat[,normal_index]=rep(0,dim(obs_genotype_mat)[1])
obs_genotype_mat[,allele_index]=rep(1,dim(obs_genotype_mat)[1])
# for each of the possible situation, assign weight to them, here, I use equal weights
ambiguity_weight=matrix(rep(1/dim(obs_genotype_mat)[1],dim(obs_genotype_mat)[1],nrow=1))
# put the weight of gene i into the allweight list
#length(allsubtree)
#length(allobs)
#length(allweight)
####################################################################################################
####################################################################################################
#extract the tree, if mutation is on one branch, then the corresponding tips will have mutation
####################################################################################################
####################################################################################################
# Children of `parent`: the second-column entries of every edge row whose
# first column equals `parent`. Returns NULL when `parent` has no outgoing
# edges (grown from NULL, matching the original c() behaviour).
left_right <- function(edge, parent) {
    kids <- NULL
    for (row in seq_len(nrow(edge))) {
        if (edge[row, 1] == parent) {
            kids <- c(kids, edge[row, 2])
        }
    }
    kids
}
# Extend a root-to-node path by one level: returns a 2-row matrix whose rows
# are `branch` followed by, respectively, the first and second child of the
# path's last node (children found via left_right()).
build_tree <- function(edge, branch) {
    tail_node <- branch[length(branch)]
    kids <- left_right(edge, tail_node)
    matrix(c(branch, kids[1], branch, kids[2]), nrow = 2, byrow = TRUE)
}
#####################################modify begin################################
# find node parent
# Parent of `node`: the first-column entry of the edge whose second column
# equals `node`. Returns 0 when no such edge exists (i.e. `node` is the
# root). If several edges pointed at `node`, the last match would win,
# exactly as in the original loop.
find_ancestor <- function(edge, node) {
    parent <- 0
    for (row in seq_len(nrow(edge))) {
        if (edge[row, 2] == node) {
            parent <- edge[row, 1]
        }
    }
    parent
}
# get all unique nodes in the tree
# All distinct node ids in the edge matrix, in first-seen order when reading
# the matrix row by row as (parent, child) pairs. t() followed by as.vector
# reproduces the original interleaved fill order exactly.
get_all_nodes <- function(edge)
{
    unique(as.vector(t(edge)))
}
# find root node
# Find the root of the tree: the unique node whose find_ancestor() is 0.
# Returns NA when no parentless node exists.
#
# BUG FIX: the original assigned root_node inside the loop but never
# returned it -- a `for` loop evaluates to NULL, so find_root() always
# returned NULL regardless of the input.
find_root <- function(edge)
{
    all_nodes <- get_all_nodes(edge)
    root_node <- NA
    for (node in all_nodes)
    {
        if (find_ancestor(edge, node) == 0)
        {
            root_node <- node
            break
        }
    }
    return(root_node)
}
# find two child branches and nodes if they exist. Otherwise all zeros matrix output
# For `parent_node`, return a 2x2 matrix: row 1 holds the (up to two) child
# node ids and row 2 the row indices of the edges leading to them. Cells
# stay 0 when fewer than two children exist, so a leaf yields an all-zero
# matrix. (More than two children would still error on assignment, as the
# original did.)
find_child_branches_and_nodes <- function(edge, parent_node) {
    out <- matrix(0, 2, 2)
    slot <- 1
    for (row in seq_len(nrow(edge))) {
        if (edge[row, 1] == parent_node) {
            out[1, slot] <- edge[row, 2]
            out[2, slot] <- row
            slot <- slot + 1
        }
    }
    out
}
# find all child branch for current branch
# Recursively collect the row indices of every edge lying below the child
# end of `current_edge`, appending them (parent-before-descendant, left
# subtree before right) to `child_branches`. Assumes a binary tree.
find_child_branches <- function(edge, current_edge, child_branches)
{
    kids <- find_child_branches_and_nodes(edge, edge[current_edge, 2])
    if (kids[1, 1] == 0) {
        # Leaf: nothing below this edge.
        return(child_branches)
    }
    left_branch <- kids[2, 1]
    right_branch <- kids[2, 2]
    child_branches <- c(child_branches, left_branch, right_branch)
    child_branches <- find_child_branches(edge, left_branch, child_branches)
    find_child_branches(edge, right_branch, child_branches)
}
# find all child branch for all branches
# For every edge of the tree, compute the vector of edge indices in the
# subtree below it (via find_child_branches). Returns a list with one
# entry per edge row.
find_all_child_branches <- function(edge) {
    root_node <- find_root(edge)  # kept for parity with the original; unused below
    descendants <- rep(list(list()), nrow(edge))
    for (br in seq_len(nrow(edge))) {
        descendants[[br]] <- find_child_branches(edge, br, integer(0))
    }
    descendants
}
#child_branches=find_all_child_branches(ts$edge)
#####################################modify end################################
# save final stage of each tree on condition that the event occurs in one branch
num_rows = nrow(subtree$edge)
num_cols = length(subtree$tip.label)
# build the branch tree sturcture from each tip to the root
branch_trees = rep( list(list()),num_cols )
num_parent = 0
for (tip_i in 1:num_cols) {
branch_trees[[tip_i]] = tip_i
parent = find_ancestor(subtree$edge,tip_i)
branch_trees[[tip_i]][num_parent+2] = parent
num_parent=num_parent+1
while (parent != num_cols+1) {
tip_node = parent
parent = find_ancestor(subtree$edge,tip_node)
branch_trees[[tip_i]][num_parent+2] = parent
num_parent=num_parent+1
}
num_parent = 0
}
# loop over all the branches, and find the possible final stage
# if the event occurs in that branch
possible_true_genotype = matrix(rep(0,num_rows*num_cols),nrow=num_rows,ncol = num_cols)
for (branch_i in 1:num_rows) {
branch_edge = subtree$edge[branch_i,]
if (branch_edge[2] <= num_cols) {
possible_true_genotype[branch_i,branch_edge[2]] = 1
}else {
for (i in 1:num_cols) {
list_branch = branch_trees[[i]]
if (is.na(match(branch_edge[2],list_branch)) == FALSE) {
possible_true_genotype[branch_i,i] = 1
colnames(possible_true_genotype)=subtree$tip.label
}
}
}
}
possible_true_genotype
descendant_branches=find_all_child_branches(subtree$edge)
num_of_cases = (num_rows+1+2)*num_rows/2
possible_true_genotype_with_2 = matrix(rep(0,num_of_cases*(num_cols+2)),nrow=num_of_cases,ncol = num_cols+2)
colnames(possible_true_genotype_with_2)= c("First_branch", "Second_branch", subtree$tip.label)
# a matrix is created, where first two columns are the places for the mutation occuring.
# if it's NA, it means that no mutation occurs to stand for situation like 0, 1, 2
id_row = 1
for (branch_i in 1:num_rows) {
branch_edge = subtree$edge[branch_i,]
# if only this branch has one mutation
possible_true_genotype_with_2[id_row,1] = branch_i
possible_true_genotype_with_2[id_row,1:num_cols+2] = possible_true_genotype[branch_i, ]
id_row = id_row+1
# if this branch has one mutation, and other branch has another
for (branch_j in branch_i:num_rows) {
possible_true_genotype_with_2[id_row,1] = branch_i
possible_true_genotype_with_2[id_row,2] = branch_j
possible_true_genotype_with_2[id_row,1:num_cols+2] = possible_true_genotype[branch_i, ] + possible_true_genotype[branch_j, ]
id_row = id_row+1
}
}
###################################################################################
###################################################################################
#Mutation model:find the prob of mutation on each branch, but not on other branches
###################################################################################
###################################################################################
# branch_time_list is the branch length of sub tree
branch_time_list= subtree$edge.length
##################################################
#for gene i, find the prob on each branch
#for this gene or location, find the probability of mutation on each branch by function
# For each branch l, the probability that the mutation arose on l and on no
# other branch (branches below l are unconstrained, since they inherit the
# mutation). Waiting times are exponential with rate `unit_lambda`.
# NOTE(review): `descendant_branches` is a free variable captured from the
# enclosing environment (built by find_all_child_branches on this subtree).
find_mutation_prob_Bj <- function(branch_time_list, unit_lambda){
    n_br <- length(branch_time_list)
    # P(>=1 mutation on branch l) = 1 - exp(-lambda * t_l)
    p_mut <- 1 - exp(-unit_lambda * branch_time_list)
    # Row l starts as the "no mutation" probability for every branch ...
    prob_mat <- matrix(rep(1 - p_mut, n_br), nrow = n_br, byrow = TRUE)
    # ... the diagonal becomes "mutation on branch l" ...
    diag(prob_mat) <- p_mut
    # ... and l's descendant branches carry the mutation, so probability 1.
    for (l in seq_len(n_br)) {
        prob_mat[l, c(descendant_branches[[l]])] <- 1
    }
    # Row products: P(mutation on l AND no mutation elsewhere).
    prob_Bj_final <- matrix(, nrow = n_br, ncol = 1)
    for (l in seq_len(n_br)) {
        prob_Bj_final[l, ] <- prod(prob_mat[l, ])
    }
    return(prob_Bj_final)
}
#use the above function to find the probability
prob_Bj_final = find_mutation_prob_Bj(branch_time_list, unit_lambda)
####################################################
# all_possible_true_genotype store the information of mutation of tips if mutation on a specific branch
# each row number represent the branch number, and if mutation on that branch, the mutation status on the tips
#################################################################
#################################################################
#create the error matrix
sequencing_error_model=matrix(c(1-alpha,alpha,beta,1-beta),nrow=2,byrow = TRUE)
# dim(all_possible_true_genotype[[t]])[1] is the number of branches in the subtree
# dim(obs_genotype_mat)[1] is the number of possible mutation situations in the data
# dim(obs_genotype_mat)[2] is the number of tips or samples in the subtree
# error_result_mat is a list of matrix, in the list, the number of matrix equals number of branches, and each matrix is as obs_genotype_mat, which is all possible situations
# each matrix is for one branch
error_result_mat=replicate(dim(possible_true_genotype)[1],
matrix(rep(0,dim(obs_genotype_mat)[1]*dim(obs_genotype_mat)[2]),nrow=dim(obs_genotype_mat)[1]),
simplify=FALSE)
####################################################################
####################################################################
# error_result_mat[[t]][[k]] is the error prob if the gene mutation occurs on branch k
# error_result_mat[[t]][[k]] each line correspond to the possible observed(ambiguity) in obs_genotype_mat
# branch k (mutation on branch k)
for (k in 1:dim(possible_true_genotype)[1]){
# situation j (possible situation for ambguity status),dim(obs_genotype_mat)[1]=2^(# of ambiguity sites)
for (j in 1:dim(obs_genotype_mat)[1]){
# tip or sample i
for (i in 1:dim(possible_true_genotype)[2]){
# if true is 1, and observed is 1, prob is 1-beta
if (as.matrix(possible_true_genotype)[k,i]==1 & as.matrix(obs_genotype_mat)[j,i]==1){error_result_mat[[k]][j,i]=1-beta}
# if true is 0 and observed is 0, prob is 1-alpha
else if (as.matrix(possible_true_genotype)[k,i]==0 & as.matrix(obs_genotype_mat)[j,i]==0){error_result_mat[[k]][j,i]=1-alpha}
# if true is 1 and observed is 0, false negative, the prob is beta
else if (as.matrix(possible_true_genotype)[k,i]==1 & as.matrix(obs_genotype_mat)[j,i]==0){error_result_mat[[k]][j,i]=beta}
# if true is 0 and observed is 1, false positive, the prob is alpha
else if (as.matrix(possible_true_genotype)[k,i]==0 & as.matrix(obs_genotype_mat)[j,i]==1){error_result_mat[[k]][j,i]=alpha}
}
}
}
length(error_result_mat)
##############################################################################
##############################################################################
##########
#
# error_prob is a matrix of ncol= number of possible situation(ambguity), nrow= number of branches
# error_prob, each column is one possible observed mutation, each line corresponds to the mutation branch
error_prob=matrix(, nrow = dim(possible_true_genotype)[1], ncol = dim(obs_genotype_mat)[1])
# branch k
for (k in 1:dim(possible_true_genotype)[1]){
# for situation j
for (j in 1:dim(obs_genotype_mat)[1]){
# find the product of the prob, error_prob[k,j] is the conditional prob of observed mutation when true mutation on branch(row) k, and ambiguity situation(column) j
error_prob[k,j]= tail(cumprod(error_result_mat[[k]][j,]),1)
}
}
###########################################################################
###########################################################################
###########################################################################
# weight is assigned to each possible situation(ambguity), and the total weighted prob is calculated
# weighted_error is the probability of observed conditioning on a mutation branch, each row represents one branch
weighted_error=error_prob%*%ambiguity_weight
# all_weighted_error is the prob of the observed mutations status, conditioning on each branch(each row represent the conditional prob on that branch)
############################################################################
############################################################################
# all_weighted_error is the prob of mutation condition on branch, allprob_Bj_final is the prob of mutation on branch and not on other branches
# take the sum of the prob will return the prob of mutation status
prob_S=t(weighted_error)%*%prob_Bj_final# probability of S vectors=sum of prob of S and X
#############################################################################
#############################################################################
#for gene t
prob_Bj_S=c()
#for branch k
for (k in 1:length(weighted_error)){
prob_Bj_S[k]=weighted_error[k]*prob_Bj_final[k]/prob_S
}
###############################
#print out the branch number that has the max value
##############################
br_index=which.max(prob_Bj_S)
newdata <- c(prob_Bj_S,rep(0,number_br-length(prob_Bj_S)))
return(result=newdata)
}
# ---------------------------------------------------------------------------
# Driver script: for one sequencing-error setting (alpha = false positive,
# beta = false negative), process 100 simulated trees. For each site it
# averages, over 3 random mutation-rate draws, the per-branch probabilities
# from generate_prob_binary(), and writes one probability matrix per tree.
# Paths are hard-coded to an HPC project directory.
# ---------------------------------------------------------------------------
# All 4x4 combinations of alpha/beta rates; only row 15 is run below.
parameter_setting = expand.grid(alpha=c(0.05,0.1,0.2,0.4),
                                beta =c(0.05,0.1,0.2,0.4))
for(paraInd in 15:15){
    alpha = parameter_setting[paraInd,1]
    beta = parameter_setting[paraInd,2]
    # 2x2 error model: rows = true state (0/1), cols = observed state.
    sequencing_error_model=matrix(c(1-alpha,alpha,
                                    beta,1-beta),nrow=2,byrow = TRUE)
    print(sequencing_error_model)
    # Per-unit mutation-rate components (theta/gamma used below; mu unused here).
    unit_theta = 10^(-7)
    unit_gamma = 10^(-9)
    unit_mu = 10 ^(-2)
    number_br = 98      # branches in the full tree (pad target for output rows)
    number_cell = 50    # cells/tips per simulated tree
    # Encode alpha/beta as zero-padded strings for the file-name templates,
    # e.g. 0.05 -> "05", 0.2 -> "2".
    if (alpha < 0.1)
    {
        alpha_str = sprintf('0%s', alpha*100)
    } else
    {
        alpha_str = sprintf('%s', alpha*10)
    }
    if (beta < 0.1)
    {
        beta_str = sprintf('0%s', beta*100)
    } else
    {
        beta_str = sprintf('%s', beta*10)
    }
    binary_folder_form_result = sprintf('/fs/project/kubatko.2-temp/gao.957/DNA_alignment/bowtie2-2.3.4.2-linux-x86_64/reference/Simulation_Setting_LargeTree/SimulateData_EXP10_20Missing/Binary_alpha0%s_beta0%s_result',alpha_str, beta_str)
    dir.create(binary_folder_form_result)
    for (indexn in 1:100){
        # Simulated tree and its matching observed-genotype CSV.
        form = sprintf('/fs/project/kubatko.2-temp/gao.957/DNA_alignment/bowtie2-2.3.4.2-linux-x86_64/reference/Simulation_Setting_LargeTree/SimulateData_EXP10_20Missing/RandomTree/RandomTree_%s.tre', indexn)
        sampletr=read.tree(form)
        obs_form_0_1 = sprintf('/fs/project/kubatko.2-temp/gao.957/DNA_alignment/bowtie2-2.3.4.2-linux-x86_64/reference/Simulation_Setting_LargeTree/SimulateData_EXP10_20Missing/Binary_alpha0%s_beta0%s/binary_obs_0_1_tip_alpha_0%s_beta_0%s_matrix%s.csv', alpha_str, beta_str, alpha_str, beta_str,indexn)
        mat_obs_form_0_1 = read.csv(obs_form_0_1)
        mat_obs_form_0_1_sub=na.omit(mat_obs_form_0_1)
        # Per-site reference states: 0 = normal, 1 = mutant.
        normal_genotype_0_1 = rep(0,dim(mat_obs_form_0_1_sub)[1])
        mutation_genotype_0_1 = rep(1,dim(mat_obs_form_0_1_sub)[1])
        # Drop the first three columns (presumably row ids/labels -- columns
        # 2:3 are reused as row labels for the output below).
        initial_obs_0_1 = data.frame(mat_obs_form_0_1_sub[,-c(1,2,3)])
        initial_obs_0_1_recode = data.frame(matrix(ncol = dim(initial_obs_0_1)[2], nrow = dim(initial_obs_0_1)[1]))
        # Recode observed states: "-" missing, "0" normal, and both "1" and
        # "2" collapse to the mutant state "1".
        for ( nr in 1:dim(initial_obs_0_1)[1]){
            for ( nc in 1:dim(initial_obs_0_1)[2]){
                if (initial_obs_0_1[nr,nc] == "-"){initial_obs_0_1_recode[nr,nc] ="-"}
                if (initial_obs_0_1[nr,nc] == "0"){initial_obs_0_1_recode[nr,nc] ="0"}
                if (initial_obs_0_1[nr,nc] == "1"){initial_obs_0_1_recode[nr,nc] ="1"}
                if (initial_obs_0_1[nr,nc] == "2"){initial_obs_0_1_recode[nr,nc] ="1"}
            }
        }
        colnames(initial_obs_0_1_recode)=colnames(initial_obs_0_1)
        binary_prob_matrix_all_0_1=c()
        # One row of branch probabilities per site, averaged over 3 random
        # mutation-rate draws.
        for (i in 1:dim(initial_obs_0_1_recode)[1]){
            print(i)
            #rd_unit_theta <- rbeta(10, (10^7)*unit_theta, (10^7)*(1-unit_theta))
            #rd_unit_gamma <- rbeta(10, (10^14)*unit_gamma, (10^14)*(1-unit_gamma))
            # Gamma draws centred on unit_theta / unit_gamma (shape*scale = rate).
            rd_unit_theta = rgamma(n = 3, shape = 100, scale = 0.01*unit_theta)
            rd_unit_gamma = rgamma(3, shape = 100, scale = 0.01*unit_gamma)
            generate_prob_br_all_dat=data.frame(matrix(NA, nrow = number_br, ncol = 3))
            for (j in 1:3){
                generate_prob_br <- generate_prob_binary(alpha,beta,rd_unit_theta[j]+rd_unit_gamma[j],number_br,number_cell,
                                                         normal_genotype_0_1[i],mutation_genotype_0_1[i],initial_obs_0_1_recode[i,],sampletr)
                # Zero-pad to number_br so all sites align column-wise.
                generate_prob_br_all_single <- c(generate_prob_br,rep(0,number_br-length(generate_prob_br)))
                generate_prob_br_all_dat[,j] = generate_prob_br_all_single
            }
            generate_prob_br_all=rowMeans(generate_prob_br_all_dat, na.rm = FALSE, dims = 1)
            binary_prob_matrix_all_0_1 = rbind(binary_prob_matrix_all_0_1,generate_prob_br_all)
        }
        binary_prob_matrix_all_0_1_out = sprintf('/fs/project/kubatko.2-temp/gao.957/DNA_alignment/bowtie2-2.3.4.2-linux-x86_64/reference/Simulation_Setting_LargeTree/SimulateData_EXP10_20Missing/Binary_alpha0%s_beta0%s_result/binary_prob_matrix_all_0_1_out_alpha_0%s_beta_0%s_matrix%s.csv', alpha_str, beta_str, alpha_str, beta_str,indexn)
        # Re-attach the site identifier columns (2:3 of the input) as labels.
        binary_prob_matrix_all_0_1_rownames=cbind(mat_obs_form_0_1_sub[,c(2,3)],binary_prob_matrix_all_0_1)
        write.csv(binary_prob_matrix_all_0_1_rownames,file=binary_prob_matrix_all_0_1_out)
    }
} |
# Mean of one pollutant column across the monitor CSV files in `directory`
# whose numeric file names ("NNN.csv") appear in `id`; NA readings are
# dropped. Returns a single numeric value.
pollutantmean <- function(directory, pollutant, id = 1:332) {
    #set the path
    path = directory
    #get the file List in that directory
    fileList = list.files(path)
    #extract the file names and store as numeric for comparison
    file.names = as.numeric(sub("\\.csv$","",fileList))
    #select files to be imported based on the user input or default
    # (match() keeps the order of `id`; an id with no matching file would
    # yield NA here and make read.csv fail -- assumes all ids exist)
    selected.files = fileList[match(id,file.names)]
    #import data: one data frame per selected file
    Data = lapply(file.path(path,selected.files),read.csv)
    #convert into data frame: stack all monitors together
    Data = do.call(rbind.data.frame,Data)
    #calculate mean of the requested pollutant column, ignoring NAs
    mean(Data[,pollutant],na.rm=TRUE)
} | /pollutantmean.R | no_license | amanbansal9/codes | R | false | false | 631 | r | pollutantmean <- function(directory, pollutant, id = 1:332) {
#set the path
path = directory
#get the file List in that directory
fileList = list.files(path)
#extract the file names and store as numeric for comparison
file.names = as.numeric(sub("\\.csv$","",fileList))
#select files to be imported based on the user input or default
selected.files = fileList[match(id,file.names)]
#import data
Data = lapply(file.path(path,selected.files),read.csv)
#convert into data frame
Data = do.call(rbind.data.frame,Data)
#calculate mean
mean(Data[,pollutant],na.rm=TRUE)
} |
#' Predict class labels on a test set using a single tree.
#'
#' This is the base function called by Predict. As a side effect it also
#' accumulates, in the file "depth.Rdata" in the working directory, the
#' number of observations predicted at each leaf depth across calls.
#'
#' @param X an n sample by d feature matrix (preferable) or data frame which was used to train the provided forest.
#' @param tree a tree from a forest returned by RerF.
#'
#' @return predictions an n length vector of prediction based on the tree provided to this function
#'
RunPredict <-
    function(X, tree){
        tm <- 0L
        currentNode<-0L          # NOTE(review): never used below
        curr_ind <- 0L           # NOTE(review): never used below
        num_classes <- ncol(tree$ClassProb)  # NOTE(review): never used below
        n <- nrow(X)
        # do we need to rotate the data?
        if (!is.null(tree$rotmat)) {
            if (is.null(tree$rotdims)) {
                # rotate all features
                X[] <- X%*%tree$rotmat
            } else {
                # rotate only the selected feature columns
                X[, tree$rotdims] <- X[, tree$rotdims]%*%tree$rotmat
            }
        }
        predictions <- integer(n)
        Xnode <- double(n)             # scratch buffer for projected values
        numNodes <- length(tree$treeMap)
        depth <- integer(200)          # per-depth observation counts (max depth 200 assumed)
        # resume counts accumulated by earlier calls, if any
        if(file.exists("depth.Rdata")){
            load(file="depth.Rdata")
        }
        currDepth <- integer(numNodes)
        currDepth[1] <- 0
        Assigned2Node <- vector("list", numNodes)
        Assigned2Node[[1L]] <- 1L:n    # all samples start at the root
        for (m in 1:numNodes) {
            nodeSize <- length(Assigned2Node[[m]])
            if (nodeSize > 0L) {
                if ((tm <- tree$treeMap[m]) > 0L) {
                    # Internal node: project this node's samples onto the sparse
                    # vector in matAstore (odd entries are feature indices, even
                    # entries the corresponding weights).
                    indexHigh <- tree$matAindex[tm+1L]
                    indexLow <- tree$matAindex[tm] + 1L
                    s <- (indexHigh - indexLow + 1L)/2L
                    Xnode[1:nodeSize] <- X[Assigned2Node[[m]],tree$matAstore[indexLow:indexHigh][(1:s)*2L-1L], drop = F]%*%
                        tree$matAstore[indexLow:indexHigh][(1:s)*2L]
                    # Samples at or below the cut point go to the left child.
                    moveLeft <- Xnode[1L:nodeSize] <= tree$CutPoint[tm]
                    Assigned2Node[[tm*2L]] <- Assigned2Node[[m]][moveLeft]
                    currDepth[tm*2L] <- currDepth[m]+1
                    Assigned2Node[[tm*2L+1L]] <- Assigned2Node[[m]][!moveLeft]
                    currDepth[tm*2L+1] <- currDepth[m]+1
                } else {
                    # Leaf (treeMap entry is the negative leaf index): predict
                    # the majority class and record this leaf's sample count.
                    predictions[Assigned2Node[[m]]] <- which.max(tree$ClassProb[tm*-1L, ])
                    # NOTE(review): currDepth[m] is 0 when the root is a leaf,
                    # making this a silent no-op assignment -- confirm intended.
                    depth[currDepth[m]] = depth[currDepth[m]] + length(Assigned2Node[[m]])
                }
            }
            Assigned2Node[m] <-list(NULL)  # release this node's index list
        }
        save(depth,file="depth.Rdata")
        return(predictions)
    }
| /exper/exp0048/R-RerF/R/RunPredict.R | no_license | jbrowne6/exper | R | false | false | 1,981 | r | #' Predict class labels on a test set using a single tree.
#'
#' This is the base function called by Predict.
#'
#' @param X an n sample by d feature matrix (preferable) or data frame which was used to train the provided forest.
#' @param tree a tree from a forest returned by RerF.
#'
#' @return predictions an n length vector of prediction based on the tree provided to this function
#'
RunPredict <-
function(X, tree){
tm <- 0L
currentNode<-0L
curr_ind <- 0L
num_classes <- ncol(tree$ClassProb)
n <- nrow(X)
# do we need to rotate the data?
if (!is.null(tree$rotmat)) {
if (is.null(tree$rotdims)) {
X[] <- X%*%tree$rotmat
} else {
X[, tree$rotdims] <- X[, tree$rotdims]%*%tree$rotmat
}
}
predictions <- integer(n)
Xnode <- double(n)
numNodes <- length(tree$treeMap)
depth <- integer(200)
if(file.exists("depth.Rdata")){
load(file="depth.Rdata")
}
currDepth <- integer(numNodes)
currDepth[1] <- 0
Assigned2Node <- vector("list", numNodes)
Assigned2Node[[1L]] <- 1L:n
for (m in 1:numNodes) {
nodeSize <- length(Assigned2Node[[m]])
if (nodeSize > 0L) {
if ((tm <- tree$treeMap[m]) > 0L) {
indexHigh <- tree$matAindex[tm+1L]
indexLow <- tree$matAindex[tm] + 1L
s <- (indexHigh - indexLow + 1L)/2L
Xnode[1:nodeSize] <- X[Assigned2Node[[m]],tree$matAstore[indexLow:indexHigh][(1:s)*2L-1L], drop = F]%*%
tree$matAstore[indexLow:indexHigh][(1:s)*2L]
moveLeft <- Xnode[1L:nodeSize] <= tree$CutPoint[tm]
Assigned2Node[[tm*2L]] <- Assigned2Node[[m]][moveLeft]
currDepth[tm*2L] <- currDepth[m]+1
Assigned2Node[[tm*2L+1L]] <- Assigned2Node[[m]][!moveLeft]
currDepth[tm*2L+1] <- currDepth[m]+1
} else {
predictions[Assigned2Node[[m]]] <- which.max(tree$ClassProb[tm*-1L, ])
depth[currDepth[m]] = depth[currDepth[m]] + length(Assigned2Node[[m]])
}
}
Assigned2Node[m] <-list(NULL)
}
save(depth,file="depth.Rdata")
return(predictions)
}
|
##
# Testing parsing, splitting, modelling, and computation on data with UUID column
##
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../h2o-runit.R')
# Upload a small airline data set carrying a UUID column, fit a binomial GLM
# on a fixed-seed 70/30 split, then splice the UUIDs back onto the GLM
# predictions so individual rows remain identifiable end to end.
test <- function() {
  print("Reading in data (tiny airline with UUIDs).")
  airline.hex <- h2o.uploadFile(locate("smalldata/airlines/uuid_airline.csv"), destination_frame="airline.hex", header=TRUE)
  print("Summary of airline data: ")
  print(summary(airline.hex))
  print("Head of airline data: ")
  print(head(airline.hex))
  print("Take subset of rows where UUID is present.")
  airline.uuid <- airline.hex[!is.na(airline.hex$uuid),]
  print("Dimension of new set: ")
  print(dim(airline.uuid))
  print("Head of new set: ")
  print(head(airline.uuid))
  print("Take a random uniform test train split (30:70).")
  # fails with some seeds/bad splits. use fixed seed.
  airline.uuid$split <- ifelse(h2o.runif(airline.uuid, seed=12345)>0.3, yes=1, no=0)
  # Columns 1:32 drop the split indicator from the modelling frames.
  airline.train.hex <- h2o.assign(airline.uuid[airline.uuid$split==1,1:32],key="airline.train.hex")
  airline.test.hex <- h2o.assign(airline.uuid[airline.uuid$split==0,1:32],key="airline.test.hex")
  print("Dimension of training set: ")
  dim(airline.train.hex)
  print("Dimension of test set: ")
  dim(airline.test.hex)
  print("Head of training set: ")
  head(airline.train.hex)
  print("Head of test set: ")
  head(airline.test.hex)
  print("Define variables for x and y.")
  colnames(airline.hex)
  x <- c("Year","Month","DayofMonth","DayOfWeek","UniqueCarrier","FlightNum","Origin","Dest","Distance")
  y <- "IsArrDelayed"
  print("Run glm model on train set.")
  airline.glm <- h2o.glm(x=x, y=y, training_frame=airline.train.hex,family="binomial")
  airline.glm
  print("Extract UUIDs from test set.")
  test.uuid <- h2o.assign(airline.test.hex$uuid,key="test.uuid")
  print("Dimension of UUIDs from test set: ")
  dim(test.uuid)
  print("Head of UUIDs from test set: ")
  head(test.uuid)
  print("Run GLM prediction on test set")
  airline.predict.uuid <- predict(object=airline.glm, newdata=airline.test.hex)
  print("Head of prediction on test set: ")
  head(airline.predict.uuid)
  print("Splice UUIDs back to predictions with h2o.cbind()")
  # Row order of predictions matches the test frame, so cbind re-attaches ids.
  air.results <- h2o.assign(h2o.cbind(airline.predict.uuid, test.uuid), key="air.results")
  print("Head of predictions with UUIDs: ")
  head(air.results)
  print("Tail of predictions with UUIDs: ")
  tail(air.results)
  print("Summary of predictions with UUIDs: ")
  summary(air.results)
  print("Check performce and AUC")
  perf <- h2o.performance(airline.glm,airline.test.hex)
  print(perf)
  perf@metrics$AUC
  print("Show distribution of predictions with quantile.")
  # NOTE(review): calls the S3 method quantile.Frame directly rather than
  # the quantile() generic -- confirm this matches the h2o API in use.
  print(quant <- quantile.Frame(air.results$'p1'))
  print("Extract strongest predictions.")
  # Keep rows whose predicted P(delay) exceeds the 75th percentile.
  top.air <- h2o.assign(air.results[air.results$'p1' > quant['75%'], ],key="top.air")
  print("Dimension of strongest predictions: ")
  dim(top.air)
  print("Head of strongest predictions: ")
  head(top.air)
}
doTest("Test parsing, splitting, modelling, and computation on data with UUID column", test)
| /h2o-r/tests/testdir_demos/runit_demo_glm_uuid.R | permissive | konor/h2o-3 | R | false | false | 3,200 | r | ##
# Testing parsing, splitting, modelling, and computation on data with UUID column
##
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../h2o-runit.R')
test <- function() {
print("Reading in data (tiny airline with UUIDs).")
airline.hex <- h2o.uploadFile(locate("smalldata/airlines/uuid_airline.csv"), destination_frame="airline.hex", header=TRUE)
print("Summary of airline data: ")
print(summary(airline.hex))
print("Head of airline data: ")
print(head(airline.hex))
print("Take subset of rows where UUID is present.")
airline.uuid <- airline.hex[!is.na(airline.hex$uuid),]
print("Dimension of new set: ")
print(dim(airline.uuid))
print("Head of new set: ")
print(head(airline.uuid))
print("Take a random uniform test train split (30:70).")
# fails with some seeds/bad splits. use fixed seed.
airline.uuid$split <- ifelse(h2o.runif(airline.uuid, seed=12345)>0.3, yes=1, no=0)
airline.train.hex <- h2o.assign(airline.uuid[airline.uuid$split==1,1:32],key="airline.train.hex")
airline.test.hex <- h2o.assign(airline.uuid[airline.uuid$split==0,1:32],key="airline.test.hex")
print("Dimension of training set: ")
dim(airline.train.hex)
print("Dimension of test set: ")
dim(airline.test.hex)
print("Head of training set: ")
head(airline.train.hex)
print("Head of test set: ")
head(airline.test.hex)
print("Define variables for x and y.")
colnames(airline.hex)
x <- c("Year","Month","DayofMonth","DayOfWeek","UniqueCarrier","FlightNum","Origin","Dest","Distance")
y <- "IsArrDelayed"
print("Run glm model on train set.")
airline.glm <- h2o.glm(x=x, y=y, training_frame=airline.train.hex,family="binomial")
airline.glm
print("Extract UUIDs from test set.")
test.uuid <- h2o.assign(airline.test.hex$uuid,key="test.uuid")
print("Dimension of UUIDs from test set: ")
dim(test.uuid)
print("Head of UUIDs from test set: ")
head(test.uuid)
print("Run GLM prediction on test set")
airline.predict.uuid <- predict(object=airline.glm, newdata=airline.test.hex)
print("Head of prediction on test set: ")
head(airline.predict.uuid)
print("Splice UUIDs back to predictions with h2o.cbind()")
air.results <- h2o.assign(h2o.cbind(airline.predict.uuid, test.uuid), key="air.results")
print("Head of predictions with UUIDs: ")
head(air.results)
print("Tail of predictions with UUIDs: ")
tail(air.results)
print("Summary of predictions with UUIDs: ")
summary(air.results)
print("Check performce and AUC")
perf <- h2o.performance(airline.glm,airline.test.hex)
print(perf)
perf@metrics$AUC
print("Show distribution of predictions with quantile.")
print(quant <- quantile.Frame(air.results$'p1'))
print("Extract strongest predictions.")
top.air <- h2o.assign(air.results[air.results$'p1' > quant['75%'], ],key="top.air")
print("Dimension of strongest predictions: ")
dim(top.air)
print("Head of strongest predictions: ")
head(top.air)
}
doTest("Test parsing, splitting, modelling, and computation on data with UUID column", test)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/normalise_data.R
\name{normalise_data}
\alias{normalise_data}
\title{normalise_data
contains all the functions to normalise several Cytof datasets by removing the
unwanted variation between datasets arising from experimental artefacts.}
\usage{
normalise_data(
data,
raw_data,
rep_samples,
norm_clusters,
k,
num_clusters,
wd_data,
dir_norm_data
)
}
\arguments{
\item{raw_data}{Raw data}
\item{rep_samples}{Replicated samples}
\item{norm_clusters}{Clusters to be normalised}
\item{k}{Dimension of the unwanted variation}
\item{num_clusters}{Total number of clusters}
\item{wd_data}{Path to the directory containing all raw fcs files, the metadata file
and the panel file}
\item{dir_norm_data}{Directory name containing all norm fcs files, the metadata file
and the panel file}
\item{data}{Datasets before normalisation}
}
\value{
Normalised metadata file
}
\description{
normalise_data
contains all the functions to normalise several Cytof datasets by removing the
unwanted variation between datasets arising from experimental artefacts.
}
| /man/normalise_data.Rd | no_license | StarXian/CytofRUV | R | false | true | 1,141 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/normalise_data.R
\name{normalise_data}
\alias{normalise_data}
\title{normalise_data
contains all the functions to normalise several Cytof datasets by removing the
unwanted variation between datasets arising from experimental artefacts.}
\usage{
normalise_data(
data,
raw_data,
rep_samples,
norm_clusters,
k,
num_clusters,
wd_data,
dir_norm_data
)
}
\arguments{
\item{raw_data}{Raw data}
\item{rep_samples}{Replicated samples}
\item{norm_clusters}{Clusters to be normalised}
\item{k}{Dimension of the unwanted variation}
\item{num_clusters}{Total number of clusters}
\item{wd_data}{Path to the directory containing all raw fcs files, the metadata file
and the panel file}
\item{dir_norm_data}{Directory name containing all norm fcs files, the metadata file
and the panel file}
\item{data}{Datasets before normalisation}
}
\value{
Normalised metadata file
}
\description{
normalise_data
contains all the functions to normalise several Cytof datasets by removing the
unwanted variation between datasets arising from experimental artefacts.
}
|
# Read the cached "cor_ec2.qs" object (presumably corridors -- confirm) from
# the configured S3 bucket. Relies on `conf` (a config list with $bucket)
# being defined in the calling environment, and on s3read_using (aws.s3)
# and the qs package being available.
get_cor <- function() {
    s3read_using(qs::qread, bucket = conf$bucket, object = "cor_ec2.qs")
}
# Read the cached "sub_ec2.qs" object (presumably subcorridors -- confirm)
# from the configured S3 bucket.
get_sub <- function() {
    s3read_using(qs::qread, bucket = conf$bucket, object = "sub_ec2.qs")
}
# Read the cached "sig_ec2.qs" object (presumably signals -- confirm) from
# the configured S3 bucket.
get_sig <- function() {
    s3read_using(qs::qread, bucket = conf$bucket, object = "sig_ec2.qs")
}
# Memory footprint of `x` as a human-readable string in megabytes,
# e.g. "1.1 Mb".
sizeof <- function(x) {
    footprint <- object.size(x)
    format(footprint, units = "Mb")
}
# Re-style `filename` in place using the tidyverse style guide, but with
# 4-space indentation (requires the styler package).
apply_style <- function(filename) {
    styler::style_file(filename, transformers = styler::tidyverse_style(indent_by = 4))
}
# Date of the Monday of the week containing `date_`.
# With week_start = 1 lubridate::wday() makes Monday = 1, so the offset
# 1 - wday steps back 0..6 days to that week's Monday.
get_most_recent_monday <- function(date_) {
    date_ + days(1 - lubridate::wday(date_, week_start = 1))
}
# Resolve a human-friendly date string to "YYYY-MM-DD":
#   "yesterday"   -> yesterday's date
#   "N days ago"  -> that many days before today
#   anything else -> returned unchanged (assumed already a date string)
get_date_from_string <- function(x) {
    if (x == "yesterday") {
        format(today() - days(1), "%Y-%m-%d")
    } else if (!is.na(str_extract(x, "\\d+(?= days ago)"))) {
        # Pull the leading count out of "N days ago".
        d <- str_extract(x, "\\d+(?= days ago)")
        format(today() - days(d), "%Y-%m-%d")
    } else {
        x
    }
}
# Number of worker cores to use, assuming one core per 8 GB of RAM; always
# reports at least 1. NOTE(review): the Windows branch reads *total*
# physical memory (MB) while the Linux branch reads *available* memory
# (kB) from /proc/meminfo -- confirm the asymmetry is intended.
get_usable_cores <- function() {
    # Usable cores is one per 8 GB of RAM.
    # Get RAM from system file and divide
    if (Sys.info()["sysname"] == "Windows") {
        x <- suppressWarnings(shell('systeminfo | findstr Memory', intern = TRUE))
        memline <- x[grepl("Total Physical Memory", x)]
        # e.g. "Total Physical Memory:  16,384 MB" -> "16,384"
        mem <- stringr::str_extract(string = memline, pattern = "\\d+,\\d+")
        mem <- as.numeric(gsub(",", "", mem))
        mem <- round(mem, -3)      # round to the nearest 1,000 MB
        max(floor(mem/8e3), 1)     # 8e3 MB = 8 GB per core
    } else if (Sys.info()["sysname"] == "Linux") {
        x <- readLines('/proc/meminfo')
        memline <- x[grepl("MemAvailable", x)]
        mem <- stringr::str_extract(string = memline, pattern = "\\d+")
        mem <- as.integer(mem)
        mem <- round(mem, -6)      # meminfo is in kB; round to the nearest ~GB
        max(floor(mem/8e6), 1)     # 8e6 kB = 8 GB per core
    } else {
        stop("Unknown operating system.")
    }
}
# From: https://billpetti.github.io/2017-10-13-retry-scrape-function-automatically-r-rstats/
# Evaluate `.f` (a lazily evaluated expression), retrying on error up to
# `max_attempts` times with `wait_seconds` between attempts. Returns the
# expression's value on the first success; errors only after every attempt
# has failed. Retrying works because a promise whose evaluation throws is
# left unforced and is re-evaluated on the next access.
#
# BUG FIX: the original never returned `output` on success and then called
# stop() unconditionally after the loop, so the function *always* errored.
retry_function <- function(.f, max_attempts = 5,
                           wait_seconds = 5) {
  force(max_attempts)
  force(wait_seconds)
  for (i in seq_len(max_attempts)) {
    tryCatch({
      # Forcing .f here either yields the value (returned immediately)
      # or throws into the handler below.
      return(.f)
    }, error = function(e) {
      if (i < max_attempts && wait_seconds > 0) {
        message(paste0("Retrying at ", Sys.time() + wait_seconds))
        Sys.sleep(wait_seconds)
      }
    })
  }
  stop("retry_function: all ", max_attempts, " attempts failed", call. = FALSE)
}
# Factory: wrap `FUN` (a data-frame transform) in a function that splits its
# input by SignalID into chunks of `split_size` ids, spools each chunk to a
# temporary fst file, applies FUN to each chunk in parallel (mclapply), and
# row-binds the results. Requires fst, parallel, stringi, dplyr/magrittr,
# and a `usable_cores` variable in the calling environment.
# NOTE(review): `usable_cores` is a free variable -- confirm it is set
# (e.g. via get_usable_cores()) before the wrapped function is called.
split_wrapper <- function(FUN) {
    # Creates a function that runs a function, splits by signalid and recombines
    f <- function(df, split_size, ...) {
        # Define temporary directory and file names
        temp_dir <- tempdir()
        if (!dir.exists(temp_dir)) {
            dir.create(temp_dir)
        }
        # Random root avoids collisions between concurrent runs.
        temp_file_root <- stringi::stri_rand_strings(1,8)
        temp_path_root <- file.path(temp_dir, temp_file_root)
        print(temp_path_root)
        print("Writing to temporary files by SignalID...")
        signalids <- as.character(unique(df$SignalID))
        # Chunk the ids: split_size ids per chunk.
        splits <- split(signalids, ceiling(seq_along(signalids)/split_size))
        lapply(
            names(splits),
            function(i) {
                cat('.')
                df %>%
                    filter(SignalID %in% splits[[i]]) %>%
                    write_fst(paste0(temp_path_root, "_", i, ".fst"))
            })
        cat('.', sep='\n')
        file_names <- paste0(temp_path_root, "_", names(splits), ".fst")
        # Read in each temporary file and run adjusted counts in parallel. Afterward, clean up.
        print("Running for each SignalID...")
        df <- mclapply(file_names, mc.cores = usable_cores, FUN = function(fn) {
            #df <- lapply(file_names, function(fn) {
            cat('.')
            FUN(read_fst(fn), ...)
        }) %>% bind_rows()
        cat('.', sep='\n')
        lapply(file_names, FUN = file.remove)
        df
    }
    # The assignment above is the last expression, so `f` is returned.
}
# Read a feather file that is packed inside a zip archive `x`.
# Fix: extract into a throwaway temp directory instead of the working
# directory (the old unzip(x) littered the cwd and risked clobbering
# files), and remove the extracted copy on exit.
read_zipped_feather <- function(x) {
    exdir <- tempfile("unzipped_")
    dir.create(exdir)
    on.exit(unlink(exdir, recursive = TRUE), add = TRUE)
    files <- unzip(x, exdir = exdir)
    # A zipped feather is expected to contain a single file; read the first entry.
    read_feather(files[[1]])
}
# Call `func(...)` up to `n_tries` times with exponential backoff (starting
# at `sleep` seconds), until it returns a non-NULL result without error.
# An optional per-attempt `timeout` (seconds) is enforced via R.utils.
# Returns the successful result, or NULL if every attempt failed.
# Fixes: (1) the original slept AFTER a successful attempt, delaying every
# caller by `sleep` seconds; (2) a timeout raised by withTimeout() escaped
# uncaught (purrr::safely only wraps `func` itself) -- it is now treated
# like any other failed attempt.
keep_trying <- function(func, n_tries, ..., sleep = 1, timeout = Inf) {
    func_name <- deparse(substitute(func))
    safely_func <- purrr::safely(func, otherwise = NULL)
    result <- NULL
    error <- 1
    try_number <- 1
    while ((!is.null(error) || is.null(result)) && try_number <= n_tries) {
        x <- tryCatch(
            R.utils::withTimeout(safely_func(...), timeout = timeout, onTimeout = "error"),
            error = function(e) list(result = NULL, error = e)  # catches timeouts
        )
        result <- x$result
        error <- x$error
        if (!is.null(error)) {
            print(glue("{func_name} Attempt {try_number} failed: {error}"))
        }
        try_number <- try_number + 1
        if (!is.null(error) || is.null(result)) {
            # Only back off when another attempt is actually coming.
            Sys.sleep(sleep)
            sleep <- sleep * 2
        }
    }
    return(result)
}
# Read and row-bind all .rds files in the working directory whose names
# match `pattern`.
# Fix: anchor the extension check -- the old grepl(".rds", lf) treated the
# dot as a wildcard, so names like "birds.csv" also matched.
readRDS_multiple <- function(pattern) {
    lf <- list.files(pattern = pattern)
    lf <- lf[grepl("\\.rds$", lf)]
    bind_rows(lapply(lf, readRDS))
}
# Because of issue with Apache Arrow (feather, parquet)
# where R wants to convert UTC to local time zone on read
# Switch date or datetime fields back to UTC. Run on read.
convert_to_utc <- function(df) {
    # Reinterpret every POSIXct column of `df` as UTC.  Only the "tzone"
    # display attribute changes; the underlying instant (seconds since the
    # epoch) is untouched -- the same effect as lubridate::with_tz().
    # Fixes: detect datetime columns with inherits() instead of the fragile
    # sum(class(x) == "POSIXct") tally, and drop the lubridate dependency
    # from this helper.
    for (col in names(df)) {
        if (inherits(df[[col]], "POSIXct")) {
            attr(df[[col]], "tzone") <- "UTC"
        }
    }
    df
}
# Sequential week number of date `d`, counted from the week beginning
# Sunday 2016-12-25 (which is week 0).
week <- function(d) {
    epoch <- ymd("2016-12-25")
    elapsed <- ymd(d) - epoch
    as.integer(trunc(elapsed / dweeks(1)))
}
# All "YYYY-MM" labels for the months spanning [start_date, end_date].
# Inputs may be Date objects or parseable date strings.
# Fix: format() is vectorized over Date, so the type-unstable sapply()
# loop (whose return type depends on input length) is replaced by a
# single vectorized call with identical output.
get_month_abbrs <- function(start_date, end_date) {
    start_date <- ymd(start_date)
    day(start_date) <- 1  # snap to the first of the month before stepping
    end_date <- ymd(end_date)
    format(seq(start_date, end_date, by = "1 month"), "%Y-%m")
}
bind_rows_keep_factors <- function(dfs) {
    ## Row-bind a list of data frames, then re-factor factor columns
    ## (bind_rows() coerces factors with differing levels to character,
    ## with a warning we deliberately suppress).
    ## Fix: collect factor column names from EVERY data frame in `dfs`;
    ## the original only inspected dfs[[1]], so a factor column that first
    ## appears in a later data frame stayed character.
    factors <- unique(unlist(
        map(dfs, ~ names(select_if(.x, is.factor)))
    ))
    ## Bind dataframes, convert characters back to factors
    suppressWarnings(bind_rows(dfs)) %>%
        mutate_at(vars(one_of(factors)), factor)
}
match_type <- function(val, val_type_to_match) {
    # Coerce `val` to the (first) class of `val_type_to_match`,
    # e.g. match_type("5", 1L) -> 5L.
    # Fix: look the as.<class> coercion function up directly instead of
    # eval(parse(text = ...)), which broke for character `val` (the pasted
    # text "as.character(abc)" evaluated `abc` as a symbol) and was a code
    # injection hazard for untrusted input.
    coerce <- get(paste0("as.", class(val_type_to_match)[[1]]), mode = "function")
    coerce(val)
}
addtoRDS <- function(df, fn, delta_var, rsd, csd) {
    #' combines data frame in local rds file with newly calculated data
    #' trimming the current data and appending the new data to prevent overlaps
    #' and/or duplicates. Used throughout Monthly_Report_Package code
    #' to avoid having to recalculate entire report period (13 months) every time
    #' which takes too long and runs into memory issues frequently.
    #'
    #' @param df newly calculated data frame (or named list of data frames)
    #'   on most recent data
    #' @param fn filename of same data over entire reporting period (13 months)
    #' @param delta_var name (string) of the value column from which the
    #'   period-over-period `delta` column is recalculated
    #' @param rsd report_start_date: start of current report period (13 months prior)
    #' @param csd calculation_start_date: start date of most recent data
    #' @return a combined data frame (NULL, invisibly, on the very first run
    #'   when `fn` does not exist yet)
    #' @examples
    #' addtoRDS(avg_daily_detector_uptime, "avg_daily_detector_uptime.rds", report_start_date, calc_start_date)
    # Merge one stored data frame (df0, from the rds file) with one new one (df).
    combine_dfs <- function(df0, df, delta_var, rsd, csd) {
        if (class(rsd) == "character") rsd <- as_date(rsd)
        if (class(csd) == "character") csd <- as_date(csd)
        # Extract aggregation period from the data fields
        # (assumes exactly one of these columns is present -- TODO confirm)
        periods <- intersect(c("Month", "Date", "Hour", "Timeperiod"), names(df0))
        per_ <- as.name(periods)
        # Remove everything after calcs_start_date (csd)
        # and before report_start_date (rsd) in original df
        df0 <- df0 %>% filter(!!per_ >= rsd, !!per_ < csd)
        # Make sure new data starts on csd
        # This is especially important for when csd is the start of the month
        # and we've run calcs going back to the start of the week, which is in
        # the previous month, e.g., 3/31/2020 is a Tuesday.
        df <- df %>% filter(!!per_ >= csd)
        # Extract aggregation groupings from the data fields
        # to calculate period-to-period deltas
        groupings <- intersect(c("Zone_Group", "Corridor", "SignalID"), names(df0))
        groups_ <- sapply(groupings, as.name)
        group_arrange <- c(periods, groupings) %>%
            sapply(as.name)
        var_ <- as.name(delta_var)
        # Combine old and new
        x <- bind_rows_keep_factors(list(df0, df)) %>%
            # Recalculate deltas from prior periods over combined df
            group_by(!!!groups_) %>%
            arrange(!!!group_arrange) %>%
            mutate(lag_ = lag(!!var_),
                   delta = ((!!var_) - lag_)/lag_) %>%
            ungroup() %>%
            dplyr::select(-lag_)
        x
    }
    if (!file.exists(fn)) {
        # First run: nothing to merge with; just persist the new data.
        saveRDS(df, fn)
    } else {
        df0 <- readRDS(fn)
        # Support both a single data frame and a named list of data frames
        # (merged element-wise when both sides have identical names).
        if (is.list(df) && is.list(df0) &&
            !is.data.frame(df) && !is.data.frame(df0) &&
            identical(names(df), names(df0))) {
            x <- purrr::map2(df0, df, combine_dfs, delta_var, rsd, csd)
        } else {
            x <- combine_dfs(df0, df, delta_var, rsd, csd)
        }
        saveRDS(x, fn)
        x
    }
}
write_fst_ <- function(df, fn, append = FALSE) {
    # Write `df` to the fst file `fn`.  With append = TRUE and an existing
    # file, merge with the file's current rows (de-duplicated via distinct())
    # and re-factor factor columns, since bind_rows() demotes factors with
    # differing levels to character.
    # Fix: use the scalar, short-circuiting `&&` in the if() condition
    # (`&` is the vectorized operator) and isTRUE() instead of `== TRUE`,
    # which yields NA -- and an error inside if() -- when append is NA.
    if (isTRUE(append) && file.exists(fn)) {
        factors <- unique(unlist(
            map(list(df), ~ select_if(df, is.factor) %>% names())
        ))
        df_ <- read_fst(fn)
        df_ <- bind_rows(df, df_) %>%
            mutate_at(vars(one_of(factors)), factor)
    } else {
        df_ <- df
    }
    write_fst(distinct(df_), fn)
}
# One row per distinct Timestamp in `df`, tagged with the sentinel
# SignalID 0 (column order: SignalID, Timestamp).
get_unique_timestamps <- function(df) {
    df %>%
        distinct(Timestamp) %>%
        transmute(SignalID = 0, Timestamp)
}
# Decorator: wrap FUN so it runs once per SignalID group, in parallel
# across the usable cores, and returns the row-bound results.
multicore_decorator <- function(FUN) {
    n_cores <- get_usable_cores()
    function(x) {
        groups <- split(x, x$SignalID)
        results <- mclapply(groups, FUN, mc.cores = n_cores)
        bind_rows(results)
    }
}
# For the date range covered by df$Date (padded by six days either side),
# return a data frame of week numbers and the Tuesday anchoring each week,
# clamped so no Tuesday precedes the data's first date.
get_Tuesdays <- function(df) {
    all_days <- seq(min(df$Date) - days(6), max(df$Date) + days(6), by = "days")
    tuesdays <- all_days[wday(all_days) == 3]  # wday() == 3 is Tuesday
    tuesdays <- pmax(min(df$Date), tuesdays)
    data.frame(Week = week(tuesdays), Date = tuesdays)
}
# Recursively walk a nested list of data frames (the cor/sub/sig result
# structures), printing an indented trace, and write each leaf data frame
# to S3 as {src}/{name}.qs.  For src "main"/"staging", leaves are first
# trimmed to the production report window (global `conf`).
walk_nested_list <- function(df, src, name=deparse(substitute(df)), indent=0) {
    cat(paste(strrep(" ", indent)))
    print(name)
    if (!is.null(names(df))) {
        # Named element whose first child is unnamed => treat as a leaf
        # data frame to upload.
        if (is.null(names(df[[1]]))) {
            print(head(df, 3))
            # Subcorridor tables carry corridor info in Zone_Group; relabel
            # so the uploaded object has Corridor/Subcorridor columns.
            if (startsWith(name, "sub") & "Zone_Group" %in% names(df)) {
                dfp <- df %>% rename(Subcorridor = Corridor, Corridor = Zone_Group)
            } else {
                dfp <- df
            }
            if (src %in% c("main", "staging")) {
                # NOTE(review): assumes exactly one of these columns exists;
                # if zero or several match, the scalar if() below errors --
                # confirm every leaf table has a single period column.
                date_column <- intersect(names(dfp), c("Quarter", "Month", "Date", "Hour", "Timeperiod"))
                if (date_column == "Quarter") {
                    dfp <- filter(dfp, Quarter <= lubridate::quarter(conf$production_report_end_date, with_year = TRUE))
                    print(max(dfp$Quarter))
                } else if (date_column == "Month") {
                    dfp <- filter(dfp, Month <= conf$production_report_end_date)
                    print(max(dfp$Month))
                } else if (date_column == "Date") {
                    dfp <- filter(dfp, Date < ymd(conf$production_report_end_date) + months(1))
                    print(max(dfp$Date))
                } else if (date_column == "Hour") {
                    dfp <- filter(dfp, Hour < ymd(conf$production_report_end_date) + months(1))
                    print(max(dfp$Hour))
                } else if (date_column == "Timeperiod") {
                    dfp <- filter(dfp, Timeperiod < ymd(conf$production_report_end_date) + months(1))
                    print(max(dfp$Timeperiod))
                }
            }
            aws.s3::s3write_using(dfp, qsave, bucket = conf$bucket, object = glue("{src}/{name}.qs"))
            #readr::write_csv(dfp, paste0(name, ".csv"))
        }
        # Recurse into named children, building hyphenated object names.
        for (n in names(df)) {
            if (!is.null(names(df[[n]]))) {
                walk_nested_list(df[[n]], src, name = paste(name, n, sep="-"), indent = indent+10)
            }
        }
    } else {
        print("--")
    }
}
# Diff two data frames using dplyr::all_equal()'s textual report: parse the
# row indices out of its "Rows in x but not in y: i, j, ..." message lines
# and return those rows from each input.
# NOTE(review): dplyr::all_equal() is deprecated (dplyr >= 1.1.0); and when
# the frames ARE equal it returns TRUE rather than a message, so the string
# parsing below produces NA indices -- confirm callers only use this on
# frames already known to differ.
compare_dfs <- function(df1, df2) {
    x <- dplyr::all_equal(df1, df2)
    # One report line per direction; keep just the comma-separated indices.
    y <- str_split(x, "\n")[[1]] %>%
        str_extract(pattern = "\\d+.*") %>%
        str_split(", ") %>%
        lapply(as.integer)
    rows_in_x_but_not_in_y <- y[[1]]
    rows_in_y_but_not_in_x <- y[[2]]
    list(
        rows_in_x_but_not_in_y = df1[rows_in_x_but_not_in_y,],
        rows_in_y_but_not_in_x = df2[rows_in_y_but_not_in_x,]
    )
}
get_corridor_summary_data <- function(cor) {
    #' Converts cor data set to a single data frame, one row per
    #' Zone/Corridor/Month, for use in get_corridor_summary_table function
    #'
    #' @param cor cor data (nested list; the monthly tables under cor$mo
    #'   are joined on Zone_Group/Corridor/Month)
    #' @return A data frame, monthly data of all metrics by Zone and Corridor
    # Each element is one metric table, renamed so its value/delta columns
    # are unique after the join.
    data <- list(
        rename(cor$mo$du, du = uptime, du.delta = delta), # detector uptime - note that zone group is factor not character
        rename(cor$mo$pau, pau = uptime, pau.delta = delta),
        rename(cor$mo$cctv, cctvu = uptime, cctvu.delta = delta),
        rename(cor$mo$cu, cu = uptime, cu.delta = delta),
        rename(cor$mo$tp, tp = vph, tp.delta = delta), # no longer pulling from vpd (volume) table - this is throughput
        rename(cor$mo$aogd, aog.delta = delta),
        rename(cor$mo$qsd, qs = qs_freq, qs.delta = delta),
        rename(cor$mo$sfd, sf = sf_freq, sf.delta = delta),
        rename(cor$mo$tti, tti.delta = delta),
        rename(cor$mo$pti, pti.delta = delta),
        rename(cor$mo$tasks, tasks = Outstanding, tasks.delta = delta.out) #tasks added 10/29/19
    ) %>%
        reduce(left_join, by = c("Zone_Group", "Corridor", "Month")) %>%
        # Keep corridor rows only: Zone_Group is a zone, Corridor is not.
        filter(
            grepl("^Zone", Zone_Group),
            !grepl("^Zone", Corridor)
        ) %>%
        # Drop intermediate columns not shown in the summary table.
        select(
            -uptime.sb,
            -uptime.pr,
            -num,
            -starts_with("ones"),
            -starts_with("cycles"),
            -starts_with("pct"),
            -starts_with("vol"),
            -starts_with("Description"),
            -c(All,Reported,Resolved,cum_Reported,cum_Resolved,delta.rep,delta.res) #tasks added 10/29/19
        )
    return(data)
}
write_signal_details <- function(plot_date, conf, signals_list = NULL) {
    # Build the per-signal "signal details" dataset for one date by joining
    # raw (rc), filtered (fc) and adjusted (ac) hourly detector counts from
    # S3, then write one nested table per signal back to S3 under
    # mark/signal_details/date=<plot_date>/sg_<plot_date>.parquet.
    #
    # plot_date:    date to process (interpolated into the S3 object keys)
    # conf:         config list; conf$bucket names the S3 bucket
    # signals_list: optional character vector; restrict output to these signals
    print(plot_date)
    #--- This takes approx one minute per day -----------------------
    # Raw counts
    rc <- s3_read_parquet(
        bucket = conf$bucket,
        object = glue("mark/counts_1hr/date={plot_date}/counts_1hr_{plot_date}.parquet")) %>%
        convert_to_utc() %>%
        select(
            SignalID, Date, Timeperiod, Detector, CallPhase, vol)
    # Filtered counts (carry the Good_Day quality flag)
    fc <- s3_read_parquet(
        bucket = conf$bucket,
        object = glue("mark/filtered_counts_1hr/date={plot_date}/filtered_counts_1hr_{plot_date}.parquet")) %>%
        convert_to_utc() %>%
        select(
            SignalID, Date, Timeperiod, Detector, CallPhase, Good_Day)
    # Adjusted counts
    ac <- s3_read_parquet(
        bucket = conf$bucket,
        object = glue("mark/adjusted_counts_1hr/date={plot_date}/adjusted_counts_1hr_{plot_date}.parquet")) %>%
        convert_to_utc() %>%
        select(
            SignalID, Date, Timeperiod, Detector, CallPhase, vol)
    if (!is.null(signals_list)) {
        rc <- rc %>%
            filter(as.character(SignalID) %in% signals_list)
        fc <- fc %>%
            filter(as.character(SignalID) %in% signals_list)
        ac <- ac %>%
            filter(as.character(SignalID) %in% signals_list)
    }
    # Full join so detectors present in any one source are kept.
    df <- list(
        rename(rc, vol_rc = vol),
        fc,
        rename(ac, vol_ac = vol)) %>%
        reduce(full_join, by = c("SignalID", "Date", "Timeperiod", "Detector", "CallPhase")
        ) %>%
        mutate(bad_day = if_else(Good_Day==0, TRUE, FALSE)) %>%
        # NOTE(review): vol_ac is kept only where the raw count was flagged
        # bad (NA on good days) -- presumably so downstream plots overlay
        # adjusted values only where raw data failed; confirm intent.
        transmute(
            SignalID = as.integer(SignalID),
            Timeperiod = Timeperiod,
            Detector = as.integer(Detector),
            CallPhase = as.integer(CallPhase),
            vol_rc = as.integer(vol_rc),
            vol_ac = ifelse(bad_day, as.integer(vol_ac), NA),
            bad_day) %>%
        arrange(
            SignalID,
            Detector,
            Timeperiod)
    #----------------------------------------------------------------
    # Collapse timestamps to hour-of-day and nest one sub-table per signal.
    df <- df %>% mutate(
        Hour = hour(Timeperiod)) %>%
        select(-Timeperiod) %>%
        relocate(Hour) %>%
        nest(data = -c(SignalID))
    table_name <- "signal_details"
    prefix <- "sg"
    date_ <- plot_date
    fn = glue("{prefix}_{date_}")
    # Retry the S3 write up to 5 times (exponential backoff in keep_trying).
    keep_trying(
        s3write_using,
        n_tries = 5,
        df,
        write_parquet,
        use_deprecated_int96_timestamps = TRUE,
        bucket = conf$bucket,
        object = glue("mark/{table_name}/date={date_}/{fn}.parquet"),
        opts = list(multipart = TRUE, body_as_string = TRUE)
    )
}
cleanup_cycle_data <- function(date_) {
    # Delete the local per-date cycles and detections directories.
    # Fix: use unlink() instead of shelling out to `rm -r -f` -- it is
    # portable (works on Windows too) and does not pass the interpolated
    # date through a shell, removing a command-injection risk.
    print(glue("Removing local cycles and detections files for {date_}"))
    unlink(glue("../cycles/date={date_}"), recursive = TRUE, force = TRUE)
    unlink(glue("../detections/date={date_}"), recursive = TRUE, force = TRUE)
}
# Get chunks of size 'rows' from a data source that answers to 'collect()'
# Partition the distinct SignalIDs of `df` (any source that answers to
# collect()) into chunks sized so each chunk covers roughly `rows` records.
get_signals_chunks <- function(df, rows = 1e6) {
    chunk <- function(d, n) {
        split(d, ceiling(seq_along(d)/n))
    }
    records <- df %>% count() %>% collect()
    records <- records$n
    if ("SignalID" %in% colnames(df)) {
        signals_list <- df %>% distinct(SignalID) %>% arrange(SignalID) %>% collect()
        signals_list <- signals_list$SignalID
    } else if ("signalid" %in% colnames(df)) {
        signals_list <- df %>% distinct(signalid) %>% arrange(signalid) %>% collect()
        signals_list <- signals_list$signalid
    } else {
        # Fail fast: the original fell through here and errored later on an
        # undefined `signals_list`.
        stop("get_signals_chunks: data source has no SignalID/signalid column.",
             call. = FALSE)
    }
    # keep this to about a million records per core
    # based on the average number of records per signal.
    # number of chunks will increase (chunk_length will decrease) with more days
    # but memory usage should stay consistent per core
    # Guard: at least one signal per chunk (round() can reach 0 when each
    # signal averages more than `rows` records).
    chunk_length <- max(1, round(rows/(records/length(signals_list))))
    chunk(signals_list, chunk_length)
}
# Get chunks of size 'rows' from an arrow data source
# Partition the distinct SignalIDs of an arrow data source into chunks
# sized so each chunk covers roughly `rows` records.
get_signals_chunks_arrow <- function(df, rows = 1e6) {
    chunk <- function(d, n) {
        split(d, ceiling(seq_along(d)/n))
    }
    extract <- df %>% select(SignalID) %>% collect()
    records <- nrow(extract)
    signals_list <- (extract %>% distinct(SignalID) %>% arrange(SignalID))$SignalID
    # keep this to about a million records per core
    # based on the average number of records per signal.
    # number of chunks will increase (chunk_length will decrease) with more days
    # but memory usage should stay consistent per core
    # Guard: at least one signal per chunk (round() can reach 0 when each
    # signal averages more than `rows` records) -- same fix as
    # get_signals_chunks(), kept consistent between the two helpers.
    chunk_length <- max(1, round(rows/(records/length(signals_list))))
    chunk(signals_list, chunk_length)
}
show_largest_objects <- function(n = 20) {
    # The `n` largest objects in the global environment, by object.size().
    # Fixes: vapply() for a type-stable numeric result (sapply() returns an
    # empty list for an empty environment, which breaks as.data.frame), and
    # an explicit envir on get() so a local of the same name is never
    # picked up instead of the global object.
    obj_names <- ls(envir = globalenv())
    sizes <- vapply(
        obj_names,
        function(x) { as.numeric(object.size(get(x, envir = globalenv()))) },
        numeric(1)
    )
    df <- data.frame(Size = sizes)
    df %>% arrange(desc(Size)) %>% head(n)
}
| /Utilities.R | no_license | atops/GDOT-Flexdashboard-Report | R | false | false | 19,071 | r |
# Load the cached corridor-level aggregation dataset from S3
# (qs-serialized; bucket name comes from the global `conf`).
get_cor <- function() {
    s3read_using(qs::qread, bucket = conf$bucket, object = "cor_ec2.qs")
}
# Load the cached subcorridor-level aggregation dataset from S3.
get_sub <- function() {
    s3read_using(qs::qread, bucket = conf$bucket, object = "sub_ec2.qs")
}
# Load the cached signal-level aggregation dataset from S3.
get_sig <- function() {
    s3read_using(qs::qread, bucket = conf$bucket, object = "sig_ec2.qs")
}
# Human-readable size of object `x` in megabytes, e.g. "12.3 Mb".
sizeof <- function(x) {
    bytes <- object.size(x)
    format(bytes, units = "Mb")
}
# Re-format an R source file in place with styler, using the tidyverse
# style guide but 4-space indentation (this codebase's convention).
apply_style <- function(filename) {
    styler::style_file(filename, transformers = styler::tidyverse_style(indent_by = 4))
}
# The Monday on or before `date_`.  With week_start = 1, lubridate::wday()
# numbers Monday as 1, so we step back (wday - 1) days.
get_most_recent_monday <- function(date_) {
    offset <- lubridate::wday(date_, week_start = 1) - 1
    date_ - days(offset)
}
# Resolve a date string that may be relative ("yesterday", "<N> days ago")
# or already absolute; relative inputs are returned as "YYYY-MM-DD",
# anything unrecognized is passed through unchanged.
# Fixes: the "N days ago" offset is extracted once (it was computed twice)
# and explicitly coerced to integer before the lubridate days() arithmetic.
get_date_from_string <- function(x) {
    if (x == "yesterday") {
        format(today() - days(1), "%Y-%m-%d")
    } else {
        d <- str_extract(x, "\\d+(?= days ago)")
        if (!is.na(d)) {
            format(today() - days(as.integer(d)), "%Y-%m-%d")
        } else {
            x
        }
    }
}
get_usable_cores <- function() {
    # Usable cores is one per 8 GB of RAM.
    # Get RAM from system file and divide
    if (Sys.info()["sysname"] == "Windows") {
        # `systeminfo` reports e.g. "Total Physical Memory:  16,384 MB".
        x <- suppressWarnings(shell('systeminfo | findstr Memory', intern = TRUE))
        memline <- x[grepl("Total Physical Memory", x)]
        # NOTE(review): assumes a comma thousands separator and >= 1,000 MB
        # of RAM -- locale-dependent; confirm on non-US-locale Windows.
        mem <- stringr::str_extract(string = memline, pattern = "\\d+,\\d+")
        mem <- as.numeric(gsub(",", "", mem))
        mem <- round(mem, -3)      # MB, rounded to the nearest 1,000
        max(floor(mem/8e3), 1)     # one core per 8,000 MB, minimum 1
    } else if (Sys.info()["sysname"] == "Linux") {
        x <- readLines('/proc/meminfo')
        # NOTE(review): Linux uses MemAvailable (currently free-ish memory)
        # while the Windows branch uses TOTAL physical memory -- the two
        # branches are not equivalent; confirm which is intended.
        memline <- x[grepl("MemAvailable", x)]
        mem <- stringr::str_extract(string = memline, pattern = "\\d+")
        mem <- as.integer(mem)
        mem <- round(mem, -6)      # kB, rounded to the nearest 1,000,000
        max(floor(mem/8e6), 1)     # one core per 8e6 kB (~8 GB), minimum 1
    } else {
        # macOS and other platforms are unsupported.
        stop("Unknown operating system.")
    }
}
# From: https://billpetti.github.io/2017-10-13-retry-scrape-function-automatically-r-rstats/
# Evaluate the lazily-passed expression `.f`, retrying on error up to
# `max_attempts` times, sleeping `wait_seconds` between attempts.
# Returns the value of `.f` from the first successful attempt.
# Fixes: the original never returned the successful result and called
# stop() unconditionally after the loop, so every call errored even when
# the expression succeeded.
retry_function <- function(.f, max_attempts = 5,
                           wait_seconds = 5) {
    force(max_attempts)
    force(wait_seconds)
    for (i in seq_len(max_attempts)) {
        succeeded <- FALSE
        output <- tryCatch({
            # Forces the promise; R restarts evaluation of a promise whose
            # previous forcing was interrupted by an error, so retries re-run it.
            res <- .f
            succeeded <- TRUE
            res
        }, error = function(e) {
            if (wait_seconds > 0) {
                message(paste0("Retrying at ", Sys.time() + wait_seconds))
                Sys.sleep(wait_seconds)
            }
            NULL
        })
        if (succeeded) {
            return(output)
        }
    }
    stop("All ", max_attempts, " attempts failed.", call. = FALSE)
}
split_wrapper <- function(FUN) {
# Creates a function that runs a function, splits by signalid and recombines
f <- function(df, split_size, ...) {
# Define temporary directory and file names
temp_dir <- tempdir()
if (!dir.exists(temp_dir)) {
dir.create(temp_dir)
}
temp_file_root <- stringi::stri_rand_strings(1,8)
temp_path_root <- file.path(temp_dir, temp_file_root)
print(temp_path_root)
print("Writing to temporary files by SignalID...")
signalids <- as.character(unique(df$SignalID))
splits <- split(signalids, ceiling(seq_along(signalids)/split_size))
lapply(
names(splits),
function(i) {
cat('.')
df %>%
filter(SignalID %in% splits[[i]]) %>%
write_fst(paste0(temp_path_root, "_", i, ".fst"))
})
cat('.', sep='\n')
file_names <- paste0(temp_path_root, "_", names(splits), ".fst")
# Read in each temporary file and run adjusted counts in parallel. Afterward, clean up.
print("Running for each SignalID...")
df <- mclapply(file_names, mc.cores = usable_cores, FUN = function(fn) {
#df <- lapply(file_names, function(fn) {
cat('.')
FUN(read_fst(fn), ...)
}) %>% bind_rows()
cat('.', sep='\n')
lapply(file_names, FUN = file.remove)
df
}
}
# Read a feather file that is packed inside a zip archive `x`.
# Fix: extract into a throwaway temp directory instead of the working
# directory (the old unzip(x) littered the cwd and risked clobbering
# files), and remove the extracted copy on exit.
read_zipped_feather <- function(x) {
    exdir <- tempfile("unzipped_")
    dir.create(exdir)
    on.exit(unlink(exdir, recursive = TRUE), add = TRUE)
    files <- unzip(x, exdir = exdir)
    # A zipped feather is expected to contain a single file; read the first entry.
    read_feather(files[[1]])
}
keep_trying <- function(func, n_tries, ..., sleep = 1, timeout = Inf) {
safely_func = purrr::safely(func, otherwise = NULL)
result <- NULL
error <- 1
try_number <- 1
while((!is.null(error) || is.null(result)) && try_number <= n_tries) {
x <- R.utils::withTimeout(safely_func(...), timeout=timeout, onTimeout="error")
result <- x$result
error <- x$error
if (!is.null(error)) {
print(glue("{deparse(substitute(func))} Attempt {try_number} failed: {error}"))
}
try_number = try_number + 1
Sys.sleep(sleep)
sleep = sleep * 2
}
return(result)
}
# Read and row-bind all .rds files in the working directory whose names
# match `pattern`.
# Fix: anchor the extension check -- the old grepl(".rds", lf) treated the
# dot as a wildcard, so names like "birds.csv" also matched.
readRDS_multiple <- function(pattern) {
    lf <- list.files(pattern = pattern)
    lf <- lf[grepl("\\.rds$", lf)]
    bind_rows(lapply(lf, readRDS))
}
# Because of issue with Apache Arrow (feather, parquet)
# where R wants to convert UTC to local time zone on read
# Switch date or datetime fields back to UTC. Run on read.
convert_to_utc <- function(df) {
    # Reinterpret every POSIXct column of `df` as UTC.  Only the "tzone"
    # display attribute changes; the underlying instant (seconds since the
    # epoch) is untouched -- the same effect as lubridate::with_tz().
    # Fixes: detect datetime columns with inherits() instead of the fragile
    # sum(class(x) == "POSIXct") tally, and drop the lubridate dependency
    # from this helper.
    for (col in names(df)) {
        if (inherits(df[[col]], "POSIXct")) {
            attr(df[[col]], "tzone") <- "UTC"
        }
    }
    df
}
week <- function(d) {
d0 <- ymd("2016-12-25")
as.integer(trunc((ymd(d) - d0)/dweeks(1)))
}
# All "YYYY-MM" labels for the months spanning [start_date, end_date].
# Inputs may be Date objects or parseable date strings.
# Fix: format() is vectorized over Date, so the type-unstable sapply()
# loop (whose return type depends on input length) is replaced by a
# single vectorized call with identical output.
get_month_abbrs <- function(start_date, end_date) {
    start_date <- ymd(start_date)
    day(start_date) <- 1  # snap to the first of the month before stepping
    end_date <- ymd(end_date)
    format(seq(start_date, end_date, by = "1 month"), "%Y-%m")
}
bind_rows_keep_factors <- function(dfs) {
## Identify all factors
factors <- unique(unlist(
map(list(dfs[[1]]), ~ select_if(dfs[[1]], is.factor) %>% names())
))
## Bind dataframes, convert characters back to factors
suppressWarnings(bind_rows(dfs)) %>%
mutate_at(vars(one_of(factors)), factor)
}
match_type <- function(val, val_type_to_match) {
    # Coerce `val` to the (first) class of `val_type_to_match`,
    # e.g. match_type("5", 1L) -> 5L.
    # Fix: look the as.<class> coercion function up directly instead of
    # eval(parse(text = ...)), which broke for character `val` (the pasted
    # text "as.character(abc)" evaluated `abc` as a symbol) and was a code
    # injection hazard for untrusted input.
    coerce <- get(paste0("as.", class(val_type_to_match)[[1]]), mode = "function")
    coerce(val)
}
addtoRDS <- function(df, fn, delta_var, rsd, csd) {
#' combines data frame in local rds file with newly calculated data
#' trimming the current data and appending the new data to prevent overlaps
#' and/or duplicates. Used throughout Monthly_Report_Package code
#' to avoid having to recalculate entire report period (13 months) every time
#' which takes too long and runs into memory issues frequently.
#'
#' @param df newly calculated data frame on most recent data
#' @param fn filename of same data over entire reporting period (13 months)
#' @param rsd report_start_date: start of current report period (13 months prior)
#' @param csd calculation_start_date: start date of most recent data
#' @return a combined data frame
#' @examples
#' addtoRDS(avg_daily_detector_uptime, "avg_daily_detector_uptime.rds", report_start_date, calc_start_date)
combine_dfs <- function(df0, df, delta_var, rsd, csd) {
if (class(rsd) == "character") rsd <- as_date(rsd)
if (class(csd) == "character") csd <- as_date(csd)
# Extract aggregation period from the data fields
periods <- intersect(c("Month", "Date", "Hour", "Timeperiod"), names(df0))
per_ <- as.name(periods)
# Remove everything after calcs_start_date (csd)
# and before report_start_date (rsd) in original df
df0 <- df0 %>% filter(!!per_ >= rsd, !!per_ < csd)
# Make sure new data starts on csd
# This is especially important for when csd is the start of the month
# and we've run calcs going back to the start of the week, which is in
# the previous month, e.g., 3/31/2020 is a Tuesday.
df <- df %>% filter(!!per_ >= csd)
# Extract aggregation groupings from the data fields
# to calculate period-to-period deltas
groupings <- intersect(c("Zone_Group", "Corridor", "SignalID"), names(df0))
groups_ <- sapply(groupings, as.name)
group_arrange <- c(periods, groupings) %>%
sapply(as.name)
var_ <- as.name(delta_var)
# Combine old and new
x <- bind_rows_keep_factors(list(df0, df)) %>%
# Recalculate deltas from prior periods over combined df
group_by(!!!groups_) %>%
arrange(!!!group_arrange) %>%
mutate(lag_ = lag(!!var_),
delta = ((!!var_) - lag_)/lag_) %>%
ungroup() %>%
dplyr::select(-lag_)
x
}
if (!file.exists(fn)) {
saveRDS(df, fn)
} else {
df0 <- readRDS(fn)
if (is.list(df) && is.list(df0) &&
!is.data.frame(df) && !is.data.frame(df0) &&
identical(names(df), names(df0))) {
x <- purrr::map2(df0, df, combine_dfs, delta_var, rsd, csd)
} else {
x <- combine_dfs(df0, df, delta_var, rsd, csd)
}
saveRDS(x, fn)
x
}
}
write_fst_ <- function(df, fn, append = FALSE) {
if (append == TRUE & file.exists(fn)) {
factors <- unique(unlist(
map(list(df), ~ select_if(df, is.factor) %>% names())
))
df_ <- read_fst(fn)
df_ <- bind_rows(df, df_) %>%
mutate_at(vars(one_of(factors)), factor)
} else {
df_ <- df
}
write_fst(distinct(df_), fn)
}
get_unique_timestamps <- function(df) {
df %>%
dplyr::select(Timestamp) %>%
distinct() %>%
mutate(SignalID = 0) %>%
dplyr::select(SignalID, Timestamp)
}
multicore_decorator <- function(FUN) {
usable_cores <- get_usable_cores()
function(x) {
x %>%
split(.$SignalID) %>%
mclapply(FUN, mc.cores = usable_cores) %>% #floor(parallel::detectCores()*3/4)) %>%
bind_rows()
}
}
get_Tuesdays <- function(df) {
dates_ <- seq(min(df$Date) - days(6), max(df$Date) + days(6), by = "days")
tuesdays <- dates_[wday(dates_) == 3]
tuesdays <- pmax(min(df$Date), tuesdays)
data.frame(Week = week(tuesdays), Date = tuesdays)
}
walk_nested_list <- function(df, src, name=deparse(substitute(df)), indent=0) {
cat(paste(strrep(" ", indent)))
print(name)
if (!is.null(names(df))) {
if (is.null(names(df[[1]]))) {
print(head(df, 3))
if (startsWith(name, "sub") & "Zone_Group" %in% names(df)) {
dfp <- df %>% rename(Subcorridor = Corridor, Corridor = Zone_Group)
} else {
dfp <- df
}
if (src %in% c("main", "staging")) {
date_column <- intersect(names(dfp), c("Quarter", "Month", "Date", "Hour", "Timeperiod"))
if (date_column == "Quarter") {
dfp <- filter(dfp, Quarter <= lubridate::quarter(conf$production_report_end_date, with_year = TRUE))
print(max(dfp$Quarter))
} else if (date_column == "Month") {
dfp <- filter(dfp, Month <= conf$production_report_end_date)
print(max(dfp$Month))
} else if (date_column == "Date") {
dfp <- filter(dfp, Date < ymd(conf$production_report_end_date) + months(1))
print(max(dfp$Date))
} else if (date_column == "Hour") {
dfp <- filter(dfp, Hour < ymd(conf$production_report_end_date) + months(1))
print(max(dfp$Hour))
} else if (date_column == "Timeperiod") {
dfp <- filter(dfp, Timeperiod < ymd(conf$production_report_end_date) + months(1))
print(max(dfp$Timeperiod))
}
}
aws.s3::s3write_using(dfp, qsave, bucket = conf$bucket, object = glue("{src}/{name}.qs"))
#readr::write_csv(dfp, paste0(name, ".csv"))
}
for (n in names(df)) {
if (!is.null(names(df[[n]]))) {
walk_nested_list(df[[n]], src, name = paste(name, n, sep="-"), indent = indent+10)
}
}
} else {
print("--")
}
}
compare_dfs <- function(df1, df2) {
x <- dplyr::all_equal(df1, df2)
y <- str_split(x, "\n")[[1]] %>%
str_extract(pattern = "\\d+.*") %>%
str_split(", ") %>%
lapply(as.integer)
rows_in_x_but_not_in_y <- y[[1]]
rows_in_y_but_not_in_x <- y[[2]]
list(
rows_in_x_but_not_in_y = df1[rows_in_x_but_not_in_y,],
rows_in_y_but_not_in_x = df2[rows_in_y_but_not_in_x,]
)
}
get_corridor_summary_data <- function(cor) {
#' Converts cor data set to a single data frame for the current_month
#' for use in get_corridor_summary_table function
#'
#' @param cor cor data
#' @param current_month Current month in date format
#' @return A data frame, monthly data of all metrics by Zone and Corridor
data <- list(
rename(cor$mo$du, du = uptime, du.delta = delta), # detector uptime - note that zone group is factor not character
rename(cor$mo$pau, pau = uptime, pau.delta = delta),
rename(cor$mo$cctv, cctvu = uptime, cctvu.delta = delta),
rename(cor$mo$cu, cu = uptime, cu.delta = delta),
rename(cor$mo$tp, tp = vph, tp.delta = delta), # no longer pulling from vpd (volume) table - this is throughput
rename(cor$mo$aogd, aog.delta = delta),
rename(cor$mo$qsd, qs = qs_freq, qs.delta = delta),
rename(cor$mo$sfd, sf = sf_freq, sf.delta = delta),
rename(cor$mo$tti, tti.delta = delta),
rename(cor$mo$pti, pti.delta = delta),
rename(cor$mo$tasks, tasks = Outstanding, tasks.delta = delta.out) #tasks added 10/29/19
) %>%
reduce(left_join, by = c("Zone_Group", "Corridor", "Month")) %>%
filter(
grepl("^Zone", Zone_Group),
!grepl("^Zone", Corridor)
) %>%
select(
-uptime.sb,
-uptime.pr,
-num,
-starts_with("ones"),
-starts_with("cycles"),
-starts_with("pct"),
-starts_with("vol"),
-starts_with("Description"),
-c(All,Reported,Resolved,cum_Reported,cum_Resolved,delta.rep,delta.res) #tasks added 10/29/19
)
return(data)
}
write_signal_details <- function(plot_date, conf, signals_list = NULL) {
print(plot_date)
#--- This takes approx one minute per day -----------------------
rc <- s3_read_parquet(
bucket = conf$bucket,
object = glue("mark/counts_1hr/date={plot_date}/counts_1hr_{plot_date}.parquet")) %>%
convert_to_utc() %>%
select(
SignalID, Date, Timeperiod, Detector, CallPhase, vol)
fc <- s3_read_parquet(
bucket = conf$bucket,
object = glue("mark/filtered_counts_1hr/date={plot_date}/filtered_counts_1hr_{plot_date}.parquet")) %>%
convert_to_utc() %>%
select(
SignalID, Date, Timeperiod, Detector, CallPhase, Good_Day)
ac <- s3_read_parquet(
bucket = conf$bucket,
object = glue("mark/adjusted_counts_1hr/date={plot_date}/adjusted_counts_1hr_{plot_date}.parquet")) %>%
convert_to_utc() %>%
select(
SignalID, Date, Timeperiod, Detector, CallPhase, vol)
if (!is.null(signals_list)) {
rc <- rc %>%
filter(as.character(SignalID) %in% signals_list)
fc <- fc %>%
filter(as.character(SignalID) %in% signals_list)
ac <- ac %>%
filter(as.character(SignalID) %in% signals_list)
}
df <- list(
rename(rc, vol_rc = vol),
fc,
rename(ac, vol_ac = vol)) %>%
reduce(full_join, by = c("SignalID", "Date", "Timeperiod", "Detector", "CallPhase")
) %>%
mutate(bad_day = if_else(Good_Day==0, TRUE, FALSE)) %>%
transmute(
SignalID = as.integer(SignalID),
Timeperiod = Timeperiod,
Detector = as.integer(Detector),
CallPhase = as.integer(CallPhase),
vol_rc = as.integer(vol_rc),
vol_ac = ifelse(bad_day, as.integer(vol_ac), NA),
bad_day) %>%
arrange(
SignalID,
Detector,
Timeperiod)
#----------------------------------------------------------------
df <- df %>% mutate(
Hour = hour(Timeperiod)) %>%
select(-Timeperiod) %>%
relocate(Hour) %>%
nest(data = -c(SignalID))
table_name <- "signal_details"
prefix <- "sg"
date_ <- plot_date
fn = glue("{prefix}_{date_}")
keep_trying(
s3write_using,
n_tries = 5,
df,
write_parquet,
use_deprecated_int96_timestamps = TRUE,
bucket = conf$bucket,
object = glue("mark/{table_name}/date={date_}/{fn}.parquet"),
opts = list(multipart = TRUE, body_as_string = TRUE)
)
}
cleanup_cycle_data <- function(date_) {
    # Delete the local per-date cycles and detections directories.
    # Fix: use unlink() instead of shelling out to `rm -r -f` -- it is
    # portable (works on Windows too) and does not pass the interpolated
    # date through a shell, removing a command-injection risk.
    print(glue("Removing local cycles and detections files for {date_}"))
    unlink(glue("../cycles/date={date_}"), recursive = TRUE, force = TRUE)
    unlink(glue("../detections/date={date_}"), recursive = TRUE, force = TRUE)
}
# Get chunks of size 'rows' from a data source that answers to 'collect()'
get_signals_chunks <- function(df, rows = 1e6) {
chunk <- function(d, n) {
split(d, ceiling(seq_along(d)/n))
}
records <- df %>% count() %>% collect()
records <- records$n
if ("SignalID" %in% colnames(df)) {
signals_list <- df %>% distinct(SignalID) %>% arrange(SignalID) %>% collect()
signals_list <- signals_list$SignalID
} else if ("signalid" %in% colnames(df)) {
signals_list <- df %>% distinct(signalid) %>% arrange(signalid) %>% collect()
signals_list <- signals_list$signalid
}
# keep this to about a million records per core
# based on the average number of records per signal.
# number of chunks will increase (chunk_length will decrease) with more days
# but memory usage should stay consistent per core
chunk_length <- round(rows/(records/length(signals_list)))
chunk(signals_list, chunk_length)
}
# Get chunks of size 'rows' from an arrow data source
get_signals_chunks_arrow <- function(df, rows = 1e6) {
chunk <- function(d, n) {
split(d, ceiling(seq_along(d)/n))
}
extract <- df %>% select(SignalID) %>% collect()
records <- nrow(extract)
signals_list <- (extract %>% distinct(SignalID) %>% arrange(SignalID))$SignalID
# keep this to about a million records per core
# based on the average number of records per signal.
# number of chunks will increase (chunk_length will decrease) with more days
# but memory usage should stay consistent per core
chunk_length <- round(rows/(records/length(signals_list)))
chunk(signals_list, chunk_length)
}
show_largest_objects <- function(n = 20) {
    # The `n` largest objects in the global environment, by object.size().
    # Fixes: vapply() for a type-stable numeric result (sapply() returns an
    # empty list for an empty environment, which breaks as.data.frame), and
    # an explicit envir on get() so a local of the same name is never
    # picked up instead of the global object.
    obj_names <- ls(envir = globalenv())
    sizes <- vapply(
        obj_names,
        function(x) { as.numeric(object.size(get(x, envir = globalenv()))) },
        numeric(1)
    )
    df <- data.frame(Size = sizes)
    df %>% arrange(desc(Size)) %>% head(n)
}
|
## Assignment 5
## Choropleth of mean college completion rate (C150_4) by state.
library(tidyverse)
library(readr)
library(dplyr)
data <- read_csv("most-recent-cohorts-all-data-elements-1.csv")
library(maps)
us_states <- map_data("state")
view(data$C150_4)
# Base map outline (no fill yet)
p <- ggplot(data = us_states,
            mapping = aes(x = long, y = lat,
                          group = group))
p + geom_polygon(color = "gray90", size = 0.1)
schools <- select(data, STABBR, C150_4, INSTNM)
# Fixes relative to the original draft:
#  - the mutated column is now NAMED `state` (it was anonymous, so every
#    later reference to `state` -- rename/select/group_by -- failed);
#  - state names are lower-cased to match map_data("state")$region;
#  - `mean[C150_4)` was a syntax error -> mean(as.numeric(C150_4), na.rm = TRUE),
#    since C150_4 is read as character (it contains "NULL" strings).
schools <- mutate(schools, state = tolower(state.name[match(STABBR, state.abb)]))
schools <- subset(schools, C150_4 != 0)
schools <- schools[!schools$C150_4 == "NULL", ]
schools <- select(schools, C150_4, state)
dt <- schools %>%
    group_by(state) %>%
    summarize(retention_mean = mean(as.numeric(C150_4), na.rm = TRUE))
dt <- rename(dt, region = state)
us_states <- left_join(us_states, dt, by = 'region')
# Albers-projected choropleth filled by the state mean
p <- ggplot(data = us_states,
            mapping = aes(x = long, y = lat,
                          group = group, fill = retention_mean))
p + geom_polygon() + coord_map(projection = "albers", lat0 = 39, lat1 = 45)
p <- p + geom_polygon() + coord_map(projection = "albers", lat0 = 39, lat1 = 45)
p <-
| /Assingetn 5.R | no_license | wbh223/BIS-244-hollierwilliam | R | false | false | 1,267 | r | ##Assingment 5
## Choropleth of mean college completion rate (C150_4) by state.
library(tidyverse)
library(readr)
library(dplyr)
data <- read_csv("most-recent-cohorts-all-data-elements-1.csv")
library(maps)
us_states <- map_data("state")
view(data$C150_4)
# Base map outline (no fill yet)
p <- ggplot(data = us_states,
            mapping = aes(x = long, y = lat,
                          group = group))
p + geom_polygon(color = "gray90", size = 0.1)
schools <- select(data, STABBR, C150_4, INSTNM)
# Fixes relative to the original draft:
#  - the mutated column is now NAMED `state` (it was anonymous, so every
#    later reference to `state` -- rename/select/group_by -- failed);
#  - state names are lower-cased to match map_data("state")$region;
#  - `mean[C150_4)` was a syntax error -> mean(as.numeric(C150_4), na.rm = TRUE),
#    since C150_4 is read as character (it contains "NULL" strings).
schools <- mutate(schools, state = tolower(state.name[match(STABBR, state.abb)]))
schools <- subset(schools, C150_4 != 0)
schools <- schools[!schools$C150_4 == "NULL", ]
schools <- select(schools, C150_4, state)
dt <- schools %>%
    group_by(state) %>%
    summarize(retention_mean = mean(as.numeric(C150_4), na.rm = TRUE))
dt <- rename(dt, region = state)
us_states <- left_join(us_states, dt, by = 'region')
# Albers-projected choropleth filled by the state mean
p <- ggplot(data = us_states,
            mapping = aes(x = long, y = lat,
                          group = group, fill = retention_mean))
p + geom_polygon() + coord_map(projection = "albers", lat0 = 39, lat1 = 45)
p <- p + geom_polygon() + coord_map(projection = "albers", lat0 = 39, lat1 = 45)
|
## Aim of this script file is to cache inverse of matrix. Functions will check if inverse of matrix
## is already calculated and if it is present in cache(value of matrix is not changed) then it will return value from cache and if not then it will calculate inverse
## of matrix. makeCacheMatrix will generate object matrix and cacheSolve will calculate inverse of matrix
## by checking the value in cache.
## Assumption - It is assumed that the matrix taken for calculation is always a square invertible matrix.
## Author - Ankur Thakkar
## makeCacheMatrix function will generate special matrix. It will have list of functions, normally they are
## getters and setters for matrix. Input should be sqaure invertible matrix.
## Build a "cache-aware" matrix object: a list of closures sharing a
## stored matrix `x` and its lazily cached inverse.  Input should be a
## square invertible matrix.
makeCacheMatrix <- function(x = matrix()) {
    cached_inverse <- NULL
    # Replace the stored matrix; any cached inverse becomes stale, so drop it.
    setMatrix <- function(y) {
        x <<- y
        cached_inverse <<- NULL
    }
    # Return the stored matrix.
    getMatrix <- function() {
        x
    }
    # Store a computed inverse in the cache.
    setInv <- function(I) {
        cached_inverse <<- I
    }
    # Return the cached inverse (NULL if none has been stored yet).
    getInv <- function() {
        cached_inverse
    }
    list(set = setMatrix, get = getMatrix, setInv = setInv, getInv = getInv)
}
## cacheSolve function will calculate inverse of matrix taken from makeCacheMatrix. It will
## first check if inverse is already available in cache and if matrix is not changed then it will
## returned same inverse matrix from cache. Thus saving cacluation time.
## Inverse of the special matrix object created by makeCacheMatrix(),
## reusing the cached inverse when one is available; otherwise computes
## it with solve() and stores it in the cache before returning.
cacheSolve <- function(x, ...) {
    cached <- x$getInv()
    # Cache hit: skip the computation entirely.
    if (!is.null(cached)) {
        message("getting cached data")
        return(cached)
    }
    # Cache miss: compute, memoize, return.
    inverse <- solve(x$get())
    x$setInv(inverse)
    inverse
}
| /cachematrix.R | no_license | ankurthakkar007/ProgrammingAssignment2 | R | false | false | 1,918 | r | ## Aim of this script file is to cache inverse of matrix. Functions will check if inverse of matrix
## is already calculated and if it is present in cache(value of matrix is not changed) then it will return value from cache and if not then it will calculate inverse
## of matrix. makeCacheMatrix will generate object matrix and cacheSolve will calculate inverse of matrix
## by checking the value in cache.
## Assumption - It is assumed that the matrix taken for calcluation is always square invertible matrix.
## Author - Ankur Thakkar
## makeCacheMatrix function will generate special matrix. It will have list of functions, normally they are
## getters and setters for matrix. Input should be sqaure invertible matrix.
makeCacheMatrix <- function(x = matrix()) {
IM <- NULL #initializing Inverse matrix to NULL
# Function setMatrix to set matrix
setMatrix <- function(y) {
x <<- y
IM <<- NULL
}
# Function getMatrix to get matrix
getMatrix <- function() x
# Function setInv to set inverse
setInv <- function(I){
IM <<- I
}
# Function getInv to get inverse
getInv <- function() IM
# generate function list
list(set=setMatrix, get=getMatrix, setInv= setInv, getInv=getInv)
}
## cacheSolve function will calculate inverse of matrix taken from makeCacheMatrix. It will
## first check if inverse is already available in cache and if matrix is not changed then it will
## returned same inverse matrix from cache. Thus saving cacluation time.
cacheSolve <- function(x, ...) {
#getting Inverse from function
IM <- x$getInv()
# if inverse is alreadt present than dont calculate inverse of the matrix
if(!is.null(IM)) {
message("getting cached data")
return(IM)
}
# else calculate inverse of the matrix
m <- x$get()
IM <- solve(m)
x$setInv(IM)
IM
}
|
\name{index-class}
\Rdversion{1.1}
\docType{class}
\alias{index-class}
\title{Class \code{"index"}}
\description{
A virtual class for matrix extraction, copied from the \code{Matrix}
package.
}
\section{Objects from the Class}{A virtual Class: No objects may be created from it.}
\section{Methods}{
\describe{
\item{[}{\code{signature(x = "brobmat", i = "index", j = "index")}: ... }
\item{[}{\code{signature(x = "brobmat", i = "index", j = "missing")}: ... }
\item{[}{\code{signature(x = "brobmat", i = "missing", j = "index")}: ... }
\item{[<-}{\code{signature(x = "brobmat", i = "index", j = "index")}: ... }
\item{[<-}{\code{signature(x = "brobmat", i = "index", j = "missing")}: ... }
\item{[<-}{\code{signature(x = "brobmat", i = "missing", j = "index")}: ... }
}
}
\references{
Douglas Bates and Martin Maechler (2019). Matrix: Sparse and Dense
Matrix Classes and Methods. {R} package version 1.2-18.
\url{https://CRAN.R-project.org/package=Matrix}
}
\author{Douglas Bates and Martin Maechler (class adapted from the \pkg{Matrix} package)}
\seealso{\code{\linkS4class{brobmat}}}
\examples{
showClass("index")
}
\keyword{classes}
| /man/index-class.Rd | no_license | RobinHankin/Brobdingnag | R | false | false | 1,116 | rd | \name{index-class}
\Rdversion{1.1}
\docType{class}
\alias{index-class}
\title{Class \code{"index"}}
\description{
A virtual class for matrix extraction, copied from the \code{Matrix}
package.
}
\section{Objects from the Class}{A virtual Class: No objects may be created from it.}
\section{Methods}{
\describe{
\item{[}{\code{signature(x = "brobmat", i = "index", j = "index")}: ... }
\item{[}{\code{signature(x = "brobmat", i = "index", j = "missing")}: ... }
\item{[}{\code{signature(x = "brobmat", i = "missing", j = "index")}: ... }
\item{[<-}{\code{signature(x = "brobmat", i = "index", j = "index")}: ... }
\item{[<-}{\code{signature(x = "brobmat", i = "index", j = "missing")}: ... }
\item{[<-}{\code{signature(x = "brobmat", i = "missing", j = "index")}: ... }
}
}
\references{
Douglas Bates and Martin Maechler (2019). Matrix: Sparse and Dense
Matrix Classes and Methods. {R} package version 1.2-18.
\url{https://CRAN.R-project.org/package=Matrix}
}
\author{Bates and Maechler, I guess}
\seealso{\code{\linkS4class{brobmat}}}
\examples{
showClass("index")
}
\keyword{classes}
|
## 라이브러리 로드
library(tidyverse)
library(data.table)
library(readxl)
library(lubridate)
## 데이터 로드 - wfood
getwd()
wfood = data.frame()
for(i in 1:11) {
a = read_excel("./ankus-lite-wfood_110718/wfood_salsdb.xls", i)
a = as.data.frame(a)
wfood = rbind(wfood, a)
print(i)
}
b = read_excel("./ankus-lite-wfood_110718/wfood_salsdb(1).xls", 1)
b = as.data.frame(b)
wfood = rbind(wfood, b)
for(i in 1:3) {
a = read_excel("./ankus-lite-wfood_110718/wfood_salsdb(2).xls", i)
a = as.data.frame(a)
wfood = rbind(wfood, a)
print(i)
}
wfood = as.data.table(wfood)
wfood$invoicedate = as.Date(wfood$invoicedate, '%Y%m%d')
wfood = wfood %>% arrange(invoicedate) %>% as.data.table() # 날짜별 정렬
## 데이터 로드 - pub_hol
pub_hol = read_excel("./ankus-lite-wfood_113211/pubholiday.xls", 1)
pub_hol = as.data.table(pub_hol)
pub_hol$locdate = as.Date(pub_hol$locdate, "%Y%m%d")
names(pub_hol)[1] = "invoicedate"
## 조인(wfood + pub_hol = wfood)
wfood = full_join(wfood, pub_hol)
wfood$isholiday[is.na(wfood$isholiday)] = "N"
wfood = wfood %>% arrange(invoicedate) %>% as.data.table() # 날짜별 정렬
## season 파생변수 생성
wfood$season = NA
wfood$season[substr(wfood$invoicedate, 6, 7) %in% c("06","07","08")] = "summer"
wfood$season[substr(wfood$invoicedate, 6, 7) %in% c("09","10")] = "fall"
wfood$season[substr(wfood$invoicedate, 6, 7) %in% c("11","12","01","02")] = "winter"
wfood$season[substr(wfood$invoicedate, 6, 7) %in% c("03","04","05")] = "spring"
# 2019년 공휴일 지정
wfood[wfood$invoicedate == "2019-01-01",]$isholiday = "Y"
wfood[wfood$invoicedate == "2019-02-04",]$isholiday = "Y"
wfood[wfood$invoicedate == "2019-02-05",]$isholiday = "Y"
wfood[wfood$invoicedate == "2019-02-06",]$isholiday = "Y"
wfood[wfood$invoicedate == "2019-03-01",]$isholiday = "Y"
wfood[wfood$invoicedate == "2019-05-05",]$isholiday = "Y"
wfood[wfood$invoicedate == "2019-05-12",]$isholiday = "Y"
wfood
| /src/02data-load.R | permissive | HyeonjongPark/ankus-lite-ver2 | R | false | false | 1,972 | r |
## 라이브러리 로드
library(tidyverse)
library(data.table)
library(readxl)
library(lubridate)
## 데이터 로드 - wfood
getwd()
wfood = data.frame()
for(i in 1:11) {
a = read_excel("./ankus-lite-wfood_110718/wfood_salsdb.xls", i)
a = as.data.frame(a)
wfood = rbind(wfood, a)
print(i)
}
b = read_excel("./ankus-lite-wfood_110718/wfood_salsdb(1).xls", 1)
b = as.data.frame(b)
wfood = rbind(wfood, b)
for(i in 1:3) {
a = read_excel("./ankus-lite-wfood_110718/wfood_salsdb(2).xls", i)
a = as.data.frame(a)
wfood = rbind(wfood, a)
print(i)
}
wfood = as.data.table(wfood)
wfood$invoicedate = as.Date(wfood$invoicedate, '%Y%m%d')
wfood = wfood %>% arrange(invoicedate) %>% as.data.table() # 날짜별 정렬
## 데이터 로드 - pub_hol
pub_hol = read_excel("./ankus-lite-wfood_113211/pubholiday.xls", 1)
pub_hol = as.data.table(pub_hol)
pub_hol$locdate = as.Date(pub_hol$locdate, "%Y%m%d")
names(pub_hol)[1] = "invoicedate"
## 조인(wfood + pub_hol = wfood)
wfood = full_join(wfood, pub_hol)
wfood$isholiday[is.na(wfood$isholiday)] = "N"
wfood = wfood %>% arrange(invoicedate) %>% as.data.table() # 날짜별 정렬
## season 파생변수 생성
wfood$season = NA
wfood$season[substr(wfood$invoicedate, 6, 7) %in% c("06","07","08")] = "summer"
wfood$season[substr(wfood$invoicedate, 6, 7) %in% c("09","10")] = "fall"
wfood$season[substr(wfood$invoicedate, 6, 7) %in% c("11","12","01","02")] = "winter"
wfood$season[substr(wfood$invoicedate, 6, 7) %in% c("03","04","05")] = "spring"
# 2019년 공휴일 지정
wfood[wfood$invoicedate == "2019-01-01",]$isholiday = "Y"
wfood[wfood$invoicedate == "2019-02-04",]$isholiday = "Y"
wfood[wfood$invoicedate == "2019-02-05",]$isholiday = "Y"
wfood[wfood$invoicedate == "2019-02-06",]$isholiday = "Y"
wfood[wfood$invoicedate == "2019-03-01",]$isholiday = "Y"
wfood[wfood$invoicedate == "2019-05-05",]$isholiday = "Y"
wfood[wfood$invoicedate == "2019-05-12",]$isholiday = "Y"
wfood
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/collection_migrate.R
\name{collection_migrate}
\alias{collection_migrate}
\title{Migrate documents to another collection}
\usage{
collection_migrate(conn, name, target.collection, split.key,
forward.timeout = NULL, async = NULL, raw = FALSE,
callopts = list())
}
\arguments{
\item{conn}{A solrium connection object, see \link{SolrClient}}
\item{name}{(character) The name of the core to be created. Required}
\item{target.collection}{(character) Required. The name of the target collection
to which documents will be migrated}
\item{split.key}{(character) Required. The routing key prefix. For example, if
uniqueKey is a!123, then you would use split.key=a!}
\item{forward.timeout}{(integer) The timeout (seconds), until which write requests
made to the source collection for the given \code{split.key} will be forwarded to the
target shard. Default: 60}
\item{async}{(character) Request ID to track this action which will be processed
asynchronously}
\item{raw}{(logical) If \code{TRUE}, returns raw data}
\item{callopts}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
}
\description{
Migrate documents to another collection
}
\examples{
\dontrun{
(conn <- SolrClient$new())
# create collection
if (!conn$collection_exists("migrate_from")) {
conn$collection_create(name = "migrate_from")
# OR: bin/solr create -c migrate_from
}
# create another collection
if (!conn$collection_exists("migrate_to")) {
conn$collection_create(name = "migrate_to")
# OR bin/solr create -c migrate_to
}
# add some documents
file <- system.file("examples", "books.csv", package = "solrium")
x <- read.csv(file, stringsAsFactors = FALSE)
conn$add(x, "migrate_from")
# migrate some documents from one collection to the other
## FIXME - not sure if this is actually working....
# conn$collection_migrate("migrate_from", "migrate_to", split.key = "05535")
}
}
| /man/collection_migrate.Rd | permissive | abhik1368/solrium | R | false | true | 1,957 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/collection_migrate.R
\name{collection_migrate}
\alias{collection_migrate}
\title{Migrate documents to another collection}
\usage{
collection_migrate(conn, name, target.collection, split.key,
forward.timeout = NULL, async = NULL, raw = FALSE,
callopts = list())
}
\arguments{
\item{conn}{A solrium connection object, see \link{SolrClient}}
\item{name}{(character) The name of the core to be created. Required}
\item{target.collection}{(character) Required. The name of the target collection
to which documents will be migrated}
\item{split.key}{(character) Required. The routing key prefix. For example, if
uniqueKey is a!123, then you would use split.key=a!}
\item{forward.timeout}{(integer) The timeout (seconds), until which write requests
made to the source collection for the given \code{split.key} will be forwarded to the
target shard. Default: 60}
\item{async}{(character) Request ID to track this action which will be processed
asynchronously}
\item{raw}{(logical) If \code{TRUE}, returns raw data}
\item{callopts}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
}
\description{
Migrate documents to another collection
}
\examples{
\dontrun{
(conn <- SolrClient$new())
# create collection
if (!conn$collection_exists("migrate_from")) {
conn$collection_create(name = "migrate_from")
# OR: bin/solr create -c migrate_from
}
# create another collection
if (!conn$collection_exists("migrate_to")) {
conn$collection_create(name = "migrate_to")
# OR bin/solr create -c migrate_to
}
# add some documents
file <- system.file("examples", "books.csv", package = "solrium")
x <- read.csv(file, stringsAsFactors = FALSE)
conn$add(x, "migrate_from")
# migrate some documents from one collection to the other
## FIXME - not sure if this is actually working....
# conn$collection_migrate("migrate_from", "migrate_to", split.key = "05535")
}
}
|
library(magrittr)
library(tidyverse)
library(tidyquant)
library(timetk)
library(ggfortify)
library(forecast)
library(tidyverse)
library(timetk)
library(egg)
library(readr )
# get time series for quarterly real private fixed investment
# (FRED series B007RA3Q086SBEA, 1947Q1 through 2015Q2)
rpfi <-
  tq_get("B007RA3Q086SBEA", get = "economic.data", from = "1947-01-01", to = "2015-04-01")
str(rpfi)
glimpse(rpfi)
# dlrpfi = first difference of logs, i.e. the quarterly log growth rate
rpfi_tbl <-
  rpfi %>%
  rename(rpfi = price) %>%
  mutate(dlrpfi = log(rpfi) - lag(log(rpfi)))
# plot time series - using ggplot (level and log change stacked in facets)
rpfi_tbl %>%
  gather(measure, value, c(rpfi, dlrpfi)) %>%
  ggplot(aes(x = date, y = value)) +
  geom_line() +
  # geom_hline(yintercept = 0, color = "gray50") +
  labs(x = "", y = "",
       title = "Real Private Fixed Investment, Seasonally Adjusted Annual Rate") +
  facet_wrap(~measure, ncol = 1, scales = "free",
             labeller = labeller(measure = c(dlrpfi = "Log Change",
                                             rpfi = "Thousands of Dollars"))) +
  theme(strip.text = element_text(hjust = 0))
# convert the log-change series to xts, dropping the leading NA
# NOTE(review): this xts object `dlrpfi` shadows the column name dlrpfi
dlrpfi <- rpfi_tbl %>%
  filter(!is.na(dlrpfi)) %>%
  tk_xts(date_var = date, select = dlrpfi)
str(dlrpfi)
# number of lags for ACF and PACF plots
nlags <- 24
ggAcf(dlrpfi, lag.max = nlags)
ggPacf(dlrpfi, lag.max = nlags)
# plot ACF and PACF together in same figure
g1 <- ggAcf(dlrpfi, lag.max = nlags)
g2 <- ggPacf(dlrpfi, lag.max = nlags)
ggarrange(g1, g2, ncol = 1)
# candidate AR(p), MA(q) and ARMA(1,1) fits; printing each object shows
# the coefficient estimates and AIC
ar1 <- arima(dlrpfi, order=c(1,0,0))
ar1
ar2 <- arima(dlrpfi, order=c(2,0,0))
ar2
ma1 <- arima(dlrpfi, order=c(0,0,1))
ma1
ma2 <- arima(dlrpfi, order=c(0,0,2))
ma2
ma3 <-arima(dlrpfi, order=c(0,0,3))
ma3
ma4 <-arima(dlrpfi, order=c(0,0,4))
ma4
ARIMA1 <- arima(dlrpfi, order=c(1,0,1))
ARIMA1
# estimate ARMA models by using ggtsdiag (residual diagnostics)
m1 <- arima(dlrpfi, order=c(1,0,0))
m1
ggtsdiag(m1, gof.lag = nlags)
# automatic order selection by BIC over a full (non-stepwise) search ...
ARIMA3 <- auto.arima(dlrpfi, ic = "bic", stationary = TRUE, seasonal = FALSE, approximation = FALSE, stepwise = FALSE)
ARIMA3
ggtsdiag(ARIMA3, gof.lag = nlags)
# ... then by AICc. NOTE(review): this overwrites the BIC fit above;
# use a distinct name if both models are needed later.
ARIMA3 <- auto.arima(dlrpfi, ic = "aicc", stationary = TRUE, seasonal = FALSE, approximation = FALSE, stepwise = FALSE)
ARIMA3
ggtsdiag(ARIMA3, gof.lag = nlags)
| /students/alsubaie/alsubaie_ec5316_HW3-Q2.r | no_license | Alsubaie1989/TTU-ECO5316--1 | R | false | false | 2,108 | r |
library(magrittr)
library(tidyverse)
library(tidyquant)
library(timetk)
library(ggfortify)
library(forecast)
library(tidyverse)
library(timetk)
library(egg)
library(readr )
# get time series for quarterly
rpfi <-
tq_get("B007RA3Q086SBEA", get = "economic.data", from = "1947-01-01", to = "2015-04-01")
str(rpfi)
glimpse(rpfi)
rpfi_tbl <-
rpfi %>%
rename(rpfi = price) %>%
mutate(dlrpfi = log(rpfi) - lag(log(rpfi)))
# plot time series - using ggplot
rpfi_tbl %>%
gather(measure, value, c(rpfi, dlrpfi)) %>%
ggplot(aes(x = date, y = value)) +
geom_line() +
# geom_hline(yintercept = 0, color = "gray50") +
labs(x = "", y = "",
title = "Real Private Fixed Investment, Seasonally Adjusted Annual Rate") +
facet_wrap(~measure, ncol = 1, scales = "free",
labeller = labeller(measure = c(dlrpfi = "Log Change",
rpfi = "Thousands of Dollars"))) +
theme(strip.text = element_text(hjust = 0))
dlrpfi <- rpfi_tbl %>%
filter(!is.na(dlrpfi)) %>%
tk_xts(date_var = date, select = dlrpfi)
str(dlrpfi)
# number of lags for ACF and PACF plots
nlags <- 24
ggAcf(dlrpfi, lag.max = nlags)
ggPacf(dlrpfi, lag.max = nlags)
# plot ACF and PACF together in same figure
g1 <- ggAcf(dlrpfi, lag.max = nlags)
g2 <- ggPacf(dlrpfi, lag.max = nlags)
ggarrange(g1, g2, ncol = 1)
ar1 <- arima(dlrpfi, order=c(1,0,0))
ar1
ar2 <- arima(dlrpfi, order=c(2,0,0))
ar2
ma1 <- arima(dlrpfi, order=c(0,0,1))
ma1
ma2 <- arima(dlrpfi, order=c(0,0,2))
ma2
ma3 <-arima(dlrpfi, order=c(0,0,3))
ma3
ma4 <-arima(dlrpfi, order=c(0,0,4))
ma4
ARIMA1 <- arima(dlrpfi, order=c(1,0,1))
ARIMA1
# estimate ARMA models by using ggtsdiag
m1 <- arima(dlrpfi, order=c(1,0,0))
m1
ggtsdiag(m1, gof.lag = nlags)
ARIMA3 <- auto.arima(dlrpfi, ic = "bic", stationary = TRUE, seasonal = FALSE, approximation = FALSE, stepwise = FALSE)
ARIMA3
ggtsdiag(ARIMA3, gof.lag = nlags)
ARIMA3 <- auto.arima(dlrpfi, ic = "aicc", stationary = TRUE, seasonal = FALSE, approximation = FALSE, stepwise = FALSE)
ARIMA3
ggtsdiag(ARIMA3, gof.lag = nlags)
|
## Plot 2: Global Active Power over 2007-02-01/02 as a line chart.
##
## Run from a plain R console; expects the extracted
## "household_power_consumption.txt" to be present in ~/R.
## Side effects: changes the working directory to ~/R and writes the
## two-day extract to "household.txt" before re-reading it and drawing
## the plot on the active graphics device.
plot2 <- function() {
  ## PLEASE USE R CONSOLE; DO NOT USE R STUDIO
  setwd("~/R")

  ## Read the full data set and keep only the two target days.
  full_data <- read.delim("household_power_consumption.txt", sep = ";", header = TRUE)
  full_data$Date <- as.Date(full_data$Date, "%d/%m/%Y")
  two_days <- subset(full_data, Date > "2007-01-31")
  two_days <- subset(two_days, Date < "2007-02-03")

  ## Persist the extract and re-read it (same round-trip as the
  ## original workflow).
  write.table(two_days, file = "household.txt", sep = ";", col.names = TRUE, row.names = FALSE)
  household <- read.delim("household.txt", sep = ";", header = TRUE)

  ## Build a POSIXct time axis from the date and time columns, then plot.
  household$DateTime <- as.POSIXct(paste(household$Date, as.character(household$Time)))
  plot(household$DateTime, household$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")
}
| /plot2.R | no_license | v0dk4l1m3/ExData_Plotting1 | R | false | false | 943 | r | plot2 <- function() {
## PLEASE USE R CONSOLE; DO NOT USE R STUDIO
## first download the data and extract the txt file into the working folder (in this case ~/R)
## the next lines of codes are intended to read and extract the relevant data into a simpler table
setwd("~/R")
original <- read.delim("household_power_consumption.txt", sep = ";", header = TRUE)
original$Date <- as.Date(original$Date, "%d/%m/%Y")
extract <- subset(original, Date > "2007-01-31")
extract <- subset(extract, Date < "2007-02-03")
write.table(extract, file = "household.txt", sep = ";", col.names = TRUE, row.names = FALSE)
table <- read.delim("household.txt", sep = ";", header = TRUE)
## the next lines of codes are intended to create the second plot
table$DateTime <- as.POSIXct(paste(table$Date, as.character(table$Time)))
plot(table$DateTime, table$Global_active_power, type="l", xlab = "", ylab = "Global Active Power (kilowatts)")
}
|
# drake workflow plan: scrape report-listing pages, resolve and download
# the PDFs, then parse the subset of reports that share the "type 1"
# layout. Each target() with dynamic = map(...) branches over the
# elements of the mapped target.
the_plan <-
  drake_plan(
    # URLs of the report-listing pages
    page_links = get_doc_links(),
    # one PDF link per listing page
    pdf_links = target(
      get_pdf_link(page_links),
      dynamic = map(page_links)
    ),
    # download each PDF; elements are local paths under output/
    reports = target(
      download_report(pdf_links),
      dynamic = map(pdf_links)
    ),
    # NOTE(review): purrr::keep() expects a predicate *function* as its
    # second argument; passing the logical vector `reports %in% c(...)`
    # relies on non-standard usage -- `reports[reports %in% c(...)]` is
    # the plain-subsetting equivalent. Confirm against the purrr version
    # in use. The "quartlery"/"quartery" misspellings presumably match
    # the real downloaded file names -- do not "fix" these strings
    # without checking.
    reports_type_1 = reports %>%
      keep(reports %in% c(
        "output/quartlery-update-september-2013_1.pdf",
        "output/quartlery-update-june-2013.pdf",
        "output/4pp-adapting-climate-change-remote-roviana.pdf",
        "output/nggi-quartery-dec-2012.pdf",
        "output/quarterly-update-nggi-sept-2012.pdf",
        "output/quarterly-update-nggi-2012-june.pdf",
        "output/quarterly-update-nggi-2012-march.pdf",
        "output/nggi-quarterly-2011-dec.pdf",
        "output/nggi-quarterly-2011-sept.pdf"
      )),
    # extract tabular data from each type-1 report
    reports_type_1_data = target(
      get_type1_data(reports_type_1),
      dynamic = map(reports_type_1)
    )
  )
| /R/plan.R | no_license | MilesMcBain/carbon_au | R | false | false | 911 | r | the_plan <-
drake_plan(
page_links = get_doc_links(),
pdf_links = target(
get_pdf_link(page_links),
dynamic = map(page_links)
),
reports = target(
download_report(pdf_links),
dynamic = map(pdf_links)
),
reports_type_1 = reports %>%
keep(reports %in% c(
"output/quartlery-update-september-2013_1.pdf",
"output/quartlery-update-june-2013.pdf",
"output/4pp-adapting-climate-change-remote-roviana.pdf",
"output/nggi-quartery-dec-2012.pdf",
"output/quarterly-update-nggi-sept-2012.pdf",
"output/quarterly-update-nggi-2012-june.pdf",
"output/quarterly-update-nggi-2012-march.pdf",
"output/nggi-quarterly-2011-dec.pdf",
"output/nggi-quarterly-2011-sept.pdf"
)),
reports_type_1_data = target(
get_type1_data(reports_type_1),
dynamic = map(reports_type_1)
)
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/psrwe_ps.R
\name{rwe_cut}
\alias{rwe_cut}
\title{Create strata}
\usage{
rwe_cut(x, y = x, breaks = 5, keep_inx = NULL)
}
\arguments{
\item{x}{Vector of values based on which cut points will be determined}
\item{y}{Vector of values to be cut, default to be the same as \code{x}}
\item{breaks}{Number of cut points}
\item{keep_inx}{Indices of y that will be categorized as 1 or the largest bin
even if their values are out of range of x, i.e. the y's that will not be
trimmed}
}
\value{
A vector of stratum assignment for \code{y}. The y's that are outside
the range of \code{x} and not in \code{keep_inx} are assigned \code{NA}
in the result.
}
\description{
The cut points are chosen such that there will be equal numbers in each bin
for \code{x}. By default, values of \code{y} that are outside the range of
\code{x} will be excluded from the bins, unless they are in the
\code{keep_inx}.
}
\details{
Cut a sequence of numbers into bins.
}
\examples{
x <- rnorm(100, mean = 0, sd = 1)
y <- rnorm(1000, mean = 1, sd = 2)
rwe_cut(x, y, breaks = 5)
}
| /psrwe/man/rwe_cut.Rd | no_license | akhikolla/InformationHouse | R | false | true | 1,143 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/psrwe_ps.R
\name{rwe_cut}
\alias{rwe_cut}
\title{Create strata}
\usage{
rwe_cut(x, y = x, breaks = 5, keep_inx = NULL)
}
\arguments{
\item{x}{Vector of values based on which cut points will be determined}
\item{y}{Vector of values to be cut, default to be the same as \code{x}}
\item{breaks}{Number of cut points}
\item{keep_inx}{Indices of y that will be categorized as 1 or the largest bin
even if their values are out of range of x, i.e. the y's that will not be
trimmed}
}
\value{
A vector of stratum assignment for \code{y}. The y's that are outside
the range of \code{x} and not in \code{keep_inx} are assigned \code{NA}
in the result.
}
\description{
The cut points are chosen such that there will with equal numbers in each bin
for \code{x}. By default, values of \code{y} that are outside the range of
\code{x} will be excluded from the bins, unless they are in the
\code{keep_inx}.
}
\details{
Cut a sequence of numbers into bins.
}
\examples{
x <- rnorm(100, mean = 0, sd = 1)
y <- rnorm(1000, mean = 1, sd = 2)
rwe_cut(x, y, breaks = 5)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_spline.R
\name{get_surface}
\alias{get_surface}
\title{Get Two-Dimensional Control Net and Surface from n-dimensional Control Nets}
\usage{
get_surface(x, margin = 1:2, at, n = 100)
}
\arguments{
\item{x}{a \code{cpr_cn} object}
\item{margin}{an integer identifying the marginal of the control net to slice
along. Only used when \code{x} is a \code{cpr_cn} object.}
\item{at}{point value for marginals not defined in the \code{margin}. Only
used when \code{x} is a \code{cpr_cn} object. Expected input is a list of
length \code{length(attr(x, "bspline_list"))}. Entries for elements
\code{marginal} are ignored. If omitted, the midpoint between the boundary
knots for each marginal is used.}
\item{n}{the length of sequence to use for interpolating the spline function.}
}
\description{
Get Two-Dimensional Control Net and Surface from n-dimensional Control Nets
}
\examples{
## Extract the control net and surface from a cpr_cn object.
a_cn <- cn(pdg ~ btensor(list(day, age), df = list(15, 3), order = list(3, 2)),
data = spdg)
cn_and_surface <- get_surface(a_cn, n = 50)
str(cn_and_surface, max.level = 2)
par(mfrow = c(1, 2))
with(cn_and_surface$cn,
plot3D::persp3D(unique(Var1),
unique(Var2),
matrix(z,
nrow = length(unique(Var1)),
ncol = length(unique(Var2))),
main = "Control Net")
)
with(cn_and_surface$surface,
plot3D::persp3D(unique(Var1),
unique(Var2),
matrix(z,
nrow = length(unique(Var1)),
ncol = length(unique(Var2))),
main = "Surface")
)
}
\seealso{
\code{\link{get_spline}}
}
| /man/get_surface.Rd | no_license | dewittpe/cpr | R | false | true | 1,861 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_spline.R
\name{get_surface}
\alias{get_surface}
\title{Get Two-Dimensional Control Net and Surface from n-dimensional Control Nets}
\usage{
get_surface(x, margin = 1:2, at, n = 100)
}
\arguments{
\item{x}{a \code{cpr_cn} object}
\item{margin}{an integer identifying the marginal of the control net to slice
along. Only used when working \code{x} is a \code{cpr_cn} object.}
\item{at}{point value for marginals not defined in the \code{margin}. Only
used when \code{x} is a \code{cpr_cn} object. Expected input is a list of
length \code{length(attr(x, "bspline_list"))}. Entries for elements
\code{marginal} are ignored. If omitted, the midpoint between the boundary
knots for each marginal is used.}
\item{n}{the length of sequence to use for interpolating the spline function.}
}
\description{
Get Two-Dimensional Control Net and Surface from n-dimensional Control Nets
}
\examples{
## Extract the control net and surface from a cpr_cn object.
a_cn <- cn(pdg ~ btensor(list(day, age), df = list(15, 3), order = list(3, 2)),
data = spdg)
cn_and_surface <- get_surface(a_cn, n = 50)
str(cn_and_surface, max.level = 2)
par(mfrow = c(1, 2))
with(cn_and_surface$cn,
plot3D::persp3D(unique(Var1),
unique(Var2),
matrix(z,
nrow = length(unique(Var1)),
ncol = length(unique(Var2))),
main = "Control Net")
)
with(cn_and_surface$surface,
plot3D::persp3D(unique(Var1),
unique(Var2),
matrix(z,
nrow = length(unique(Var1)),
ncol = length(unique(Var2))),
main = "Surface")
)
}
\seealso{
\code{\link{get_spline}}
}
|
context("Write graph backups")
# These tests exercise the `write_backups = TRUE` option of create_graph():
# after each test, the number of files in the backup directory is checked.
#
# NOTE(review): every test uses the session-wide tempdir() and the expected
# counts grow by one per test (1, 2, 3, ...), so the tests depend on running
# in order within a single session. `unlink(path)` without `recursive = TRUE`
# does not remove a non-empty directory, so the on.exit cleanup never empties
# it -- which is exactly what the cumulative counts rely on. Confirm this
# coupling is intentional. `setwd(path)` is also never restored.
test_that("Graph backups for `add_balanced_tree()` works", {
  #
  # Backup from `add_balanced_tree()`
  #
  path <- tempdir()
  on.exit(unlink(path))
  setwd(path)
  create_graph(write_backups = TRUE) %>%
    add_balanced_tree(
      k = 2, h = 2)
  # first backup file in the shared tempdir
  expect_equal(
    list.files(path = path) %>% length(), 1)
})
test_that("Graph backups for `add_cycle()` works", {
  #
  # Backup from `add_cycle()`
  #
  path <- tempdir()
  on.exit(unlink(path))
  setwd(path)
  create_graph(write_backups = TRUE) %>%
    add_cycle(n = 6)
  # cumulative count across tests: 2
  expect_equal(
    list.files(path = path) %>% length(), 2)
})
test_that("Graph backups for `add_edge()` works", {
  #
  # Backup from `add_edge()`
  #
  path <- tempdir()
  on.exit(unlink(path))
  setwd(path)
  create_graph(write_backups = TRUE) %>%
    add_n_nodes(
      n = 2) %>%
    add_edge(
      from = 1,
      to = 2)
  # cumulative count across tests: 3
  expect_equal(
    list.files(path = path) %>% length(), 3)
})
# NOTE(review): the next two tests are byte-identical apart from the
# expected cumulative file count (4, then 5). The duplicate appears to
# exist only to advance the shared tempdir count -- consider giving it a
# distinct description if that is intended.
test_that("Graph backups for `add_edge_clone()` works", {
  #
  # Backup from `add_edge_clone()`
  #
  path <- tempdir()
  on.exit(unlink(path))
  setwd(path)
  create_graph(write_backups = TRUE) %>%
    add_n_nodes(
      n = 3) %>%
    add_edge(
      from = 1,
      to = 2) %>%
    add_edge_clone(
      edge = 1,
      from = 2,
      to = 3)
  expect_equal(
    list.files(path = path) %>% length(), 4)
})
test_that("Graph backups for `add_edge_clone()` works", {
  #
  # Backup from `add_edge_clone()`
  #
  path <- tempdir()
  on.exit(unlink(path))
  setwd(path)
  create_graph(write_backups = TRUE) %>%
    add_n_nodes(
      n = 3) %>%
    add_edge(
      from = 1,
      to = 2) %>%
    add_edge_clone(
      edge = 1,
      from = 2,
      to = 3)
  expect_equal(
    list.files(path = path) %>% length(), 5)
})
test_that("Graph backups for `add_edge_df()` works", {
  #
  # Backup from `add_edge_df()`
  #
  path <- tempdir()
  on.exit(unlink(path))
  setwd(path)
  # four typed/colored nodes plus three edges supplied as an edge data frame
  ndf <-
    create_node_df(
      n = 4,
      type = "letter",
      color = c(
        "red", "green",
        "grey", "blue"),
      value = c(
        3.5, 2.6, 9.4, 2.7))
  edf <-
    create_edge_df(
      from = c(1, 2, 3),
      to = c(4, 3, 1),
      rel = "leading_to")
  create_graph(
    nodes_df = ndf,
    write_backups = TRUE) %>%
    add_edge_df(
      edge_df = edf)
  # cumulative count across the shared tempdir: 6
  expect_equal(
    list.files(path = path) %>% length(), 6)
})
# Description fixed: the body tests `add_edges_from_table()`, not
# `add_edge_df()` (that is covered by the previous test). The original
# description was a copy-paste slip; the inner comment was already correct.
test_that("Graph backups for `add_edges_from_table()` works", {
  #
  # Backup from `add_edges_from_table()`
  #
  path <- tempdir()
  on.exit(unlink(path))
  setwd(path)
  create_graph(write_backups = TRUE) %>%
    add_nodes_from_table(
      table = currencies) %>%
    add_edges_from_table(
      table = usd_exchange_rates,
      from_col = from_currency,
      to_col = to_currency,
      from_to_map = iso_4217_code)
  # cumulative count across the shared tempdir: 7
  expect_equal(
    list.files(path = path) %>% length(), 7)
})
test_that("Graph backups for `add_edges_w_string()` works", {
  #
  # Backup from `add_edges_w_string()`
  #
  path <- tempdir()
  on.exit(unlink(path))
  setwd(path)
  # edges given as a single whitespace-separated "a->b" string
  create_graph(write_backups = TRUE) %>%
    add_n_nodes(n = 4) %>%
    add_edges_w_string(
      edges = "1->2 1->3 2->4 2->3")
  # cumulative count across the shared tempdir: 8
  expect_equal(
    list.files(path = path) %>% length(), 8)
})
test_that("Graph backups for `add_forward_edges_ws()` works", {
  #
  # Backup from `add_forward_edges_ws()`
  #
  path <- tempdir()
  on.exit(unlink(path))
  setwd(path)
  # needs an active edge selection, hence select_edges() before the call
  create_graph(write_backups = TRUE) %>%
    add_n_nodes(
      n = 2,
      type = "type_a",
      label = c("a_1", "a_2")) %>%
    add_edge(
      from = 1, to = 2, rel = "a") %>%
    select_edges() %>%
    add_forward_edges_ws(rel = "b")
  # cumulative count across the shared tempdir: 9
  expect_equal(
    list.files(path = path) %>% length(), 9)
})
test_that("Graph backups for `add_full_graph()` works", {
  #
  # Backup from `add_full_graph()`
  #
  path <- tempdir()
  on.exit(unlink(path))
  setwd(path)
  create_graph(write_backups = TRUE) %>%
    add_full_graph(n = 5)
  # cumulative count across the shared tempdir: 10
  expect_equal(
    list.files(path = path) %>% length(), 10)
})
test_that("Graph backups for `add_gnm_graph()` works", {
  #
  # Backup from `add_gnm_graph()`
  #
  path <- tempdir()
  on.exit(unlink(path))
  setwd(path)
  create_graph(write_backups = TRUE) %>%
    add_gnm_graph(n = 100, m = 120)
  # cumulative count across the shared tempdir: 11
  expect_equal(
    list.files(path = path) %>% length(), 11)
})
test_that("Graph backups for `add_gnp_graph()` works", {
  #
  # Backup from `add_gnp_graph()`
  #
  path <- tempdir()
  on.exit(unlink(path))
  setwd(path)
  create_graph(write_backups = TRUE) %>%
    add_gnp_graph(n = 100, p = 0.05)
  # cumulative count across the shared tempdir: 12
  expect_equal(
    list.files(path = path) %>% length(), 12)
})
test_that("Graph backups for `add_node_clones_ws()` works", {
#
# Backup from `add_node_clones_ws()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_gnm_graph(
n = 10, m = 22) %>%
select_nodes() %>%
add_node_clones_ws()
expect_equal(
list.files(path = path) %>% length(), 13)
})
test_that("Graph backups for `add_n_node_clones()` works", {
#
# Backup from `add_n_node_clones()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_gnm_graph(
n = 10, m = 22) %>%
add_n_node_clones(
n = 2,
node = 1)
expect_equal(
list.files(path = path) %>% length(), 14)
})
test_that("Graph backups for `add_global_graph_attrs()` works", {
#
# Backup from `add_global_graph_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_global_graph_attrs(
attr = "penwidth",
value = 12,
attr_type = "node")
expect_equal(
list.files(path = path) %>% length(), 15)
})
test_that("Graph backups for `delete_global_graph_attrs()` works", {
#
# Backup from `delete_global_graph_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
delete_global_graph_attrs(
attr = "outputorder",
attr_type = "graph")
expect_equal(
list.files(path = path) %>% length(), 16)
})
test_that("Graph backups for `clear_global_graph_attrs()` works", {
#
# Backup from `clear_global_graph_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
clear_global_graph_attrs()
expect_equal(
list.files(path = path) %>% length(), 17)
})
test_that("Graph backups for `add_graph_action()` works", {
#
# Backup from `add_graph_action()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_gnm_graph(
n = 10, m = 22) %>%
add_graph_action(
fcn = "set_node_attr_w_fcn",
node_attr_fcn = "get_betweenness",
column_name = "btwns",
action_name = "get_btwns")
expect_equal(
list.files(path = path) %>% length(), 18)
})
test_that("Graph backups for `delete_graph_actions()` works", {
#
# Backup from `delete_graph_actions()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_gnm_graph(
n = 10, m = 22) %>%
add_graph_action(
fcn = "set_node_attr_w_fcn",
node_attr_fcn = "get_betweenness",
column_name = "btwns",
action_name = "get_btwns") %>%
delete_graph_actions(
actions = "get_btwns")
expect_equal(
list.files(path = path) %>% length(), 19)
})
test_that("Graph backups for `reorder_graph_actions()` works", {
#
# Backup from `reorder_graph_actions()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_gnm_graph(
n = 10, m = 22) %>%
add_graph_action(
fcn = "rescale_node_attrs",
node_attr_from = "pagerank",
node_attr_to = "width",
action_name = "pgrnk_to_width") %>%
add_graph_action(
fcn = "set_node_attr_w_fcn",
node_attr_fcn = "get_pagerank",
column_name = "pagerank",
action_name = "get_pagerank") %>%
reorder_graph_actions(
indices = c(2, 1))
expect_equal(
list.files(path = path) %>% length(), 20)
})
test_that("Graph backups for `transform_to_complement_graph()` works", {
#
# Backup from `transform_to_complement_graph()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_cycle(n = 4) %>%
transform_to_complement_graph()
expect_equal(
list.files(path = path) %>% length(), 21)
})
test_that("Graph backups for `copy_node_attrs()` works", {
#
# Backup from `copy_node_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_path(
n = 4,
node_aes = node_aes(
color = "blue")) %>%
copy_node_attrs(
node_attr_from = color,
node_attr_to = color_2)
expect_equal(
list.files(path = path) %>% length(), 22)
})
test_that("Graph backups for `copy_edge_attrs()` works", {
#
# Backup from `copy_edge_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_path(
n = 4,
edge_aes = edge_aes(
color = "blue")) %>%
copy_edge_attrs(
edge_attr_from = color,
edge_attr_to = color_2)
expect_equal(
list.files(path = path) %>% length(), 23)
})
test_that("Graph backups for `colorize_node_attrs()` works", {
#
# Backup from `colorize_node_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
graph <-
create_graph(write_backups = TRUE) %>%
add_path(
n = 8,
node_data = node_data(
weight = c(
8.2, 3.7, 6.3, 9.2,
1.6, 2.5, 7.2, 5.4))) %>%
colorize_node_attrs(
node_attr_from = weight,
node_attr_to = fillcolor,
palette = "Greens",
cut_points = c(1, 3, 5, 7, 9),
alpha = 90)
expect_equal(
list.files(path = path) %>% length(), 24)
})
test_that("Graph backups for `colorize_edge_attrs()` works", {
#
# Backup from `colorize_edge_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
graph <-
create_graph(write_backups = TRUE) %>%
add_path(
n = 8,
edge_data = edge_data(
weight = c(
8.2, 3.7, 6.3, 9.2,
1.6, 2.5, 7.2))) %>%
colorize_edge_attrs(
edge_attr_from = weight,
edge_attr_to = color,
palette = "Greens",
cut_points = c(1, 3, 5, 7, 9),
alpha = 90)
expect_equal(
list.files(path = path) %>% length(), 25)
})
test_that("Graph backups for `drop_node_attrs()` works", {
#
# Backup from `drop_node_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_path(
n = 4,
node_aes = node_aes(
color = "blue")) %>%
drop_node_attrs(
node_attr = color)
expect_equal(
list.files(path = path) %>% length(), 26)
})
test_that("Graph backups for `drop_edge_attrs()` works", {
#
# Backup from `drop_edge_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_path(
n = 4,
edge_aes = edge_aes(
color = "blue")) %>%
drop_edge_attrs(
edge_attr = color)
expect_equal(
list.files(path = path) %>% length(), 27)
})
test_that("Graph backups for `join_node_attrs()` works", {
#
# Backup from `join_node_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
df <-
data.frame(
id = c(1, 2, 3, 4, 5),
values = c(5.5, 2.3, 6.3, 2.1, 8.7))
create_graph(write_backups = TRUE) %>%
add_path(n = 5) %>%
join_node_attrs(df = df)
expect_equal(
list.files(path = path) %>% length(), 28)
})
test_that("Graph backups for `join_edge_attrs()` works", {
#
# Backup from `join_edge_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
df <-
data.frame(
from = c(1, 2, 3, 4),
to = c(2, 3, 4, 5),
values = c(5.5, 2.3, 6.3, 2.1))
create_graph(write_backups = TRUE) %>%
add_path(n = 5) %>%
join_edge_attrs(df = df)
expect_equal(
list.files(path = path) %>% length(), 29)
})
test_that("Graph backups for `mutate_node_attrs()` works", {
#
# Backup from `mutate_node_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_path(
n = 4,
node_data = node_data(
weight = c(
8.2, 3.7, 6.3, 9.2))) %>%
mutate_node_attrs(
half_weight = weight / 2)
expect_equal(
list.files(path = path) %>% length(), 30)
})
test_that("Graph backups for `mutate_edge_attrs()` works", {
#
# Backup from `mutate_edge_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_path(
n = 4,
edge_data = edge_data(
weight = c(
8.2, 3.7, 6.3))) %>%
mutate_edge_attrs(
half_weight = weight / 2)
expect_equal(
list.files(path = path) %>% length(), 31)
})
test_that("Graph backups for `mutate_node_attrs_ws()` works", {
#
# Backup from `mutate_node_attrs_ws()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_path(
n = 4,
node_data = node_data(
weight = c(
8.2, 3.7, 6.3, 9.2))) %>%
select_nodes_by_id(nodes = c(1, 2)) %>%
mutate_node_attrs_ws(
half_weight = weight / 2)
expect_equal(
list.files(path = path) %>% length(), 32)
})
test_that("Graph backups for `mutate_edge_attrs_ws()` works", {
#
# Backup from `mutate_edge_attrs_ws()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_path(
n = 4,
edge_data = edge_data(
weight = c(
8.2, 3.7, 6.3))) %>%
select_edges_by_edge_id(edges = c(1, 2)) %>%
mutate_edge_attrs_ws(
half_weight = weight / 2)
expect_equal(
list.files(path = path) %>% length(), 33)
})
test_that("Graph backups for `rename_node_attrs()` works", {
#
# Backup from `rename_node_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_path(
n = 4,
node_data = node_data(
weight = c(
8.2, 3.7, 6.3, 9.2))) %>%
rename_node_attrs(
node_attr_from = weight,
node_attr_to = weight_2)
expect_equal(
list.files(path = path) %>% length(), 34)
})
test_that("Graph backups for `rename_edge_attrs()` works", {
#
# Backup from `rename_edge_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_path(
n = 4,
edge_data = edge_data(
weight = c(
8.2, 3.7, 6.3))) %>%
rename_edge_attrs(
edge_attr_from = weight,
edge_attr_to = weight_2)
expect_equal(
list.files(path = path) %>% length(), 35)
})
test_that("Graph backups for `recode_node_attrs()` works", {
#
# Backup from `recode_node_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_gnm_graph(
n = 5,
m = 10,
set_seed = 23) %>%
set_node_attrs(
node_attr = shape,
values =
c("circle", "hexagon",
"rectangle", "rectangle",
"circle")) %>%
recode_node_attrs(
node_attr_from = shape,
"circle -> square",
"rectangle -> triangle")
expect_equal(
list.files(path = path) %>% length(), 36)
})
test_that("Graph backups for `recode_edge_attrs()` works", {
#
# Backup from `recode_edge_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_path(
n = 6,
edge_data = edge_data(
values = c(
"circle", "hexagon",
"rectangle", "rectangle",
"circle"))) %>%
recode_edge_attrs(
edge_attr_from = values,
"circle -> square",
"rectangle -> triangle")
expect_equal(
list.files(path = path) %>% length(), 37)
})
test_that("Graph backups for `rescale_node_attrs()` works", {
#
# Backup from `rescale_node_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_path(
n = 4,
node_data = node_data(
weight = c(
8.2, 3.7, 6.3, 9.2))) %>%
rescale_node_attrs(
node_attr_from = weight,
to_lower_bound = 0,
to_upper_bound = 1)
expect_equal(
list.files(path = path) %>% length(), 38)
})
test_that("Graph backups for `rescale_edge_attrs()` works", {
#
# Backup from `rescale_edge_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_path(
n = 5,
edge_data = edge_data(
weight = c(
8.2, 3.7, 6.3, 9.2))) %>%
rescale_edge_attrs(
edge_attr_from = weight,
to_lower_bound = 0,
to_upper_bound = 1)
expect_equal(
list.files(path = path) %>% length(), 39)
})
test_that("Graph backups for `rev_edge_dir()` works", {
#
# Backup from `rev_edge_dir()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_path(n = 2) %>%
rev_edge_dir()
expect_equal(
list.files(path = path) %>% length(), 40)
})
test_that("Graph backups for `rev_edge_dir_ws()` works", {
#
# Backup from `rev_edge_dir_ws()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_path(n = 3) %>%
select_edges_by_edge_id(edges = 1) %>%
rev_edge_dir_ws()
expect_equal(
list.files(path = path) %>% length(), 41)
})
test_that("Graph backups for `set_node_position()` works", {
#
# Backup from `set_node_position()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_node() %>%
set_node_position(
node = 1, x = 1, y = 1)
expect_equal(
list.files(path = path) %>% length(), 42)
})
test_that("Graph backups for `nudge_node_positions_ws()` works", {
#
# Backup from `nudge_node_positions_ws()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_node() %>%
set_node_position(
node = 1, x = 1, y = 1) %>%
select_nodes() %>%
nudge_node_positions_ws(
dx = 2, dy = 0)
expect_equal(
list.files(path = path) %>% length(), 43)
})
test_that("Graph backups for `nudge_node_positions_ws()` works", {
#
# Backup from `nudge_node_positions_ws()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_node() %>%
set_node_position(
node = 1, x = 1, y = 1) %>%
select_nodes() %>%
nudge_node_positions_ws(
dx = 2, dy = 0)
expect_equal(
list.files(path = path) %>% length(), 44)
})
| /tests/testthat/test-write_graph_backup.R | permissive | OleksiyAnokhin/DiagrammeR | R | false | false | 19,181 | r | context("Write graph backups")
test_that("Graph backups for `add_balanced_tree()` works", {
#
# Backup from `add_balanced_tree()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_balanced_tree(
k = 2, h = 2)
expect_equal(
list.files(path = path) %>% length(), 1)
})
test_that("Graph backups for `add_cycle()` works", {
#
# Backup from `add_cycle()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_cycle(n = 6)
expect_equal(
list.files(path = path) %>% length(), 2)
})
test_that("Graph backups for `add_edge()` works", {
#
# Backup from `add_edge()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_n_nodes(
n = 2) %>%
add_edge(
from = 1,
to = 2)
expect_equal(
list.files(path = path) %>% length(), 3)
})
test_that("Graph backups for `add_edge_clone()` works", {
#
# Backup from `add_edge_clone()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_n_nodes(
n = 3) %>%
add_edge(
from = 1,
to = 2) %>%
add_edge_clone(
edge = 1,
from = 2,
to = 3)
expect_equal(
list.files(path = path) %>% length(), 4)
})
test_that("Graph backups for `add_edge_clone()` works", {
#
# Backup from `add_edge_clone()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_n_nodes(
n = 3) %>%
add_edge(
from = 1,
to = 2) %>%
add_edge_clone(
edge = 1,
from = 2,
to = 3)
expect_equal(
list.files(path = path) %>% length(), 5)
})
test_that("Graph backups for `add_edge_df()` works", {
#
# Backup from `add_edge_df()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
ndf <-
create_node_df(
n = 4,
type = "letter",
color = c(
"red", "green",
"grey", "blue"),
value = c(
3.5, 2.6, 9.4, 2.7))
edf <-
create_edge_df(
from = c(1, 2, 3),
to = c(4, 3, 1),
rel = "leading_to")
create_graph(
nodes_df = ndf,
write_backups = TRUE) %>%
add_edge_df(
edge_df = edf)
expect_equal(
list.files(path = path) %>% length(), 6)
})
test_that("Graph backups for `add_edge_df()` works", {
#
# Backup from `add_edges_from_table()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_nodes_from_table(
table = currencies) %>%
add_edges_from_table(
table = usd_exchange_rates,
from_col = from_currency,
to_col = to_currency,
from_to_map = iso_4217_code)
expect_equal(
list.files(path = path) %>% length(), 7)
})
test_that("Graph backups for `add_edges_w_string()` works", {
#
# Backup from `add_edges_w_string()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_n_nodes(n = 4) %>%
add_edges_w_string(
edges = "1->2 1->3 2->4 2->3")
expect_equal(
list.files(path = path) %>% length(), 8)
})
test_that("Graph backups for `add_forward_edges_ws()` works", {
#
# Backup from `add_forward_edges_ws()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_n_nodes(
n = 2,
type = "type_a",
label = c("a_1", "a_2")) %>%
add_edge(
from = 1, to = 2, rel = "a") %>%
select_edges() %>%
add_forward_edges_ws(rel = "b")
expect_equal(
list.files(path = path) %>% length(), 9)
})
test_that("Graph backups for `add_full_graph()` works", {
#
# Backup from `add_full_graph()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_full_graph(n = 5)
expect_equal(
list.files(path = path) %>% length(), 10)
})
test_that("Graph backups for `add_gnm_graph()` works", {
#
# Backup from `add_gnm_graph()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_gnm_graph(n = 100, m = 120)
expect_equal(
list.files(path = path) %>% length(), 11)
})
test_that("Graph backups for `add_gnp_graph()` works", {
#
# Backup from `add_gnp_graph()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_gnp_graph(n = 100, p = 0.05)
expect_equal(
list.files(path = path) %>% length(), 12)
})
test_that("Graph backups for `add_node_clones_ws()` works", {
#
# Backup from `add_node_clones_ws()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_gnm_graph(
n = 10, m = 22) %>%
select_nodes() %>%
add_node_clones_ws()
expect_equal(
list.files(path = path) %>% length(), 13)
})
test_that("Graph backups for `add_n_node_clones()` works", {
#
# Backup from `add_n_node_clones()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_gnm_graph(
n = 10, m = 22) %>%
add_n_node_clones(
n = 2,
node = 1)
expect_equal(
list.files(path = path) %>% length(), 14)
})
test_that("Graph backups for `add_global_graph_attrs()` works", {
#
# Backup from `add_global_graph_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_global_graph_attrs(
attr = "penwidth",
value = 12,
attr_type = "node")
expect_equal(
list.files(path = path) %>% length(), 15)
})
test_that("Graph backups for `delete_global_graph_attrs()` works", {
#
# Backup from `delete_global_graph_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
delete_global_graph_attrs(
attr = "outputorder",
attr_type = "graph")
expect_equal(
list.files(path = path) %>% length(), 16)
})
test_that("Graph backups for `clear_global_graph_attrs()` works", {
#
# Backup from `clear_global_graph_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
clear_global_graph_attrs()
expect_equal(
list.files(path = path) %>% length(), 17)
})
test_that("Graph backups for `add_graph_action()` works", {
#
# Backup from `add_graph_action()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_gnm_graph(
n = 10, m = 22) %>%
add_graph_action(
fcn = "set_node_attr_w_fcn",
node_attr_fcn = "get_betweenness",
column_name = "btwns",
action_name = "get_btwns")
expect_equal(
list.files(path = path) %>% length(), 18)
})
test_that("Graph backups for `delete_graph_actions()` works", {
#
# Backup from `delete_graph_actions()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_gnm_graph(
n = 10, m = 22) %>%
add_graph_action(
fcn = "set_node_attr_w_fcn",
node_attr_fcn = "get_betweenness",
column_name = "btwns",
action_name = "get_btwns") %>%
delete_graph_actions(
actions = "get_btwns")
expect_equal(
list.files(path = path) %>% length(), 19)
})
test_that("Graph backups for `reorder_graph_actions()` works", {
#
# Backup from `reorder_graph_actions()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_gnm_graph(
n = 10, m = 22) %>%
add_graph_action(
fcn = "rescale_node_attrs",
node_attr_from = "pagerank",
node_attr_to = "width",
action_name = "pgrnk_to_width") %>%
add_graph_action(
fcn = "set_node_attr_w_fcn",
node_attr_fcn = "get_pagerank",
column_name = "pagerank",
action_name = "get_pagerank") %>%
reorder_graph_actions(
indices = c(2, 1))
expect_equal(
list.files(path = path) %>% length(), 20)
})
test_that("Graph backups for `transform_to_complement_graph()` works", {
#
# Backup from `transform_to_complement_graph()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_cycle(n = 4) %>%
transform_to_complement_graph()
expect_equal(
list.files(path = path) %>% length(), 21)
})
test_that("Graph backups for `copy_node_attrs()` works", {
#
# Backup from `copy_node_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_path(
n = 4,
node_aes = node_aes(
color = "blue")) %>%
copy_node_attrs(
node_attr_from = color,
node_attr_to = color_2)
expect_equal(
list.files(path = path) %>% length(), 22)
})
test_that("Graph backups for `copy_edge_attrs()` works", {
#
# Backup from `copy_edge_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_path(
n = 4,
edge_aes = edge_aes(
color = "blue")) %>%
copy_edge_attrs(
edge_attr_from = color,
edge_attr_to = color_2)
expect_equal(
list.files(path = path) %>% length(), 23)
})
test_that("Graph backups for `colorize_node_attrs()` works", {
#
# Backup from `colorize_node_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
graph <-
create_graph(write_backups = TRUE) %>%
add_path(
n = 8,
node_data = node_data(
weight = c(
8.2, 3.7, 6.3, 9.2,
1.6, 2.5, 7.2, 5.4))) %>%
colorize_node_attrs(
node_attr_from = weight,
node_attr_to = fillcolor,
palette = "Greens",
cut_points = c(1, 3, 5, 7, 9),
alpha = 90)
expect_equal(
list.files(path = path) %>% length(), 24)
})
test_that("Graph backups for `colorize_edge_attrs()` works", {
#
# Backup from `colorize_edge_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
graph <-
create_graph(write_backups = TRUE) %>%
add_path(
n = 8,
edge_data = edge_data(
weight = c(
8.2, 3.7, 6.3, 9.2,
1.6, 2.5, 7.2))) %>%
colorize_edge_attrs(
edge_attr_from = weight,
edge_attr_to = color,
palette = "Greens",
cut_points = c(1, 3, 5, 7, 9),
alpha = 90)
expect_equal(
list.files(path = path) %>% length(), 25)
})
test_that("Graph backups for `drop_node_attrs()` works", {
#
# Backup from `drop_node_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_path(
n = 4,
node_aes = node_aes(
color = "blue")) %>%
drop_node_attrs(
node_attr = color)
expect_equal(
list.files(path = path) %>% length(), 26)
})
test_that("Graph backups for `drop_edge_attrs()` works", {
#
# Backup from `drop_edge_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_path(
n = 4,
edge_aes = edge_aes(
color = "blue")) %>%
drop_edge_attrs(
edge_attr = color)
expect_equal(
list.files(path = path) %>% length(), 27)
})
test_that("Graph backups for `join_node_attrs()` works", {
#
# Backup from `join_node_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
df <-
data.frame(
id = c(1, 2, 3, 4, 5),
values = c(5.5, 2.3, 6.3, 2.1, 8.7))
create_graph(write_backups = TRUE) %>%
add_path(n = 5) %>%
join_node_attrs(df = df)
expect_equal(
list.files(path = path) %>% length(), 28)
})
test_that("Graph backups for `join_edge_attrs()` works", {
#
# Backup from `join_edge_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
df <-
data.frame(
from = c(1, 2, 3, 4),
to = c(2, 3, 4, 5),
values = c(5.5, 2.3, 6.3, 2.1))
create_graph(write_backups = TRUE) %>%
add_path(n = 5) %>%
join_edge_attrs(df = df)
expect_equal(
list.files(path = path) %>% length(), 29)
})
test_that("Graph backups for `mutate_node_attrs()` works", {
#
# Backup from `mutate_node_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_path(
n = 4,
node_data = node_data(
weight = c(
8.2, 3.7, 6.3, 9.2))) %>%
mutate_node_attrs(
half_weight = weight / 2)
expect_equal(
list.files(path = path) %>% length(), 30)
})
test_that("Graph backups for `mutate_edge_attrs()` works", {
#
# Backup from `mutate_edge_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_path(
n = 4,
edge_data = edge_data(
weight = c(
8.2, 3.7, 6.3))) %>%
mutate_edge_attrs(
half_weight = weight / 2)
expect_equal(
list.files(path = path) %>% length(), 31)
})
test_that("Graph backups for `mutate_node_attrs_ws()` works", {
#
# Backup from `mutate_node_attrs_ws()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_path(
n = 4,
node_data = node_data(
weight = c(
8.2, 3.7, 6.3, 9.2))) %>%
select_nodes_by_id(nodes = c(1, 2)) %>%
mutate_node_attrs_ws(
half_weight = weight / 2)
expect_equal(
list.files(path = path) %>% length(), 32)
})
test_that("Graph backups for `mutate_edge_attrs_ws()` works", {
#
# Backup from `mutate_edge_attrs_ws()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_path(
n = 4,
edge_data = edge_data(
weight = c(
8.2, 3.7, 6.3))) %>%
select_edges_by_edge_id(edges = c(1, 2)) %>%
mutate_edge_attrs_ws(
half_weight = weight / 2)
expect_equal(
list.files(path = path) %>% length(), 33)
})
test_that("Graph backups for `rename_node_attrs()` works", {
#
# Backup from `rename_node_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_path(
n = 4,
node_data = node_data(
weight = c(
8.2, 3.7, 6.3, 9.2))) %>%
rename_node_attrs(
node_attr_from = weight,
node_attr_to = weight_2)
expect_equal(
list.files(path = path) %>% length(), 34)
})
test_that("Graph backups for `rename_edge_attrs()` works", {
#
# Backup from `rename_edge_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_path(
n = 4,
edge_data = edge_data(
weight = c(
8.2, 3.7, 6.3))) %>%
rename_edge_attrs(
edge_attr_from = weight,
edge_attr_to = weight_2)
expect_equal(
list.files(path = path) %>% length(), 35)
})
test_that("Graph backups for `recode_node_attrs()` works", {
#
# Backup from `recode_node_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_gnm_graph(
n = 5,
m = 10,
set_seed = 23) %>%
set_node_attrs(
node_attr = shape,
values =
c("circle", "hexagon",
"rectangle", "rectangle",
"circle")) %>%
recode_node_attrs(
node_attr_from = shape,
"circle -> square",
"rectangle -> triangle")
expect_equal(
list.files(path = path) %>% length(), 36)
})
test_that("Graph backups for `recode_edge_attrs()` works", {
#
# Backup from `recode_edge_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_path(
n = 6,
edge_data = edge_data(
values = c(
"circle", "hexagon",
"rectangle", "rectangle",
"circle"))) %>%
recode_edge_attrs(
edge_attr_from = values,
"circle -> square",
"rectangle -> triangle")
expect_equal(
list.files(path = path) %>% length(), 37)
})
test_that("Graph backups for `rescale_node_attrs()` works", {
#
# Backup from `rescale_node_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_path(
n = 4,
node_data = node_data(
weight = c(
8.2, 3.7, 6.3, 9.2))) %>%
rescale_node_attrs(
node_attr_from = weight,
to_lower_bound = 0,
to_upper_bound = 1)
expect_equal(
list.files(path = path) %>% length(), 38)
})
test_that("Graph backups for `rescale_edge_attrs()` works", {
#
# Backup from `rescale_edge_attrs()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_path(
n = 5,
edge_data = edge_data(
weight = c(
8.2, 3.7, 6.3, 9.2))) %>%
rescale_edge_attrs(
edge_attr_from = weight,
to_lower_bound = 0,
to_upper_bound = 1)
expect_equal(
list.files(path = path) %>% length(), 39)
})
test_that("Graph backups for `rev_edge_dir()` works", {
#
# Backup from `rev_edge_dir()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_path(n = 2) %>%
rev_edge_dir()
expect_equal(
list.files(path = path) %>% length(), 40)
})
test_that("Graph backups for `rev_edge_dir_ws()` works", {
#
# Backup from `rev_edge_dir_ws()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_path(n = 3) %>%
select_edges_by_edge_id(edges = 1) %>%
rev_edge_dir_ws()
expect_equal(
list.files(path = path) %>% length(), 41)
})
test_that("Graph backups for `set_node_position()` works", {
#
# Backup from `set_node_position()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_node() %>%
set_node_position(
node = 1, x = 1, y = 1)
expect_equal(
list.files(path = path) %>% length(), 42)
})
test_that("Graph backups for `nudge_node_positions_ws()` works", {
#
# Backup from `nudge_node_positions_ws()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_node() %>%
set_node_position(
node = 1, x = 1, y = 1) %>%
select_nodes() %>%
nudge_node_positions_ws(
dx = 2, dy = 0)
expect_equal(
list.files(path = path) %>% length(), 43)
})
test_that("Graph backups for `nudge_node_positions_ws()` works", {
#
# Backup from `nudge_node_positions_ws()`
#
path <- tempdir()
on.exit(unlink(path))
setwd(path)
create_graph(write_backups = TRUE) %>%
add_node() %>%
set_node_position(
node = 1, x = 1, y = 1) %>%
select_nodes() %>%
nudge_node_positions_ws(
dx = 2, dy = 0)
expect_equal(
list.files(path = path) %>% length(), 44)
})
|
# Duke University Co-lab Shiny Workshop, Session 1, Spring 2020
# Shiny App
# Visually explore cross-sectional features of highly aggregated U.S. federal employee data
# Version 1, R scripts to be converted to Shiny
# Session options: cap console printing, keep strings as character, avoid scientific notation.
options(max.print=1000) # number of elements, not rows
options(stringsAsFactors=F) # NOTE(review): prefer the full FALSE - F is a reassignable binding
options(scipen=999999) # strongly discourage scientific notation in printed numbers
#options(device="windows")
library(shiny)
library(ggplot2)
#######################################################################################################
# Read U.S. Office of Personnel Management Central Personnel Data File (CPDF)
# Source: Buzzfeed (https://www.buzzfeednews.com/article/jsvine/sharing-hundreds-of-millions-of-federal-payroll-records)
# Limited to general schedule (GS) grades 1 through 15, fiscal years 1988 through 2011, full-time employees
# OPM codebook: https://www.opm.gov/policy-data-oversight/data-analysis-documentation/data-policy-guidance/reporting-guidance/part-a-human-resources.pdf
# Columns:
# fy ........... U.S. federal government fiscal year
# agency ....... federal agency employed (synthetically generated for workshop)
# age .......... employee age (five year increments, noised induced by OPM)
# grade ........ general schedule (GS) grade
# occCat ....... occupational category
# yearsEd ...... years of education
# n ............ number of observations (employees) in fy, agency, age, grade, occCat, yearsEd combination
# sumPay ....... sum of basic pay in fy, agency, age, grade, occCat, yearsEd combination (in 2011 $U.S.)
# There is one record for each unique combination of fy, agency, age, grade, occCat, yearsEd combination
# n and sumPay are aggregated within fy, agency, age, grade, occCat, yearsEd combinations
#######################################################################################################
#######################################################################################################
# Read observations
#######################################################################################################
# Local
# NOTE(review): hard-coded absolute path - adjust (or parameterize) for each machine
setwd("C:\\Projects\\Duke\\Co-lab\\Shiny\\Session-1-NPDHist-CPDF\\App\\CPDF")
# RStudio Cloud
#setwd("/cloud/project/Duke-Co-lab/Shiny/Session-1-NPDHist-CPDF/App/CPDF")
cpdf <- read.table(gzfile("CPDFAggregateDataBuzzfeed-Agency.csv.gz"), header=T, sep=",", strip.white=T)
# Compute mean pay per category (all employees in category assigned identical, mean, pay)
cpdf[,"pay"] <- cpdf[,"sumPay"]/cpdf[,"n"]
#######################################################################################################
# Configure theme and axis labels for plots
#######################################################################################################
# Shared ggplot theme: blank panel chrome, small fonts, rotated x labels, legend along the bottom
ggTheme <- ggplot2::theme(plot.title=element_text(size=12, hjust=0.5),
#plot.caption=element_text(size=12, hjust=0.5),
panel.background=element_blank(),
panel.grid.major.x=element_blank(),
panel.grid.major.y=element_blank(),
panel.grid.minor=element_blank(),
panel.border=element_rect(fill=NA, color="gray75"),
panel.spacing=unit(0, "inches"),
axis.title.x=element_text(size=10),
axis.title.y=element_text(size=10),
axis.text.x=element_text(size=8, angle=90, hjust=1, vjust=0.5),
axis.text.y=element_text(size=8),
#axis.ticks=element_blank(),
strip.text=element_text(size=8),
strip.background=element_blank(),
legend.position="bottom",
legend.background=element_rect(color="gray"),
legend.key=element_rect(fill="white"),
legend.box="horizontal",
legend.text=element_text(size=8),
legend.title=element_text(size=8))
typeof(ggTheme) # prints "list" - a theme is a plain list and can be modified after creation
# NOTE(review): the "<sub>" markup below is not rendered by stock ggplot2 axis titles;
# it requires something like ggtext::element_markdown - confirm intent.
ggLabs <- labs(x="\nfiscal year", y="pay<sub>mean</sub>\n")
typeof(ggLabs)
#######################################################################################################
# Mean pay by year (basic plot)
#######################################################################################################
# Weighted mean pay per fiscal year: total pay divided by total headcount within each fy
# (rows are pre-aggregated cells, so a plain mean of "pay" would be wrong)
payByYear <- aggregate(
  seq_len(nrow(cpdf)),
  by = list(cpdf[, "fy"]),
  FUN = function(idx) sum(cpdf[idx, "sumPay"]) / sum(cpdf[idx, "n"])
)
colnames(payByYear) <- c("fy", "meanPay")
# Single line of mean pay against fiscal year; expression auto-prints at top level
ggplot(data = payByYear) +
  geom_line(aes(x = fy, y = meanPay)) +
  scale_y_continuous(labels = function(x) format(x, big.mark = ",")) +
  ggTheme +
  ggLabs
#######################################################################################################
# Mean pay by year (differentiating line types)
#######################################################################################################
# Specify line type differentiation variable (NULL for no differentiation)
diffVar <- "agency"
# Aggregate mean pay by fy and differentiation variable (if one specified)
# Weighted mean: sum of pay over sum of headcount within each group
gdat <- aggregate(1:nrow(cpdf),
                  by=lapply(c("fy", diffVar), function(v) cpdf[,v]),
                  function(i) sum(cpdf[i,"sumPay"])/sum(cpdf[i,"n"]))
colnames(gdat) <- c("fy", diffVar, "meanPay")
# Initialize plot
g <- ggplot()
typeof(g) # g is just a list and is modifiable - print() renders the actual plot
# Include line(s)
if(is.null(diffVar)) {
  g <- g + geom_line(data=gdat, aes(x=fy, y=meanPay))
} else {
  # BUG FIX: key the linetype values by the levels of diffVar rather than the
  # hard-coded "agency" column, so changing diffVar above still works.
  # NOTE(review): assumes diffVar has at most three levels - confirm if reused.
  g <- g + geom_line(data=gdat, aes_string(x="fy", y="meanPay", linetype=diffVar)) +
    scale_linetype_manual(values=setNames(c("22", "44", "solid"), unique(gdat[,diffVar])))
}
# Configure axis labels and apply theme
g <- g + scale_y_continuous(labels=function(x) format(x, big.mark=",")) +
  ggTheme +
  ggLabs
# Render
print(g)
#######################################################################################################
# Mean pay by year (differentiating color with faceting)
#######################################################################################################
# Color differentiation variable (NULL for no differentiation)
diffVar <- "occCat"
# Facet variable: one panel per level; NULL for a single un-faceted plot
panelVar <- "agency"
panelRows <- NULL
panelCols <- 1
# Weighted mean pay within each fy x diffVar x panelVar cell
gdat <- aggregate(seq_len(nrow(cpdf)),
                  by=lapply(c("fy", diffVar, panelVar), function(v) cpdf[, v]),
                  FUN=function(i) sum(cpdf[i, "sumPay"])/sum(cpdf[i, "n"]))
colnames(gdat) <- c("fy", diffVar, panelVar, "meanPay")
# Build the plot incrementally
g <- ggplot()
if(!is.null(diffVar)) {
  # One color per level of diffVar, interpolated between blue and red
  nLevels <- length(unique(gdat[, diffVar]))
  g <- g + geom_line(data=gdat, aes_string(x="fy", y="meanPay", color=diffVar)) +
    scale_color_manual(values=colorRampPalette(c("blue", "red"))(nLevels))
} else {
  g <- g + geom_line(data=gdat, aes(x=fy, y=meanPay))
}
# Facet, if requested
if(!is.null(panelVar))
  g <- g + facet_wrap(panelVar, nrow=panelRows, ncol=panelCols,
                      labeller=as_labeller(function(x) paste(panelVar, " = ", x, sep="")))
# Configure axis labels and apply theme
g <- g + scale_y_continuous(labels=function(x) format(x, big.mark=",")) +
  ggTheme +
  ggLabs
print(g)
#######################################################################################################
# Mean of specified var dependent on one independent var (differentiating color with faceting)
#######################################################################################################
# Specify color differentiation variable (NULL for no differentiation)
diffVar <- "occCat"
# Specify facet variable
# One facet panel for each level of the variable will be generated
# Specify NULL for no panels
panelVar <- "yearsEd"
panelRows <- NULL
panelCols <- NULL
# Aggregate mean of dependent var by independent var, differentiation var, and facet var
depVar <- "sumPay"
depVarMean <- paste("mean_", depVar, sep="")
indepVar <- "fy"
# n-weighted mean of depVar within each indepVar x diffVar x panelVar cell
# NOTE(review): with depVar="sumPay" this weights a cell *total* by n; if a per-employee
# mean is intended, depVar="pay" (computed at load time) may be the better choice - confirm.
gdat <- aggregate(1:nrow(cpdf),
                  by=lapply(c(indepVar, diffVar, panelVar), function(v) cpdf[,v]),
                  function(i) sum(cpdf[i,depVar]*cpdf[i,"n"])/sum(cpdf[i,"n"]))
colnames(gdat) <- c(indepVar, diffVar, panelVar, depVarMean)
# Order occupational categories in standard P, A, T, C, O sequence, if present
if("occCat" %in% colnames(gdat))
  gdat[,"occCat"] <- factor(gdat[,"occCat"], levels=c("P", "A", "T", "C", "O"))
# Specify graph type, point size, and transparency
gType <- "line"
pointSize <- 3
pointAlpha <- 0.5
# Initialize plot
g <- ggplot()
# Include points or lines
if(gType=="point") {
  if(is.null(diffVar)) {
    g <- g + geom_point(data=gdat, aes_string(x=indepVar, y=depVarMean))
  } else {
    # BUG FIX: the original referenced undefined dotSize/dotAlpha here; use the
    # pointSize/pointAlpha values configured above.
    g <- g + geom_point(data=gdat, aes_string(x=indepVar, y=depVarMean, color=diffVar),
                        size=pointSize, alpha=pointAlpha) +
      scale_color_manual(values=colorRampPalette(c("blue", "red"))(length(unique(gdat[,diffVar]))))
  }
} else {
  if(is.null(diffVar)) {
    g <- g + geom_line(data=gdat, aes_string(x=indepVar, y=depVarMean))
  } else {
    g <- g + geom_line(data=gdat, aes_string(x=indepVar, y=depVarMean, color=diffVar)) +
      scale_color_manual(values=colorRampPalette(c("blue", "red"))(length(unique(gdat[,diffVar]))))
  }
}
# Facet, if requested
if(!is.null(panelVar))
  g <- g + facet_wrap(panelVar, nrow=panelRows, ncol=panelCols,
                      labeller=as_labeller(function(x) paste(panelVar, " = ", x, sep="")))
# Configure axis labels and apply theme
g <- g + scale_y_continuous(labels=function(x) format(x, big.mark=",")) +
  ggTheme +
  labs(x=paste("\n", indepVar, sep=""), y=paste(depVar, "<sub>mean</sub>\n", sep=""))
print(g)
#######################################################################################################
# Distribution of observations (employees) by one variable, paneled by another
#######################################################################################################
# Independent (x-axis) and facet variables
indepVar <- "grade"
panelVar <- "fy"
panelRows <- NULL
panelCols <- NULL
loessSpan <- 0.75
# Observed probability mass of indepVar, overall or within each panel level
if(!is.null(panelVar)) {
  # Employee counts by indepVar within panelVar
  gdat <- aggregate(cpdf[,"n"], by=list(cpdf[,indepVar], cpdf[,panelVar]), sum)
  colnames(gdat) <- c(indepVar, panelVar, "n")
  # Panel totals, used as denominators
  panelTotal <- aggregate(gdat[,"n"], by=list(gdat[,panelVar]), sum)
  colnames(panelTotal) <- c(panelVar, "n")
  # merge() yields n.x (cell count) and n.y (panel total)
  gdat <- merge(gdat, panelTotal, by.x=panelVar, by.y=panelVar)
  gdat <- data.frame(gdat[,c(panelVar, indepVar, "n.x")], "p"=gdat[,"n.x"]/gdat[,"n.y"])
  colnames(gdat) <- c(panelVar, indepVar, "n", "p")
} else {
  # Single overall distribution: share of all employees at each level of indepVar
  nTotal <- sum(cpdf[,"n"])
  gdat <- aggregate(cpdf[,"n"], by=list(cpdf[,indepVar]), function(n) sum(n)/nTotal)
  colnames(gdat) <- c(indepVar, "p")
}
# Smoothed (loess) probability mass curve through the observed masses
g <- ggplot() +
  geom_smooth(data=gdat, aes_string(x=indepVar, y="p"),
              method="loess", se=F, span=loessSpan, fullrange=T, color="Black", size=0.6)
# Facet, if requested
if(!is.null(panelVar))
  g <- g + facet_wrap(panelVar, nrow=panelRows, ncol=panelCols,
                      labeller=as_labeller(function(x) paste(panelVar, " = ", x, sep="")))
# Axis labels and theme
g <- g + ggTheme +
  labs(x=paste("\n", indepVar, sep=""), y="P<sub>mass</sub>\n")
# Render
print(g)
| /Session-1/App/CPDF/CPDF-1.r | no_license | tbalmat/Duke-Co-lab | R | false | false | 11,691 | r | # Duke University Co-lab Shiny Workshop, Session 1, Spring 2020
# Shiny App
# Visually explore cross-sectional features of highly aggregated U.S. federal employee data
# Version 1, R scripts to be converted to Shiny
# Session options: cap console printing, keep strings as character, avoid scientific notation.
options(max.print=1000) # number of elements, not rows
options(stringsAsFactors=F) # NOTE(review): prefer the full FALSE - F is a reassignable binding
options(scipen=999999)
#options(device="windows")
library(shiny)
library(ggplot2)
#######################################################################################################
# Read U.S. Office of Personnel Management Central Personnel Data File (CPDF)
# Source: Buzzfeed (https://www.buzzfeednews.com/article/jsvine/sharing-hundreds-of-millions-of-federal-payroll-records)
# Limited to general schedule (GS) grades 1 through 15, fiscal years 1988 through 2011, full-time employees
# OPM codebook: https://www.opm.gov/policy-data-oversight/data-analysis-documentation/data-policy-guidance/reporting-guidance/part-a-human-resources.pdf
# Columns:
# fy ........... U.S. federal government fiscal year
# agency ....... federal agency employed (synthetically generated for workshop)
# age .......... employee age (five year increments, noised induced by OPM)
# grade ........ general schedule (GS) grade
# occCat ....... occupational category
# yearsEd ...... years of education
# n ............ number of observations (employees) in fy, agency, age, grade, occCat, yearsEd combination
# sumPay ....... sum of basic pay in fy, agency, age, grade, occCat, yearsEd combination (in 2011 $U.S.)
# There is one record for each unique combination of fy, agency, age, grade, occCat, yearsEd combination
# n and sumPay are aggregated within fy, agency, age, grade, occCat, yearsEd combinations
#######################################################################################################
#######################################################################################################
# Read observations
#######################################################################################################
# Local
# NOTE(review): hard-coded absolute path - adjust (or parameterize) for each machine
setwd("C:\\Projects\\Duke\\Co-lab\\Shiny\\Session-1-NPDHist-CPDF\\App\\CPDF")
# RStudio Cloud
#setwd("/cloud/project/Duke-Co-lab/Shiny/Session-1-NPDHist-CPDF/App/CPDF")
cpdf <- read.table(gzfile("CPDFAggregateDataBuzzfeed-Agency.csv.gz"), header=T, sep=",", strip.white=T)
# Compute mean pay per category (all employees in category assigned identical, mean, pay)
cpdf[,"pay"] <- cpdf[,"sumPay"]/cpdf[,"n"]
#######################################################################################################
# Configure theme and axis labels for plots
#######################################################################################################
# Shared ggplot theme: blank panel chrome, small fonts, rotated x labels, legend along the bottom
ggTheme <- ggplot2::theme(plot.title=element_text(size=12, hjust=0.5),
#plot.caption=element_text(size=12, hjust=0.5),
panel.background=element_blank(),
panel.grid.major.x=element_blank(),
panel.grid.major.y=element_blank(),
panel.grid.minor=element_blank(),
panel.border=element_rect(fill=NA, color="gray75"),
panel.spacing=unit(0, "inches"),
axis.title.x=element_text(size=10),
axis.title.y=element_text(size=10),
axis.text.x=element_text(size=8, angle=90, hjust=1, vjust=0.5),
axis.text.y=element_text(size=8),
#axis.ticks=element_blank(),
strip.text=element_text(size=8),
strip.background=element_blank(),
legend.position="bottom",
legend.background=element_rect(color="gray"),
legend.key=element_rect(fill="white"),
legend.box="horizontal",
legend.text=element_text(size=8),
legend.title=element_text(size=8))
typeof(ggTheme) # prints "list" - a theme is a plain list and can be modified after creation
# NOTE(review): the "<sub>" markup below is not rendered by stock ggplot2 axis titles;
# it requires something like ggtext::element_markdown - confirm intent.
ggLabs <- labs(x="\nfiscal year", y="pay<sub>mean</sub>\n")
typeof(ggLabs)
#######################################################################################################
# Mean pay by year (basic plot)
#######################################################################################################
# Aggregate mean pay by fy
# (weighted mean: total pay over total headcount - rows are pre-aggregated cells)
gdat <- aggregate(1:nrow(cpdf),
by=list(cpdf[,"fy"]),
function(i) sum(cpdf[i,"sumPay"])/sum(cpdf[i,"n"]))
colnames(gdat) <- c("fy", "meanPay")
# Compose basic plot (the ggplot expression auto-prints when run at top level)
ggplot(data=gdat) +
geom_line(aes(x=fy, y=meanPay)) +
scale_y_continuous(labels=function(x) format(x, big.mark=",")) +
ggTheme +
ggLabs
#######################################################################################################
# Mean pay by year (differentiating line types)
#######################################################################################################
# Specify line type differentiation variable (NULL for no differentiation)
diffVar <- "agency"
# Aggregate mean pay by fy and differentiation variable (if one specified)
# Weighted mean: sum of pay over sum of headcount within each group
gdat <- aggregate(1:nrow(cpdf),
                  by=lapply(c("fy", diffVar), function(v) cpdf[,v]),
                  function(i) sum(cpdf[i,"sumPay"])/sum(cpdf[i,"n"]))
colnames(gdat) <- c("fy", diffVar, "meanPay")
# Initialize plot
g <- ggplot()
typeof(g) # G is just a list and is modifiable - print() renders the actual plot
# Include line(s)
if(is.null(diffVar)) {
  g <- g + geom_line(data=gdat, aes(x=fy, y=meanPay))
} else {
  # BUG FIX: key the linetype values by the levels of diffVar rather than the
  # hard-coded "agency" column, so changing diffVar above still works.
  # NOTE(review): assumes diffVar has at most three levels - confirm if reused.
  g <- g + geom_line(data=gdat, aes_string(x="fy", y="meanPay", linetype=diffVar)) +
    scale_linetype_manual(values=setNames(c("22", "44", "solid"), unique(gdat[,diffVar])))
}
# Configure axis labels and apply theme
g <- g + scale_y_continuous(labels=function(x) format(x, big.mark=",")) +
  ggTheme +
  ggLabs
# Render
print(g)
#######################################################################################################
# Mean pay by year (differentiating color with faceting)
#######################################################################################################
# Specify color differentiation variable (NULL for no differentiation)
diffVar <- "occCat"
# Specify facet variable
# One facet panel for each level of the variable will be generated
# Specify NULL for no panels
panelVar <- "agency"
panelRows <- NULL
panelCols <- 1
# Aggregate mean pay by fy, differentiation variable, and facet variable
gdat <- aggregate(1:nrow(cpdf),
by=lapply(c("fy", diffVar, panelVar), function(v) cpdf[,v]),
function(i) sum(cpdf[i,"sumPay"])/sum(cpdf[i,"n"]))
colnames(gdat) <- c("fy", diffVar, panelVar, "meanPay")
# Initialize plot
g <- ggplot()
# Include line(s)
if(is.null(diffVar)) {
g <- g + geom_line(data=gdat, aes(x=fy, y=meanPay))
} else {
# One color per level of diffVar, interpolated between blue and red
g <- g + geom_line(data=gdat, aes_string(x="fy", y="meanPay", color=diffVar)) +
scale_color_manual(values=colorRampPalette(c("blue", "red"))(length(unique(gdat[,diffVar]))))
}
# Facet, if requested
if(!is.null(panelVar))
g <- g + facet_wrap(panelVar, nrow=panelRows, ncol=panelCols,
labeller=as_labeller(function(x) paste(panelVar, " = ", x, sep="")))
# Configure axis labels and apply theme
g <- g + scale_y_continuous(labels=function(x) format(x, big.mark=",")) +
ggTheme +
ggLabs
print(g)
#######################################################################################################
# Mean of specified var dependent on one independent var (differentiating color with faceting)
#######################################################################################################
# Specify color differentiation variable (NULL for no differentiation)
diffVar <- "occCat"
# Specify facet variable
# One facet panel for each level of the variable will be generated
# Specify NULL for no panels
panelVar <- "yearsEd"
panelRows <- NULL
panelCols <- NULL
# Aggregate mean of dependent var by independent var, differentiation var, and facet var
depVar <- "sumPay"
depVarMean <- paste("mean_", depVar, sep="")
indepVar <- "fy"
# n-weighted mean of depVar within each indepVar x diffVar x panelVar cell
# NOTE(review): with depVar="sumPay" this weights a cell *total* by n; if a per-employee
# mean is intended, depVar="pay" (computed at load time) may be the better choice - confirm.
gdat <- aggregate(1:nrow(cpdf),
                  by=lapply(c(indepVar, diffVar, panelVar), function(v) cpdf[,v]),
                  function(i) sum(cpdf[i,depVar]*cpdf[i,"n"])/sum(cpdf[i,"n"]))
colnames(gdat) <- c(indepVar, diffVar, panelVar, depVarMean)
# Order occupational categories in standard P, A, T, C, O sequence, if present
if("occCat" %in% colnames(gdat))
  gdat[,"occCat"] <- factor(gdat[,"occCat"], levels=c("P", "A", "T", "C", "O"))
# Specify graph type, point size, and transparency
gType <- "line"
pointSize <- 3
pointAlpha <- 0.5
# Initialize plot
g <- ggplot()
# Include points or lines
if(gType=="point") {
  if(is.null(diffVar)) {
    g <- g + geom_point(data=gdat, aes_string(x=indepVar, y=depVarMean))
  } else {
    # BUG FIX: the original referenced undefined dotSize/dotAlpha here; use the
    # pointSize/pointAlpha values configured above.
    g <- g + geom_point(data=gdat, aes_string(x=indepVar, y=depVarMean, color=diffVar),
                        size=pointSize, alpha=pointAlpha) +
      scale_color_manual(values=colorRampPalette(c("blue", "red"))(length(unique(gdat[,diffVar]))))
  }
} else {
  if(is.null(diffVar)) {
    g <- g + geom_line(data=gdat, aes_string(x=indepVar, y=depVarMean))
  } else {
    g <- g + geom_line(data=gdat, aes_string(x=indepVar, y=depVarMean, color=diffVar)) +
      scale_color_manual(values=colorRampPalette(c("blue", "red"))(length(unique(gdat[,diffVar]))))
  }
}
# Facet, if requested
if(!is.null(panelVar))
  g <- g + facet_wrap(panelVar, nrow=panelRows, ncol=panelCols,
                      labeller=as_labeller(function(x) paste(panelVar, " = ", x, sep="")))
# Configure axis labels and apply theme
g <- g + scale_y_continuous(labels=function(x) format(x, big.mark=",")) +
  ggTheme +
  labs(x=paste("\n", indepVar, sep=""), y=paste(depVar, "<sub>mean</sub>\n", sep=""))
print(g)
#######################################################################################################
# Distribution of observations (employees) by one variable, paneled by another
#######################################################################################################
# Specify independent and facet variables
indepVar <- "grade"
panelVar <- "fy"
panelRows <- NULL
panelCols <- NULL
loessSpan <- 0.75
# Compute observed mass distribution(s) of independent var
if(is.null(panelVar)) {
# Single overall distribution: share of all employees at each level of indepVar
ng <- sum(cpdf[,"n"])
gdat <- aggregate(cpdf[,"n"], by=list(cpdf[,indepVar]), function(n) sum(n)/ng)
colnames(gdat) <- c(indepVar, "p")
} else {
# Per-panel distributions: counts by indepVar within panelVar, divided by panel totals
gdat <- aggregate(cpdf[,"n"], by=list(cpdf[,indepVar], cpdf[,panelVar]), sum)
colnames(gdat) <- c(indepVar, panelVar, "n")
ng <- aggregate(gdat[,"n"], by=list(gdat[,panelVar]), sum)
colnames(ng) <- c(panelVar, "n")
# merge() yields n.x (cell count) and n.y (panel total)
gdat <- merge(gdat, ng, by.x=panelVar, by.y=panelVar)
gdat <- data.frame(gdat[,c(panelVar, indepVar, "n.x")], "p"=gdat[,"n.x"]/gdat[,"n.y"])
colnames(gdat) <- c(panelVar, indepVar, "n", "p")
}
# Initialize plot
g <- ggplot()
# Add smooth probability mass plot (loess curve through the observed masses)
g <- g + geom_smooth(data=gdat, aes_string(x=indepVar, y="p"),
method="loess", se=F, span=loessSpan, fullrange=T, color="Black", size=0.6)
# Facet, if requested
if(!is.null(panelVar))
g <- g + facet_wrap(panelVar, nrow=panelRows, ncol=panelCols,
labeller=as_labeller(function(x) paste(panelVar, " = ", x, sep="")))
# Configure axis labels and apply theme
g <- g + ggTheme +
labs(x=paste("\n", indepVar, sep=""), y="P<sub>mass</sub>\n")
# Render
print(g)
|
# This tests the behaviour of findKNN's C++ code for detecting ties.
# library(kmknn); library(testthat); source("test-ties.R")
# NOTE(review): despite the description string, ties are reported as warnings,
# not errors (every expectation below is expect_warning).
test_that("ties within the set of nearest neighbors triggers errors", {
  # Points 1 and 3 sit at equal distance from point 2, so the single
  # nearest neighbor of 2 is ambiguous.
  pts <- rbind(1, 2, 3)
  expect_warning(res <- findKNN(pts, k=1), "tied distances detected")
  # The tie now falls inside the neighbor set itself.
  pts <- rbind(1, 2, 3)
  expect_warning(res <- findKNN(pts, k=2), "tied distances detected")
  pts <- rbind(0.1, 1, 2, 3, 5.2)
  expect_warning(res <- findKNN(pts, k=3), "tied distances detected")
  # Irregular spacing yields no ties, hence no warning (regexp=NA asserts absence).
  pts <- rbind(1, 2, 4)
  expect_warning(res <- findKNN(pts, k=1), NA)
  expect_warning(res <- findKNN(pts, k=2), NA)
  pts <- rbind(0.1, 1, 2, 4, 4.1)
  expect_warning(res <- findKNN(pts, k=3), NA)
})
# Build a minimal stand-in for the "precomputed" structure that the tests feed
# to findKNN(precomputed=): points stored column-wise, each point forming its
# own single-member cluster. Each info entry is list(zero-based point id, 0);
# the 0 is presumably the cluster radius - confirm against kmknn internals.
MOCKUP <- function(coordinates) {
  npts <- nrow(coordinates)
  transposed <- t(coordinates)
  info <- lapply(seq_len(npts) - 1L, function(zero_id) list(zero_id, 0))
  list(
    data = transposed,
    clusters = list(centers = transposed, info = info),
    order = seq_len(npts)
  )
}
test_that("ties are correctly detected across clusters", {
  # k=1 checks that 'last_distance' is correctly carried across clusters:
  # the second row is equidistant to the two single-point clusters on either
  # side, so the TOLERANCE check in convex_holder::search_nn() (C++) should fire.
  base <- matrix(1:3, nrow=3, ncol=5)
  pre <- MOCKUP(base)
  expect_warning(res <- findKNN(precomputed=pre, k=1), "tied distances detected")
  # Perturbation below numerical precision: still reported as a tie.
  # (Row 1 moves slightly toward row 2 while row 3 moves away.)
  shifted <- base
  shifted[,1] <- shifted[,1] + c(1e-10, 0, 1e-10)
  pre <- MOCKUP(shifted)
  expect_warning(res <- findKNN(precomputed=pre, k=1), "tied distances detected")
  # Perturbation above numerical precision: tie broken, no warning expected.
  # NOTE(review): this call uses k=2 where the earlier ones use k=1 - confirm intended.
  shifted <- base
  shifted[,1] <- shifted[,1] + c(1e-5, 0, 1e-5)
  pre <- MOCKUP(shifted)
  expect_warning(res <- findKNN(precomputed=pre, k=2), NA)
})
test_that("ties are correctly detected at zero distance", {
  # All ten points coincide, so every pairwise distance is exactly zero.
  pts <- matrix(0, 10, 5)
  expect_warning(res <- findKNN(pts, k=2), "tied distances detected")
  # Jitter one coordinate (well below unit scale) to break the ties.
  pts[,1] <- runif(nrow(pts), 0, 1e-10)
  expect_warning(res <- findKNN(pts, k=2), NA)
  # A less trivial case: duplicated rows at non-zero coordinates.
  dups <- matrix(1:5, 10, 5, byrow=TRUE)
  expect_warning(res <- findKNN(dups, k=2), "tied distances detected")
  # Zero distances must also be flagged when searching across clusters.
  pre <- MOCKUP(dups)
  expect_warning(res <- findKNN(precomputed=pre, k=1), "tied distances detected")
})
| /tests/testthat/test-ties.R | no_license | avanindra/kmknn | R | false | false | 2,971 | r | # This tests the behaviour of findKNN's C++ code for detecting ties.
# library(kmknn); library(testthat); source("test-ties.R")
# NOTE(review): despite the description string, ties are reported as warnings,
# not errors (every expectation below is expect_warning).
test_that("ties within the set of nearest neighbors triggers errors", {
# '1' and '3' are tied when compared to '2'.
coordinates <- rbind(1,2,3)
expect_warning(out <- findKNN(coordinates, k=1), "tied distances detected")
# Ties are now occurring _within_ the set.
coordinates <- rbind(1,2,3)
expect_warning(out <- findKNN(coordinates, k=2), "tied distances detected")
coordinates <- rbind(0.1, 1, 2, 3, 5.2)
expect_warning(out <- findKNN(coordinates, k=3), "tied distances detected")
# No warning when there are no ties (regexp=NA asserts absence of warnings).
coordinates <- rbind(1,2,4)
expect_warning(out <- findKNN(coordinates, k=1), NA)
expect_warning(out <- findKNN(coordinates, k=2), NA)
coordinates <- rbind(0.1, 1, 2, 4, 4.1)
expect_warning(out <- findKNN(coordinates, k=3), NA)
})
# Build a minimal stand-in for the "precomputed" structure that the tests feed
# to findKNN(precomputed=): points stored column-wise, each point its own
# single-member cluster.
MOCKUP <- function(coordinates) {
# One entry per point: list(zero-based point id, 0).
# NOTE(review): the 0 is presumably the cluster radius - confirm against kmknn internals.
info <- lapply(seq_len(nrow(coordinates)), FUN=function(i) {
list(i-1L, 0)
})
list(data=t(coordinates),
clusters=list(centers=t(coordinates), info=info),
order=seq_len(nrow(coordinates))
)
}
test_that("ties are correctly detected across clusters", {
# Testing k=1 to check if 'last_distance' is correctly updated across clusters.
# The second row should be equidistant to the two other clusters,
# so the use of TOLERANCE in the convex_holder::search_nn() C++ code should trigger.
coordinates <- matrix(1:3, nrow=3, ncol=5)
pre <- MOCKUP(coordinates)
expect_warning(out <- findKNN(precomputed=pre, k=1), "tied distances detected")
# Tie breakage below the threshold of numerical precision.
# Note that 1 becomes _closer_ to 2, while 3 moves further away.
coordinates2 <- coordinates
coordinates2[,1] <- coordinates2[,1] + c(1e-10, 0, 1e-10)
pre2 <- MOCKUP(coordinates2)
expect_warning(out <- findKNN(precomputed=pre2, k=1), "tied distances detected")
# Tie breakage above the threshold of numerical precision.
# NOTE(review): this final check uses k=2 while the earlier ones use k=1 - confirm intended.
coordinates2 <- coordinates
coordinates2[,1] <- coordinates2[,1] + c(1e-5, 0, 1e-5)
pre2 <- MOCKUP(coordinates2)
expect_warning(out <- findKNN(precomputed=pre2, k=2), NA)
})
test_that("ties are correctly detected at zero distance", {
# All ten points coincide, so every pairwise distance is exactly zero.
coordinates <- matrix(0, 10, 5)
expect_warning(out <- findKNN(coordinates, k=2), "tied distances detected")
# Breaking the ties with a sub-1e-10 jitter in the first coordinate.
coordinates[,1] <- runif(nrow(coordinates), 0, 1e-10)
expect_warning(out <- findKNN(coordinates, k=2), NA)
# Testing with a less trivial example: duplicated non-zero rows.
coordinates2 <- matrix(1:5, 10, 5, byrow=TRUE)
expect_warning(out <- findKNN(coordinates2, k=2), "tied distances detected")
# Checking that zero distances across clusters are handled correctly.
pre <- MOCKUP(coordinates2)
expect_warning(out <- findKNN(precomputed=pre, k=1), "tied distances detected")
})
# Plot 4: a 2x2 panel of household power consumption measures for
# 2007-02-01 and 2007-02-02, written to plot4.png.
# BUG FIX: the path variable was named "ataFile" but read below as "dataFile".
# NOTE(review): hard-coded absolute path - adjust for the local machine.
dataFile <- "/Users/vimalavenugopalan-mark/Documents/DataAnalysis/exploring data/household_power_consumption.txt"
data <- read.table(dataFile, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
# Keep only the two target dates (stored as d/m/Y strings in the file)
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
# Combine date and time into POSIXlt timestamps (duplicate strptime call removed)
datetime <- strptime(paste(subSetData$Date, subSetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# Coerce measurement columns from character to numeric
globalActivePower <- as.numeric(subSetData$Global_active_power)
globalReactivePower <- as.numeric(subSetData$Global_reactive_power)
voltage <- as.numeric(subSetData$Voltage)
subMetering1 <- as.numeric(subSetData$Sub_metering_1)
subMetering2 <- as.numeric(subSetData$Sub_metering_2)
subMetering3 <- as.numeric(subSetData$Sub_metering_3)
# Render the four panels to a 480x480 PNG
png("plot4.png", width=480, height=480)
par(mfrow = c(2, 2))
plot(datetime, globalActivePower, type="l", xlab="", ylab="Global Active Power", cex=0.2)
plot(datetime, voltage, type="l", xlab="datetime", ylab="Voltage")
plot(datetime, subMetering1, type="l", ylab="Energy Submetering", xlab="")
lines(datetime, subMetering2, type="l", col="red")
lines(datetime, subMetering3, type="l", col="blue")
# BUG FIX: "lty=" passed an empty (missing) argument; use solid lines (lty=1)
# so the legend keys match the plotted lines.
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"), bty="o")
plot(datetime, globalReactivePower, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off()
| /Plot4.R | no_license | vvenugopalan/ExploratoryDataAnalysisProject1 | R | false | false | 1,461 | r | ataFile <- "/Users/vimalavenugopalan-mark/Documents/DataAnalysis/exploring data/household_power_consumption.txt"
# 2x2 panel of household power consumption measures, written to plot4.png.
# NOTE(review): relies on dataFile being defined upstream; the preceding
# assignment names it "ataFile" - confirm and align the two names.
data <- read.table(dataFile, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
# Keep only the two target dates (stored as d/m/Y strings in the file)
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
# Combine date and time into POSIXlt timestamps (duplicate strptime call removed)
datetime <- strptime(paste(subSetData$Date, subSetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# Coerce measurement columns from character to numeric
globalActivePower <- as.numeric(subSetData$Global_active_power)
globalReactivePower <- as.numeric(subSetData$Global_reactive_power)
voltage <- as.numeric(subSetData$Voltage)
subMetering1 <- as.numeric(subSetData$Sub_metering_1)
subMetering2 <- as.numeric(subSetData$Sub_metering_2)
subMetering3 <- as.numeric(subSetData$Sub_metering_3)
# Render the four panels to a 480x480 PNG
png("plot4.png", width=480, height=480)
par(mfrow = c(2, 2))
plot(datetime, globalActivePower, type="l", xlab="", ylab="Global Active Power", cex=0.2)
plot(datetime, voltage, type="l", xlab="datetime", ylab="Voltage")
plot(datetime, subMetering1, type="l", ylab="Energy Submetering", xlab="")
lines(datetime, subMetering2, type="l", col="red")
lines(datetime, subMetering3, type="l", col="blue")
# BUG FIX: "lty=" passed an empty (missing) argument; use solid lines (lty=1)
# so the legend keys match the plotted lines.
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"), bty="o")
plot(datetime, globalReactivePower, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off()
|
# Interactive plotting example: base graphics vs ggplot2 + plotly
library(ggplot2)
library(plotly)
library(ggthemes)
# Get data
library(gapminder)
gm <- gapminder
# Single-country subset for the base-graphics example
moz <- gm[gm$country == "Mozambique", ]
# Selected African countries for the ggplot example
africanCountries <- c("Mozambique",
                      "Swaziland",
                      "South Africa",
                      "Kenya",
                      "Uganda",
                      "Somalia")
afr <- gm[gm$country %in% africanCountries, ]
# Old way: base graphics line plot
plot(moz$year, moz$lifeExp,
     type = "l",
     xlab = "Year",
     ylab = "Life expectancy",
     main = "Life expectancy in Mozambique")
# New way: build a ggplot object, then hand it to plotly for interactivity
g <- ggplot(data = afr,
            aes(x = year, y = lifeExp, group = country, color = country)) +
  geom_line() +
  theme_fivethirtyeight() +
  ggtitle("Life expectancy by country")
ggplotly(g)
| /interactive_plots_example.R | no_license | joebrew/cism_misc | R | false | false | 727 | r | library(ggplot2)
library(plotly)
library(ggthemes)
# Get data
library(gapminder)
df <- gapminder
moz <- df[df$country == 'Mozambique',]
afr <- df[df$country %in%
c('Mozambique',
'Swaziland',
'South Africa',
'Kenya',
'Uganda',
'Somalia'),]
# Old way
plot(moz$year,
moz$lifeExp,
type = 'l',
xlab = 'Year',
ylab = 'Life expectancy',
main = 'Life expectancy in Mozambique')
# New way
g <-
ggplot(data = afr,
aes(x = year,
y = lifeExp,
group = country,
color = country)) +
geom_line() +
theme_fivethirtyeight() +
ggtitle('Life expectancy by country')
ggplotly(g)
|
# Auto-generated fuzz harness: exercise the internal C++ routine
# diffrprojects:::dist_mat_absolute with edge-case inputs
# (NA for x; large-magnitude negative and positive integers for y).
testlist <- list(x = NA_integer_, y = c(-1L, -58666L, -52736L, 30464L))
# do.call matches the list names (x, y) to the function's arguments.
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
str(result) | /diffrprojects/inst/testfiles/dist_mat_absolute/libFuzzer_dist_mat_absolute/dist_mat_absolute_valgrind_files/1609962663-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 145 | r | testlist <- list(x = NA_integer_, y = c(-1L, -58666L, -52736L, 30464L))
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
str(result) |
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/ResampleResult_operators.R
\name{getRRPredictions}
\alias{getRRPredictions}
\title{Get predictions from resample results.}
\usage{
getRRPredictions(res)
}
\arguments{
\item{res}{[\code{ResampleResult}]\cr
The result of \code{\link{resample}} run with \code{keep.pred = TRUE}.}
}
\value{
[\code{ResamplePrediction}].
}
\description{
Very simple getter.
}
\seealso{
Other resample: \code{\link{ResampleDesc}},
\code{\link{makeResampleDesc}};
\code{\link{ResampleInstance}},
\code{\link{makeResampleInstance}};
\code{\link{ResamplePrediction}};
\code{\link{ResampleResult}};
\code{\link{bootstrapB632}},
\code{\link{bootstrapB632plus}},
\code{\link{bootstrapOOB}}, \code{\link{crossval}},
\code{\link{holdout}}, \code{\link{repcv}},
\code{\link{resample}}, \code{\link{subsample}}
}
| /man/getRRPredictions.Rd | no_license | gragusa/mlr | R | false | false | 887 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/ResampleResult_operators.R
\name{getRRPredictions}
\alias{getRRPredictions}
\title{Get predictions from resample results.}
\usage{
getRRPredictions(res)
}
\arguments{
\item{res}{[\code{ResampleResult}]\cr
The result of \code{\link{resample}} run with \code{keep.pred = TRUE}.}
}
\value{
[\code{ResamplePrediction}].
}
\description{
Very simple getter.
}
\seealso{
Other resample: \code{\link{ResampleDesc}},
\code{\link{makeResampleDesc}};
\code{\link{ResampleInstance}},
\code{\link{makeResampleInstance}};
\code{\link{ResamplePrediction}};
\code{\link{ResampleResult}};
\code{\link{bootstrapB632}},
\code{\link{bootstrapB632plus}},
\code{\link{bootstrapOOB}}, \code{\link{crossval}},
\code{\link{holdout}}, \code{\link{repcv}},
\code{\link{resample}}, \code{\link{subsample}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/software_maintainability.R
\name{software_maintainability}
\alias{software_maintainability}
\title{Calculate software maintainability}
\usage{
software_maintainability(file)
}
\arguments{
\item{file}{The file to calculate for - a file-path.}
}
\value{
The maintainability index (a number).
}
\description{
Calculate the software maintainability index for a given file
}
| /man/software_maintainability.Rd | permissive | JSzitas/qa | R | false | true | 448 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/software_maintainability.R
\name{software_maintainability}
\alias{software_maintainability}
\title{Calculate software maintainability}
\usage{
software_maintainability(file)
}
\arguments{
\item{file}{The file to calculate for - a file-path.}
}
\value{
The maintainability index (a number).
}
\description{
Calculate the software maintainability index for a given file
}
|
/Experiment 3/InitialHelikopter.R | no_license | DanuschaGrosse-Hering/GdV-Experiment-1 | R | false | false | 1,663 | r |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.