content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
### R code from vignette source 'PlotsAndStats.Rnw'
###################################################
### code chunk number 1: PlotsAndStats.Rnw:33-44
###################################################
# Session setup for the vignette: load the cheddar food-web package and
# configure prompts so example output can be copy-pasted back into R.
library(cheddar)
# Makes copy-paste much less painful
options(continue=' ')
options(width=90)
options(prompt='> ')
# The 'fig' Sweave hook is invoked at the top of every figure chunk below;
# it applies shared par() margins/label settings to each plot.
options(SweaveHooks = list(fig=function() par(mgp=c(2.5,1,0),
mar=c(4,4,2,1),
oma=c(0,0,1,0),
cex.main=0.8)))
###################################################
### code chunk number 2: PlotsAndStats.Rnw:122-124
###################################################
# Each figure chunk first runs the 'fig' hook defined in chunk 1.
getOption("SweaveHooks")[["fig"]]()
data(TL84)
# Node-property scatter: Log10M on x, Log10N on y.
PlotNPS(TL84, 'Log10M', 'Log10N')
###################################################
### code chunk number 3: PlotsAndStats.Rnw:142-143
###################################################
getOption("SweaveHooks")[["fig"]]()
# Same plot with trophic links and node highlighting suppressed.
PlotNPS(TL84, 'Log10M', 'Log10N', show.web=FALSE, highlight.nodes=NULL)
###################################################
### code chunk number 4: PlotsAndStats.Rnw:150-151
###################################################
getOption("SweaveHooks")[["fig"]]()
# Show nodes as text labels instead of symbols.
PlotNPS(TL84, 'Log10M', 'Log10N', show.nodes.as='labels', show.web=FALSE)
###################################################
### code chunk number 5: PlotsAndStats.Rnw:158-160
###################################################
getOption("SweaveHooks")[["fig"]]()
# Label nodes with their 'node' property, shrunk to half size.
PlotNPS(TL84, 'Log10M', 'Log10N', show.nodes.as='labels', show.web=FALSE,
node.labels='node', cex=0.5)
###################################################
### code chunk number 6: PlotsAndStats.Rnw:166-169
###################################################
getOption("SweaveHooks")[["fig"]]()
# Build 104 short labels (a..z, A..Z, Aa..Zz) for node labelling.
# paste0() is the idiomatic replacement for paste(..., sep='').
lots.of.letters <- c(letters, LETTERS, paste0(LETTERS, letters))
# seq_len() is safer than 1:n (1:n yields c(1, 0) when n is 0).
PlotNPS(TL84, 'Log10M', 'Log10N', show.nodes.as='labels', show.web=FALSE,
        node.labels=lots.of.letters[seq_len(NumberOfNodes(TL84))])
###################################################
### code chunk number 7: PlotsAndStats.Rnw:174-175
###################################################
getOption("SweaveHooks")[["fig"]]()
# Show nodes as both symbols and labels, double size.
PlotNPS(TL84, 'Log10M', 'Log10N', show.nodes.as='both', show.web=FALSE, cex=2)
###################################################
### code chunk number 8: PlotsAndStats.Rnw:185-187
###################################################
getOption("SweaveHooks")[["fig"]]()
# Use cheddar's helper functions to build axis labels with units.
PlotNPS(TL84, 'Log10M', 'Log10N', xlab=Log10MLabel(TL84),
ylab=Log10NLabel(TL84))
###################################################
### code chunk number 9: PlotsAndStats.Rnw:197-206
###################################################
getOption("SweaveHooks")[["fig"]]()
# Three panels: out-degree, in-degree and total degree against Log10M,
# each with a fitted linear regression line.
par(mfrow=c(1,3))
PlotNPS(TL84, 'Log10M', 'OutDegree', show.web=FALSE)
abline(lm(OutDegree(TL84) ~ Log10M(TL84)))
PlotNPS(TL84, 'Log10M', 'InDegree', show.web=FALSE)
abline(lm(InDegree(TL84) ~ Log10M(TL84)))
PlotNPS(TL84, 'Log10M', 'Degree', show.web=FALSE)
abline(lm(Degree(TL84) ~ Log10M(TL84)))
###################################################
### code chunk number 10: PlotsAndStats.Rnw:218-219
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNPS(TL84, 'Log10M', 'PreyAveragedTrophicLevel')
###################################################
### code chunk number 11: PlotsAndStats.Rnw:226-227
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNPS(TL84, 'Log10M', 'ChainAveragedTrophicLevel')
###################################################
### code chunk number 12: PlotsAndStats.Rnw:242-247
###################################################
getOption("SweaveHooks")[["fig"]]()
# Side-by-side comparison of the two trophic-level measures on a common
# y range so the panels are directly comparable.
par(mfrow=c(1,2))
PlotNPS(TL84, 'Log10M', 'PreyAveragedTrophicLevel', ylim=c(1, 6),
main='Prey-averaged')
PlotNPS(TL84, 'Log10M', 'ChainAveragedTrophicLevel', ylim=c(1, 6),
main='Chain-averaged')
###################################################
### code chunk number 13: PlotsAndStats.Rnw:261-266
###################################################
getOption("SweaveHooks")[["fig"]]()
# 2x2 grid of the convenience wrappers for M, N and B pairings.
par(mfrow=c(2,2))
PlotMvN(TL84)
PlotNvM(TL84)
PlotBvM(TL84)
PlotMvB(TL84)
###################################################
### code chunk number 14: PlotsAndStats.Rnw:279-280
###################################################
getOption("SweaveHooks")[["fig"]]()
# Rank plot: Log10N against node rank.
PlotRankNPS(TL84, 'Log10N')
###################################################
### code chunk number 15: PlotsAndStats.Rnw:285-286
###################################################
getOption("SweaveHooks")[["fig"]]()
# Rank by body mass (M) rather than by the plotted property.
PlotRankNPS(TL84, 'Log10N', rank.by='M')
###################################################
### code chunk number 16: PlotsAndStats.Rnw:294-295
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotRankNPS(TL84, 'Log10N', rank.by='M', show.web=TRUE)
###################################################
### code chunk number 17: PlotsAndStats.Rnw:300-301
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotRankNPS(TL84, 'PreyAveragedTrophicLevel', rank.by='M')
###################################################
### code chunk number 18: PlotsAndStats.Rnw:309-310
###################################################
getOption("SweaveHooks")[["fig"]]()
# log10-transform the rank axis.
PlotRankNPS(TL84, 'PreyAveragedTrophicLevel', rank.by='M', log10.rank=TRUE)
###################################################
### code chunk number 19: PlotsAndStats.Rnw:320-324
###################################################
getOption("SweaveHooks")[["fig"]]()
# Rank-plot convenience wrappers for M, N and B.
par(mfrow=c(1,3))
PlotMvRankM(TL84)
PlotNvRankN(TL84)
PlotBvRankB(TL84)
###################################################
### code chunk number 20: PlotsAndStats.Rnw:338-339
###################################################
getOption("SweaveHooks")[["fig"]]()
# Kernel-density plot of a node property.
PlotNPSDistribution(TL84, 'Log10M')
###################################################
### code chunk number 21: PlotsAndStats.Rnw:345-346
###################################################
getOption("SweaveHooks")[["fig"]]()
# density.args is forwarded to density(); here a wider bandwidth.
PlotNPSDistribution(TL84, 'Log10M', density.args=list(bw=3))
###################################################
### code chunk number 22: PlotsAndStats.Rnw:366-367
###################################################
getOption("SweaveHooks")[["fig"]]()
# Uniform colour and plotting symbol for all nodes.
PlotNvM(TL84, col=1, pch=19, highlight.nodes=NULL)
###################################################
### code chunk number 23: PlotsAndStats.Rnw:374-375
###################################################
getOption("SweaveHooks")[["fig"]]()
# One colour per node (TL84 has 56 nodes).
PlotNvM(TL84, col=1:56, pch=19, highlight.nodes=NULL)
###################################################
### code chunk number 24: PlotsAndStats.Rnw:384-385
###################################################
getOption("SweaveHooks")[["fig"]]()
# Colour nodes by the 'resolved.to' node property.
PlotNvM(TL84, colour.by='resolved.to', pch=19, highlight.nodes=NULL)
###################################################
### code chunk number 25: PlotsAndStats.Rnw:393-397
###################################################
getOption("SweaveHooks")[["fig"]]()
# Named entries map property values to colours; the unnamed 'red3' is
# presumably the fall-through for unmatched values — see ?PlotNvM.
colour.spec <- c(Species='purple3', Genus='green3', 'red3')
PlotNvM(TL84, colour.by='resolved.to', colour.spec=colour.spec, pch=19,
highlight.nodes=NULL)
legend("topright", legend=names(colour.spec), pch=19, col=colour.spec)
###################################################
### code chunk number 26: PlotsAndStats.Rnw:408-420
###################################################
getOption("SweaveHooks")[["fig"]]()
# Use <- for top-level assignment ('=' is non-idiomatic R style).
# Named entries map kingdom values to symbols/colours; the trailing unnamed
# entries (19, 'black') are presumably the defaults for unmatched values --
# verify against ?PlotNvM.
symbol.spec <- c(Bacteria=21, Plantae=22, Chromista=23,
                 Protozoa=24, Animalia=25, 19)
colour.spec <- c(Bacteria='purple3', Plantae='green3',
                 Chromista='blue3', Protozoa='orange3',
                 Animalia='red3', 'black')
# Symbol, background and outline colour all keyed on 'kingdom'.
PlotNvM(TL84,
        symbol.by='kingdom', symbol.spec=symbol.spec,
        bg.by='kingdom', bg.spec=colour.spec,
        colour.by='kingdom', colour.spec=colour.spec,
        highlight.nodes=NULL)
legend("topright", legend=names(colour.spec), pch=symbol.spec,
       col=colour.spec, pt.bg=colour.spec)
###################################################
### code chunk number 27: PlotsAndStats.Rnw:432-446
###################################################
getOption("SweaveHooks")[["fig"]]()
# Use <- for top-level assignment ('=' is non-idiomatic R style).
symbol.spec <- c(Bacteria=21, Plantae=22, Chromista=23,
                 Protozoa=24, Animalia=25, 19)
colour.spec <- c(Bacteria='purple3', Plantae='green3',
                 Chromista='blue3', Protozoa='orange3',
                 Animalia='red3', 'black')
# As chunk 26 but without the web, then overlay per-kingdom N-v-M
# regression lines.
PlotNvM(TL84,
        symbol.by='kingdom', symbol.spec=symbol.spec,
        bg.by='kingdom', bg.spec=colour.spec,
        colour.by='kingdom', colour.spec=colour.spec,
        highlight.nodes=NULL, show.web=FALSE)
legend("topright", legend=names(colour.spec), pch=symbol.spec,
       col=colour.spec, pt.bg=colour.spec)
models <- NvMLinearRegressions(TL84, class='kingdom')
colours <- PlotLinearModels(models, colour.spec=colour.spec)
###################################################
### code chunk number 28: PlotsAndStats.Rnw:457-458
###################################################
getOption("SweaveHooks")[["fig"]]()
# pch=NA suppresses the node symbols, leaving only the web.
PlotNvM(TL84, pch=NA, highlight.nodes=NULL)
###################################################
### code chunk number 29: PlotsAndStats.Rnw:471-480
###################################################
getOption("SweaveHooks")[["fig"]]()
# Demonstrate the cheddarTopAndRightTicks global option.
par(mfrow=c(1,2))
# Don't add ticks
options(cheddarTopAndRightTicks=FALSE)
PlotNvM(TL84)
# Add ticks
options(cheddarTopAndRightTicks=TRUE)
PlotNvM(TL84)
###################################################
### code chunk number 30: PlotsAndStats.Rnw:496-497
###################################################
getOption("SweaveHooks")[["fig"]]()
# highlight.nodes accepts a function (here cheddar's Cannibals).
PlotNvM(TL84, highlight.nodes=Cannibals)
###################################################
### code chunk number 31: PlotsAndStats.Rnw:503-504
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNvM(TL84, highlight.nodes=IsolatedNodes)
###################################################
### code chunk number 32: PlotsAndStats.Rnw:510-511
###################################################
getOption("SweaveHooks")[["fig"]]()
# ...or a node name.
PlotNvM(TL84, highlight.nodes='Chaoborus punctipennis')
###################################################
### code chunk number 33: PlotsAndStats.Rnw:524-525
###################################################
getOption("SweaveHooks")[["fig"]]()
# Highlight links where the resource is larger than its consumer.
PlotNvM(TL84, highlight.links=ResourceLargerThanConsumer)
###################################################
### code chunk number 34: PlotsAndStats.Rnw:531-533
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNvM(TL84, highlight.nodes='Chaoborus punctipennis',
highlight.links=TrophicLinksForNodes(TL84, 'Chaoborus punctipennis'))
###################################################
### code chunk number 35: PlotsAndStats.Rnw:554-558
###################################################
getOption("SweaveHooks")[["fig"]]()
# YthanEstuary contains nodes with missing M and/or N; show.na controls
# whether those nodes are drawn.
data(YthanEstuary)
par(mfrow=c(1,2))
PlotNvM(YthanEstuary)
PlotNvM(YthanEstuary, show.na=TRUE)
###################################################
### code chunk number 36: PlotsAndStats.Rnw:568-569
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNvM(YthanEstuary, xlim=c(-10, 4), ylim=c(-10, 13), show.na=TRUE)
###################################################
### code chunk number 37: PlotsAndStats.Rnw:581-604
###################################################
getOption("SweaveHooks")[["fig"]]()
# Four panels showing how plotting behaves when M, N or both are NA;
# each panel rebuilds a Community from a modified copy of TL84's nodes.
par(mfrow=c(2,2))
np <- NPS(TL84)
np[1,'M'] <- NA
PlotNvM(Community(nodes=np, trophic.links=TLPS(TL84), properties=CPS(TL84)),
main='Node 1 M=NA', show.nodes.as='both', cex=2, show.na=TRUE)
np <- NPS(TL84)
np[1,'N'] <- NA
PlotNvM(Community(nodes=np, trophic.links=TLPS(TL84), properties=CPS(TL84)),
main='Node 1 N=NA', show.nodes.as='both', cex=2, show.na=TRUE)
np <- NPS(TL84)
np[1,'M'] <- NA
np[1,'N'] <- NA
PlotNvM(Community(nodes=np, trophic.links=TLPS(TL84), properties=CPS(TL84)),
main='Node 1 M=NA and N=NA', show.nodes.as='both', cex=2, show.na=TRUE)
np <- NPS(TL84)
np[c(10, 20, 30, 40),'M'] <- NA
np[c(10, 20, 30, 40),'N'] <- NA
PlotNvM(Community(nodes=np, trophic.links=TLPS(TL84), properties=CPS(TL84)),
main='Nodes 10, 20, 30 and 40 M=NA and N=NA', show.nodes.as='both',
cex=2, show.na=TRUE)
###################################################
### code chunk number 38: PlotsAndStats.Rnw:616-619
###################################################
getOption("SweaveHooks")[["fig"]]()
par(mfrow=c(1,2))
PlotMvRankM(YthanEstuary)
PlotMvRankM(YthanEstuary, show.na=TRUE)
###################################################
### code chunk number 39: PlotsAndStats.Rnw:643-644
###################################################
getOption("SweaveHooks")[["fig"]]()
# Trophic-link scatter: resource body mass vs consumer body mass.
PlotTLPS(TL84, 'resource.Log10M', 'consumer.Log10M')
###################################################
### code chunk number 40: PlotsAndStats.Rnw:653-654
###################################################
getOption("SweaveHooks")[["fig"]]()
# Force both axes to share the same limits.
PlotTLPS(TL84, 'resource.Log10M', 'consumer.Log10M', axes.limits.equal=TRUE)
###################################################
### code chunk number 41: PlotsAndStats.Rnw:675-680
###################################################
getOption("SweaveHooks")[["fig"]]()
par(mfrow=c(2,2))
PlotPredationMatrix(TL84)
PlotMRvMC(TL84)
PlotNCvNR(TL84)
PlotBRvBC(TL84)
###################################################
### code chunk number 42: PlotsAndStats.Rnw:694-695
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotMRvMC(TL84)
###################################################
### code chunk number 43: PlotsAndStats.Rnw:703-705
###################################################
getOption("SweaveHooks")[["fig"]]()
# Style link points by the consumer's 'category' property.
PlotMRvMC(TL84, colour.by='consumer.category', bg.by='consumer.category',
symbol.by='consumer.category')
###################################################
### code chunk number 44: PlotsAndStats.Rnw:716-719
###################################################
# Sum M, N and biomass per class (default classification).
SumMByClass(TL84)
SumNByClass(TL84)
SumBiomassByClass(TL84)
###################################################
### code chunk number 45: PlotsAndStats.Rnw:723-726
###################################################
# Same sums, classified by kingdom.
SumMByClass(TL84, 'kingdom')
SumNByClass(TL84, 'kingdom')
SumBiomassByClass(TL84, 'kingdom')
###################################################
### code chunk number 46: PlotsAndStats.Rnw:732-734
###################################################
# SumBiomassByClass is shorthand for ApplyByClass with sum.
SumBiomassByClass(TL84)
ApplyByClass(TL84, 'Biomass', 'category', sum)
###################################################
### code chunk number 47: PlotsAndStats.Rnw:747-749
###################################################
models <- NvMLinearRegressions(TL84)
names(models)
###################################################
### code chunk number 48: PlotsAndStats.Rnw:752-753
###################################################
# Extract regression coefficients from each fitted model.
sapply(models, 'coef')
###################################################
### code chunk number 49: PlotsAndStats.Rnw:760-762
###################################################
# Per-phylum N-v-M regressions.
models <- NvMLinearRegressions(TL84, class='phylum')
names(models)
###################################################
### code chunk number 50: PlotsAndStats.Rnw:770-771
###################################################
# Classes with too few points yield NULL models.
sapply(models, is.null)
###################################################
### code chunk number 51: PlotsAndStats.Rnw:777-780
###################################################
data(BroadstoneStream)
models <- NvMLinearRegressions(BroadstoneStream)
sapply(models, is.null)
###################################################
### code chunk number 52: PlotsAndStats.Rnw:784-787
###################################################
# Whole-community N-v-M slope and intercept accessors.
NvMSlope(TL84)
NvMIntercept(TL84)
NvMSlopeAndIntercept(TL84)
###################################################
### code chunk number 53: PlotsAndStats.Rnw:790-793
###################################################
NvMSlopeByClass(TL84)
NvMInterceptByClass(TL84)
NvMSlopeAndInterceptByClass(TL84)
###################################################
### code chunk number 54: PlotsAndStats.Rnw:796-799
###################################################
NvMSlopeByClass(TL84, class='kingdom')
NvMInterceptByClass(TL84, class='kingdom')
NvMSlopeAndInterceptByClass(TL84, class='kingdom')
###################################################
### code chunk number 55: PlotsAndStats.Rnw:835-842
###################################################
getOption("SweaveHooks")[["fig"]]()
# Reproduce Jonsson et al. (2005) Fig. 3: TL84 vs TL86 M-v-N on
# matching axis limits.
data(TL86)
par(mfrow=c(1,2))
PlotMvN(TL84, show.nodes.as='both', cex=2, xlim=c(-2, 10), ylim=c(-14, 0),
highlight.nodes=NULL, highlight.links=NULL, main='')
PlotMvN(TL86, show.nodes.as='both', cex=2, xlim=c(-2, 10), ylim=c(-14, 0),
highlight.nodes=NULL, highlight.links=NULL, main='')
title(main='Jonsson et al. (2005) AER, Fig. 3 (p 30)', outer=TRUE)
###################################################
### code chunk number 56: PlotsAndStats.Rnw:851-857
###################################################
getOption("SweaveHooks")[["fig"]]()
par(mfrow=c(1,2))
PlotMCvMR(TL84, xlim=c(-14, 0), ylim=c(-14, 0), main='')
# Dashed 1:1 reference line.
abline(a=0, b=1, lty=2)
PlotMCvMR(TL86, xlim=c(-14, 0), ylim=c(-14, 0), main='')
abline(a=0, b=1, lty=2)
title(main='Jonsson et al. (2005) AER, Fig. 4 (p 33)', outer=TRUE)
###################################################
### code chunk number 57: PlotsAndStats.Rnw:866-872
###################################################
getOption("SweaveHooks")[["fig"]]()
par(mfrow=c(2,2))
PlotNvM(TL84, xlim=c(-14, 0), ylim=c(-2,10), show.web=FALSE, main='')
PlotNvM(TL86, xlim=c(-14, 0), ylim=c(-2,10), show.web=FALSE, main='')
PlotBvM(TL84, xlim=c(-14, 0), ylim=c(-8,2), show.web=FALSE, main='')
PlotBvM(TL86, xlim=c(-14, 0), ylim=c(-8,2), show.web=FALSE, main='')
title(main='Jonsson et al. (2005) AER, Fig. 5 (p 37)', outer=TRUE)
###################################################
### code chunk number 58: PlotsAndStats.Rnw:881-891
###################################################
getOption("SweaveHooks")[["fig"]]()
par(mfrow=c(2,2))
PlotNCvNR(TL84, xlim=c(0, 10), ylim=c(-2,10), main='')
abline(a=0, b=1, lty=2)
PlotNCvNR(TL86, xlim=c(0, 10), ylim=c(-2,10), main='')
abline(a=0, b=1, lty=2)
PlotBCvBR(TL84, xlim=c(-8, -2), ylim=c(-8, -2), main='')
abline(a=0, b=1, lty=2)
PlotBCvBR(TL86, xlim=c(-8, -2), ylim=c(-8, -2), main='')
abline(a=0, b=1, lty=2)
title(main='Jonsson et al. (2005) AER, Fig. 7 (p 47)', outer=TRUE)
###################################################
### code chunk number 59: PlotsAndStats.Rnw:900-910
###################################################
getOption("SweaveHooks")[["fig"]]()
# Abundance and biomass pyramids by trophic level, isolated nodes removed;
# floor(TrophicHeight(...)) bins nodes into integer levels.
par(mfrow=c(2,2))
TL84.no.iso <- RemoveIsolatedNodes(TL84)
TL86.no.iso <- RemoveIsolatedNodes(TL86)
tl84.levels <- floor(TrophicHeight(TL84.no.iso))
tl86.levels <- floor(TrophicHeight(TL86.no.iso))
PlotNPyramid(TL84.no.iso, level=tl84.levels, main='', ylab='Trophic height')
PlotNPyramid(TL86.no.iso, level=tl86.levels, main='')
PlotBPyramid(TL84.no.iso, level=tl84.levels, main='', ylab='Trophic height')
PlotBPyramid(TL86.no.iso, level=tl86.levels, main='')
title(main='Jonsson et al. (2005) AER, Fig. 8 (p 49)', outer=TRUE)
###################################################
### code chunk number 60: PlotsAndStats.Rnw:919-925
###################################################
getOption("SweaveHooks")[["fig"]]()
par(mfrow=c(2,2))
PlotNvRankN(TL84, xlim=c(0,60), ylim=c(-2, 10), main='')
PlotNvRankN(TL86, xlim=c(0,60), ylim=c(-2, 10), main='')
PlotBvRankB(TL84, xlim=c(0,60), ylim=c(-8, -2), main='')
PlotBvRankB(TL86, xlim=c(0,60), ylim=c(-8, -2), main='')
title(main='Jonsson et al. (2005) AER, Fig. 10 (p 57)', outer=TRUE)
###################################################
### code chunk number 61: PlotsAndStats.Rnw:934-946
###################################################
getOption("SweaveHooks")[["fig"]]()
par(mfrow=c(2,2))
PlotRankNPS(TL84, property='Log10N', rank.by='M', log10.rank=TRUE,
xlim=c(0,2), ylim=c(-2, 10), ylab=Log10NLabel(TL84), main='')
PlotRankNPS(TL86, property='Log10N', rank.by='M', log10.rank=TRUE,
xlim=c(0,2), ylim=c(-2, 10), ylab=Log10NLabel(TL84), main='')
PlotRankNPS(TL84, property='Log10Biomass', rank.by='M',
log10.rank=TRUE, xlim=c(0,2), ylim=c(-8, -2),
ylab=Log10BLabel(TL84), main='')
PlotRankNPS(TL86, property='Log10Biomass', rank.by='M',
log10.rank=TRUE, xlim=c(0,2), ylim=c(-8, -2),
ylab=Log10BLabel(TL84), main='')
title(main='Jonsson et al. (2005) AER, Fig. 11 (p 60)', outer=TRUE)
###################################################
### code chunk number 62: PlotsAndStats.Rnw:957-981
###################################################
getOption("SweaveHooks")[["fig"]]()
# Scatter one community's node property against another community's, matched
# by node name; nodes present in only one community get NA on the other axis
# and are placed by cheddar's PlaceMissingPoints. A dashed 1:1 line is added.
PlotCommunityVCommunity <- function(a, b, property, xlim=NULL, ylim=NULL, ...)
{
# Node names of each community; the union gives the common indexing set.
a.nodes <- NP(a, 'node')
b.nodes <- NP(b, 'node')
all.nodes <- union(a.nodes, b.nodes)
# Property vectors named by node so they can be aligned via all.nodes;
# indexing by a missing name yields NA.
a.values <- NPS(a, property)[,property]
names(a.values) <- a.nodes
b.values <- NPS(b, property)[,property]
names(b.values) <- b.nodes
points <- PlaceMissingPoints(a.values[all.nodes], xlim,
b.values[all.nodes], ylim)
plot(points[,1], points[,2], xlim=xlim, ylim=ylim, ...)
abline(a=0, b=1, lty=2)
}
# Reproduce Jonsson et al. (2005) Fig. 12: TL84 vs TL86 N and B.
par(mfrow=c(1,2))
PlotCommunityVCommunity(TL84, TL86, 'Log10N', xlim=c(-2,10), ylim=c(-2,10),
xlab=~log[10]~(N~of~84), ylab=~log[10]~(N~of~86),pch=19)
PlotCommunityVCommunity(TL84, TL86, 'Log10Biomass',
xlim=c(-8,-2), ylim=c(-8,-2),
xlab=~log[10]~(B~of~84), ylab=~log[10]~(B~of~86),pch=19)
title(main='Jonsson et al. (2005) AER, Fig. 12 (p 61)', outer=TRUE)
###################################################
### code chunk number 63: PlotsAndStats.Rnw:994-1010
###################################################
getOption("SweaveHooks")[["fig"]]()
# Reproduce Layer et al. (2010) Fig. 6 for the first two pH webs:
# left panel N-v-M, right panel the density of log10((MC/MR)^0.75),
# a body-mass-ratio-based interaction-strength proxy.
data(pHWebs)
par(mfrow=c(2,2))
for(community in pHWebs[1:2])
{
PlotNvM(community, xlim=c(-15, 10), ylim=c(-5,15), main='',
highlight.nodes=NULL)
# Annotate each panel with the community title and pH from its properties.
text(-15, 13, with(CPS(community), paste(title, ', pH ', pH, sep='')),
adj=0, cex=1.5)
tlps <- TLPS(community, node.properties='M')
# Drop links where either endpoint's body mass is missing.
tlps <- tlps[!is.na(tlps$resource.M) & !is.na(tlps$consumer.M),]
interaction.strength <- log10( (tlps$consumer.M / tlps$resource.M)^0.75 )
plot(density(interaction.strength), xlim=c(-4,14), ylim=c(0,0.6),
main='', xlab=~log[10]((M[C]/M[R])^0.75))
rug(interaction.strength)
}
title(main='Layer et al. (2010) AER, Fig. 6 (p 282)', outer=TRUE)
###################################################
### code chunk number 64: PlotsAndStats.Rnw:1022-1037
###################################################
getOption("SweaveHooks")[["fig"]]()
# Reproduce Woodward et al. (2005) Fig. 4: Broadstone Stream M-v-N, with
# the right panel hiding links where the consumer is at least as large as
# the resource (lty 0 = invisible, 1 = solid).
data(BroadstoneStream)
par(mfrow=c(1,2))
PlotMvN(BroadstoneStream, show.nodes.as='labels', label.cex=0.8,
xlim=c(-2, 4.2), ylim=c(-6,2), main='', show.na=FALSE,
highlight.links=NULL)
abline(a=0, b=-1)
tlps <- TLPS(BroadstoneStream, node.properties='M')
lty <- rep(0, NumberOfTrophicLinks(BroadstoneStream))
lty[tlps$resource.M > tlps$consumer.M] <- 1
PlotMvN(BroadstoneStream, show.nodes.as='labels', label.cex=0.8,
xlim=c(-2, 4.2), ylim=c(-6,2), main='', show.na=FALSE,
highlight.links=NULL, link.lty=lty)
abline(a=0, b=-1)
title(main='Woodward et al. (2005) AER, Fig. 4 (p 108)', outer=TRUE)
###################################################
### code chunk number 65: PlotsAndStats.Rnw:1052-1055
###################################################
# Tri-trophic statistics table across a collection of three communities.
collection <- CommunityCollection(list(TL84, TL86, YthanEstuary))
table <- NvMTriTrophicTable(collection)
print(round(table,2))
###################################################
### code chunk number 66: PlotsAndStats.Rnw:1059-1088
###################################################
# Hand-computed counterpart of the tri-trophic table: for each community,
# count three-node chains by the size ordering of resource (MR),
# intermediate (MI) and top (MC) body masses, plus pairwise link orderings.
res <- lapply(list(TL84, TL86, YthanEstuary), function(community)
{
# Nodes lacking M or N cannot be ordered by mass, so drop them first,
# then remove cannibalistic links and any nodes left isolated.
community <- RemoveNodes(community, remove=with(NPS(community), node[is.na(M) | is.na(N)]))
community <- RemoveCannibalisticLinks(community)
community <- RemoveIsolatedNodes(community)
chains <- ThreeNodeChains(community, node.properties='M')
MR <- chains$bottom.M
MI <- chains$intermediate.M
MC <- chains$top.M
lp <- TLPS(community, node.properties='M')
# The six mutually exclusive orderings of (MR, MI, MC) partition all chains.
return (c('MR<=MI<=MC'=sum(MR<=MI & MI<=MC),
'MR<=MC<MI'=sum(MR<=MC & MC<MI),
'MI<MR<=MC'=sum(MI<MR & MR<=MC),
'MI<=MC<MR'=sum(MI<=MC & MC<MR),
'MC<MR<MI'=sum(MC<MR & MR<MI),
'MC<MI<MR'=sum(MC<MI & MI<MR),
'All 2-chains'=nrow(chains),
'MR<MC'=sum(lp$resource.M<lp$consumer.M),
'MR=MC'=sum(lp$resource.M==lp$consumer.M),
'MR>MC'=sum(lp$resource.M>lp$consumer.M),
'All links'=nrow(lp)))
})
# One column per community.
res <- do.call('cbind', res)
colnames(res) <- c('TL84', 'TL86', 'Ythan Estuary')
print(round(res,2))
###################################################
### code chunk number 67: PlotsAndStats.Rnw:1095-1107
###################################################
getOption("SweaveHooks")[["fig"]]()
# Reproduce Cohen et al. (2009) Fig. 1: per community, an N-v-M panel with
# node symbols keyed to trophic position (circle=basal default,
# dot=intermediate, star=top), then the upper/lower-angle plot.
par(mfrow=c(3,2))
for(community in list(TL84, TL86, YthanEstuary))
{
community <- RemoveIsolatedNodes(community)
pch <- rep(1, NumberOfNodes(community))
pch[IsIntermediateNode(community)] <- 20
pch[IsTopLevelNode(community)] <- 8
PlotNvM(community, col=1, highlight.nodes=NULL, show.web=FALSE,
main='', pch=pch)
PlotAuppervAlower(community, main='')
}
title(main='Cohen et al. (2009) PNAS, Fig. 1 (p 22336)', outer=TRUE)
###################################################
### code chunk number 68: PlotsAndStats.Rnw:1116-1119
###################################################
# Quantitative descriptors per node for the Chesapeake Bay web, weighted
# by the 'biomass.flow' link property; show the first six rows.
data(ChesapeakeBay)
res <- NodeQuantitativeDescriptors(ChesapeakeBay, 'biomass.flow')
print(round(res[1:6,],2))
###################################################
### code chunk number 69: PlotsAndStats.Rnw:1123-1125
###################################################
# Whole-web quantitative descriptors, same weighting.
res <- QuantitativeDescriptors(ChesapeakeBay, 'biomass.flow')
print(round(res,3))
| /inst/doc/PlotsAndStats.R | no_license | cran/cheddar | R | false | false | 26,536 | r | ### R code from vignette source 'PlotsAndStats.Rnw'
###################################################
### code chunk number 1: PlotsAndStats.Rnw:33-44
###################################################
# NOTE(review): everything from here down repeats code chunks 1-27 that
# already appear earlier in this file — this looks like a dataset-extraction
# duplication artifact. Verify against the original vignette before removing.
library(cheddar)
# Makes copy-paste much less painful
options(continue=' ')
options(width=90)
options(prompt='> ')
options(SweaveHooks = list(fig=function() par(mgp=c(2.5,1,0),
mar=c(4,4,2,1),
oma=c(0,0,1,0),
cex.main=0.8)))
###################################################
### code chunk number 2: PlotsAndStats.Rnw:122-124
###################################################
# NOTE(review): duplicate of code chunks 2-12 earlier in this file.
getOption("SweaveHooks")[["fig"]]()
data(TL84)
PlotNPS(TL84, 'Log10M', 'Log10N')
###################################################
### code chunk number 3: PlotsAndStats.Rnw:142-143
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNPS(TL84, 'Log10M', 'Log10N', show.web=FALSE, highlight.nodes=NULL)
###################################################
### code chunk number 4: PlotsAndStats.Rnw:150-151
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNPS(TL84, 'Log10M', 'Log10N', show.nodes.as='labels', show.web=FALSE)
###################################################
### code chunk number 5: PlotsAndStats.Rnw:158-160
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNPS(TL84, 'Log10M', 'Log10N', show.nodes.as='labels', show.web=FALSE,
node.labels='node', cex=0.5)
###################################################
### code chunk number 6: PlotsAndStats.Rnw:166-169
###################################################
getOption("SweaveHooks")[["fig"]]()
lots.of.letters <- c(letters, LETTERS, paste(LETTERS,letters,sep=''))
PlotNPS(TL84, 'Log10M', 'Log10N', show.nodes.as='labels', show.web=FALSE,
node.labels=lots.of.letters[1:NumberOfNodes(TL84)])
###################################################
### code chunk number 7: PlotsAndStats.Rnw:174-175
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNPS(TL84, 'Log10M', 'Log10N', show.nodes.as='both', show.web=FALSE, cex=2)
###################################################
### code chunk number 8: PlotsAndStats.Rnw:185-187
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNPS(TL84, 'Log10M', 'Log10N', xlab=Log10MLabel(TL84),
ylab=Log10NLabel(TL84))
###################################################
### code chunk number 9: PlotsAndStats.Rnw:197-206
###################################################
getOption("SweaveHooks")[["fig"]]()
par(mfrow=c(1,3))
PlotNPS(TL84, 'Log10M', 'OutDegree', show.web=FALSE)
abline(lm(OutDegree(TL84) ~ Log10M(TL84)))
PlotNPS(TL84, 'Log10M', 'InDegree', show.web=FALSE)
abline(lm(InDegree(TL84) ~ Log10M(TL84)))
PlotNPS(TL84, 'Log10M', 'Degree', show.web=FALSE)
abline(lm(Degree(TL84) ~ Log10M(TL84)))
###################################################
### code chunk number 10: PlotsAndStats.Rnw:218-219
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNPS(TL84, 'Log10M', 'PreyAveragedTrophicLevel')
###################################################
### code chunk number 11: PlotsAndStats.Rnw:226-227
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNPS(TL84, 'Log10M', 'ChainAveragedTrophicLevel')
###################################################
### code chunk number 12: PlotsAndStats.Rnw:242-247
###################################################
getOption("SweaveHooks")[["fig"]]()
par(mfrow=c(1,2))
PlotNPS(TL84, 'Log10M', 'PreyAveragedTrophicLevel', ylim=c(1, 6),
main='Prey-averaged')
PlotNPS(TL84, 'Log10M', 'ChainAveragedTrophicLevel', ylim=c(1, 6),
main='Chain-averaged')
###################################################
### code chunk number 13: PlotsAndStats.Rnw:261-266
###################################################
# NOTE(review): duplicate of code chunks 13-21 earlier in this file.
getOption("SweaveHooks")[["fig"]]()
par(mfrow=c(2,2))
PlotMvN(TL84)
PlotNvM(TL84)
PlotBvM(TL84)
PlotMvB(TL84)
###################################################
### code chunk number 14: PlotsAndStats.Rnw:279-280
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotRankNPS(TL84, 'Log10N')
###################################################
### code chunk number 15: PlotsAndStats.Rnw:285-286
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotRankNPS(TL84, 'Log10N', rank.by='M')
###################################################
### code chunk number 16: PlotsAndStats.Rnw:294-295
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotRankNPS(TL84, 'Log10N', rank.by='M', show.web=TRUE)
###################################################
### code chunk number 17: PlotsAndStats.Rnw:300-301
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotRankNPS(TL84, 'PreyAveragedTrophicLevel', rank.by='M')
###################################################
### code chunk number 18: PlotsAndStats.Rnw:309-310
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotRankNPS(TL84, 'PreyAveragedTrophicLevel', rank.by='M', log10.rank=TRUE)
###################################################
### code chunk number 19: PlotsAndStats.Rnw:320-324
###################################################
getOption("SweaveHooks")[["fig"]]()
par(mfrow=c(1,3))
PlotMvRankM(TL84)
PlotNvRankN(TL84)
PlotBvRankB(TL84)
###################################################
### code chunk number 20: PlotsAndStats.Rnw:338-339
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNPSDistribution(TL84, 'Log10M')
###################################################
### code chunk number 21: PlotsAndStats.Rnw:345-346
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNPSDistribution(TL84, 'Log10M', density.args=list(bw=3))
###################################################
### code chunk number 22: PlotsAndStats.Rnw:366-367
###################################################
# NOTE(review): duplicate of code chunks 22-26 earlier in this file.
getOption("SweaveHooks")[["fig"]]()
PlotNvM(TL84, col=1, pch=19, highlight.nodes=NULL)
###################################################
### code chunk number 23: PlotsAndStats.Rnw:374-375
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNvM(TL84, col=1:56, pch=19, highlight.nodes=NULL)
###################################################
### code chunk number 24: PlotsAndStats.Rnw:384-385
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNvM(TL84, colour.by='resolved.to', pch=19, highlight.nodes=NULL)
###################################################
### code chunk number 25: PlotsAndStats.Rnw:393-397
###################################################
getOption("SweaveHooks")[["fig"]]()
colour.spec <- c(Species='purple3', Genus='green3', 'red3')
PlotNvM(TL84, colour.by='resolved.to', colour.spec=colour.spec, pch=19,
highlight.nodes=NULL)
legend("topright", legend=names(colour.spec), pch=19, col=colour.spec)
###################################################
### code chunk number 26: PlotsAndStats.Rnw:408-420
###################################################
getOption("SweaveHooks")[["fig"]]()
symbol.spec = c(Bacteria=21, Plantae=22, Chromista=23,
Protozoa=24, Animalia=25, 19)
colour.spec = c(Bacteria='purple3', Plantae='green3',
Chromista='blue3', Protozoa='orange3',
Animalia='red3', 'black')
PlotNvM(TL84,
symbol.by='kingdom', symbol.spec=symbol.spec,
bg.by='kingdom', bg.spec=colour.spec,
colour.by='kingdom', colour.spec=colour.spec,
highlight.nodes=NULL)
legend("topright", legend=names(colour.spec), pch=symbol.spec,
col=colour.spec, pt.bg=colour.spec)
###################################################
### code chunk number 27: PlotsAndStats.Rnw:432-446
###################################################
getOption("SweaveHooks")[["fig"]]()
symbol.spec = c(Bacteria=21, Plantae=22, Chromista=23,
Protozoa=24, Animalia=25, 19)
colour.spec = c(Bacteria='purple3', Plantae='green3',
Chromista='blue3', Protozoa='orange3',
Animalia='red3', 'black')
PlotNvM(TL84,
symbol.by='kingdom', symbol.spec=symbol.spec,
bg.by='kingdom', bg.spec=colour.spec,
colour.by='kingdom', colour.spec=colour.spec,
highlight.nodes=NULL, show.web=FALSE)
legend("topright", legend=names(colour.spec), pch=symbol.spec,
col=colour.spec, pt.bg=colour.spec)
models <- NvMLinearRegressions(TL84, class='kingdom')
colours <- PlotLinearModels(models, colour.spec=colour.spec)
###################################################
### code chunk number 28: PlotsAndStats.Rnw:457-458
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNvM(TL84, pch=NA, highlight.nodes=NULL)
###################################################
### code chunk number 29: PlotsAndStats.Rnw:471-480
###################################################
# Chunks 29-38: the top/right-tick option, node/link highlighting, and how
# missing M or N values are shown (show.na).
getOption("SweaveHooks")[["fig"]]()
par(mfrow=c(1,2))
# Don't add ticks
options(cheddarTopAndRightTicks=FALSE)
PlotNvM(TL84)
# Add ticks
options(cheddarTopAndRightTicks=TRUE)
PlotNvM(TL84)
###################################################
### code chunk number 30: PlotsAndStats.Rnw:496-497
###################################################
getOption("SweaveHooks")[["fig"]]()
# highlight.nodes accepts a selector function (here cheddar's Cannibals).
PlotNvM(TL84, highlight.nodes=Cannibals)
###################################################
### code chunk number 31: PlotsAndStats.Rnw:503-504
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNvM(TL84, highlight.nodes=IsolatedNodes)
###################################################
### code chunk number 32: PlotsAndStats.Rnw:510-511
###################################################
getOption("SweaveHooks")[["fig"]]()
# ...or explicit node names.
PlotNvM(TL84, highlight.nodes='Chaoborus punctipennis')
###################################################
### code chunk number 33: PlotsAndStats.Rnw:524-525
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNvM(TL84, highlight.links=ResourceLargerThanConsumer)
###################################################
### code chunk number 34: PlotsAndStats.Rnw:531-533
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNvM(TL84, highlight.nodes='Chaoborus punctipennis',
highlight.links=TrophicLinksForNodes(TL84, 'Chaoborus punctipennis'))
###################################################
### code chunk number 35: PlotsAndStats.Rnw:554-558
###################################################
getOption("SweaveHooks")[["fig"]]()
# YthanEstuary contains nodes with missing M/N, hence the show.na contrast.
data(YthanEstuary)
par(mfrow=c(1,2))
PlotNvM(YthanEstuary)
PlotNvM(YthanEstuary, show.na=TRUE)
###################################################
### code chunk number 36: PlotsAndStats.Rnw:568-569
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNvM(YthanEstuary, xlim=c(-10, 4), ylim=c(-10, 13), show.na=TRUE)
###################################################
### code chunk number 37: PlotsAndStats.Rnw:581-604
###################################################
getOption("SweaveHooks")[["fig"]]()
# Demonstrate show.na by rebuilding TL84 with NAs injected into M and/or N
# for selected nodes; each panel labels which values were blanked.
par(mfrow=c(2,2))
np <- NPS(TL84)
np[1,'M'] <- NA
PlotNvM(Community(nodes=np, trophic.links=TLPS(TL84), properties=CPS(TL84)),
main='Node 1 M=NA', show.nodes.as='both', cex=2, show.na=TRUE)
np <- NPS(TL84)
np[1,'N'] <- NA
PlotNvM(Community(nodes=np, trophic.links=TLPS(TL84), properties=CPS(TL84)),
main='Node 1 N=NA', show.nodes.as='both', cex=2, show.na=TRUE)
np <- NPS(TL84)
np[1,'M'] <- NA
np[1,'N'] <- NA
PlotNvM(Community(nodes=np, trophic.links=TLPS(TL84), properties=CPS(TL84)),
main='Node 1 M=NA and N=NA', show.nodes.as='both', cex=2, show.na=TRUE)
np <- NPS(TL84)
np[c(10, 20, 30, 40),'M'] <- NA
np[c(10, 20, 30, 40),'N'] <- NA
PlotNvM(Community(nodes=np, trophic.links=TLPS(TL84), properties=CPS(TL84)),
main='Nodes 10, 20, 30 and 40 M=NA and N=NA', show.nodes.as='both',
cex=2, show.na=TRUE)
###################################################
### code chunk number 38: PlotsAndStats.Rnw:616-619
###################################################
getOption("SweaveHooks")[["fig"]]()
par(mfrow=c(1,2))
PlotMvRankM(YthanEstuary)
PlotMvRankM(YthanEstuary, show.na=TRUE)
###################################################
### code chunk number 39: PlotsAndStats.Rnw:643-644
###################################################
# Chunks 39-43: trophic-link-level plots; chunks 44-46: per-class sums.
getOption("SweaveHooks")[["fig"]]()
PlotTLPS(TL84, 'resource.Log10M', 'consumer.Log10M')
###################################################
### code chunk number 40: PlotsAndStats.Rnw:653-654
###################################################
getOption("SweaveHooks")[["fig"]]()
# Force identical x and y axis limits so the 1:1 relation is comparable.
PlotTLPS(TL84, 'resource.Log10M', 'consumer.Log10M', axes.limits.equal=TRUE)
###################################################
### code chunk number 41: PlotsAndStats.Rnw:675-680
###################################################
getOption("SweaveHooks")[["fig"]]()
par(mfrow=c(2,2))
PlotPredationMatrix(TL84)
PlotMRvMC(TL84)
PlotNCvNR(TL84)
PlotBRvBC(TL84)
###################################################
### code chunk number 42: PlotsAndStats.Rnw:694-695
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotMRvMC(TL84)
###################################################
### code chunk number 43: PlotsAndStats.Rnw:703-705
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotMRvMC(TL84, colour.by='consumer.category', bg.by='consumer.category',
symbol.by='consumer.category')
###################################################
### code chunk number 44: PlotsAndStats.Rnw:716-719
###################################################
# Sums grouped by the default class ('category', per chunk 46 below).
SumMByClass(TL84)
SumNByClass(TL84)
SumBiomassByClass(TL84)
###################################################
### code chunk number 45: PlotsAndStats.Rnw:723-726
###################################################
SumMByClass(TL84, 'kingdom')
SumNByClass(TL84, 'kingdom')
SumBiomassByClass(TL84, 'kingdom')
###################################################
### code chunk number 46: PlotsAndStats.Rnw:732-734
###################################################
# These two lines are shown as equivalent spellings of the same computation.
SumBiomassByClass(TL84)
ApplyByClass(TL84, 'Biomass', 'category', sum)
###################################################
### code chunk number 47: PlotsAndStats.Rnw:747-749
###################################################
# Chunks 47-54: N-versus-M linear regressions, overall and per class.
models <- NvMLinearRegressions(TL84)
names(models)
###################################################
### code chunk number 48: PlotsAndStats.Rnw:752-753
###################################################
# Coefficients (intercept, slope) of each fitted model.
sapply(models, 'coef')
###################################################
### code chunk number 49: PlotsAndStats.Rnw:760-762
###################################################
models <- NvMLinearRegressions(TL84, class='phylum')
names(models)
###################################################
### code chunk number 50: PlotsAndStats.Rnw:770-771
###################################################
# Classes with too little data apparently yield NULL entries -- checked here.
sapply(models, is.null)
###################################################
### code chunk number 51: PlotsAndStats.Rnw:777-780
###################################################
data(BroadstoneStream)
models <- NvMLinearRegressions(BroadstoneStream)
sapply(models, is.null)
###################################################
### code chunk number 52: PlotsAndStats.Rnw:784-787
###################################################
NvMSlope(TL84)
NvMIntercept(TL84)
NvMSlopeAndIntercept(TL84)
###################################################
### code chunk number 53: PlotsAndStats.Rnw:790-793
###################################################
NvMSlopeByClass(TL84)
NvMInterceptByClass(TL84)
NvMSlopeAndInterceptByClass(TL84)
###################################################
### code chunk number 54: PlotsAndStats.Rnw:796-799
###################################################
NvMSlopeByClass(TL84, class='kingdom')
NvMInterceptByClass(TL84, class='kingdom')
NvMSlopeAndInterceptByClass(TL84, class='kingdom')
###################################################
### code chunk number 55: PlotsAndStats.Rnw:835-842
###################################################
# Chunks 55-61 reproduce figures from Jonsson et al. (2005), Advances in
# Ecological Research, comparing the TL84 and TL86 webs panel by panel.
getOption("SweaveHooks")[["fig"]]()
data(TL86)
par(mfrow=c(1,2))
PlotMvN(TL84, show.nodes.as='both', cex=2, xlim=c(-2, 10), ylim=c(-14, 0),
highlight.nodes=NULL, highlight.links=NULL, main='')
PlotMvN(TL86, show.nodes.as='both', cex=2, xlim=c(-2, 10), ylim=c(-14, 0),
highlight.nodes=NULL, highlight.links=NULL, main='')
title(main='Jonsson et al. (2005) AER, Fig. 3 (p 30)', outer=TRUE)
###################################################
### code chunk number 56: PlotsAndStats.Rnw:851-857
###################################################
getOption("SweaveHooks")[["fig"]]()
par(mfrow=c(1,2))
PlotMCvMR(TL84, xlim=c(-14, 0), ylim=c(-14, 0), main='')
# Dashed y = x reference line.
abline(a=0, b=1, lty=2)
PlotMCvMR(TL86, xlim=c(-14, 0), ylim=c(-14, 0), main='')
abline(a=0, b=1, lty=2)
title(main='Jonsson et al. (2005) AER, Fig. 4 (p 33)', outer=TRUE)
###################################################
### code chunk number 57: PlotsAndStats.Rnw:866-872
###################################################
getOption("SweaveHooks")[["fig"]]()
par(mfrow=c(2,2))
PlotNvM(TL84, xlim=c(-14, 0), ylim=c(-2,10), show.web=FALSE, main='')
PlotNvM(TL86, xlim=c(-14, 0), ylim=c(-2,10), show.web=FALSE, main='')
PlotBvM(TL84, xlim=c(-14, 0), ylim=c(-8,2), show.web=FALSE, main='')
PlotBvM(TL86, xlim=c(-14, 0), ylim=c(-8,2), show.web=FALSE, main='')
title(main='Jonsson et al. (2005) AER, Fig. 5 (p 37)', outer=TRUE)
###################################################
### code chunk number 58: PlotsAndStats.Rnw:881-891
###################################################
getOption("SweaveHooks")[["fig"]]()
par(mfrow=c(2,2))
PlotNCvNR(TL84, xlim=c(0, 10), ylim=c(-2,10), main='')
abline(a=0, b=1, lty=2)
PlotNCvNR(TL86, xlim=c(0, 10), ylim=c(-2,10), main='')
abline(a=0, b=1, lty=2)
PlotBCvBR(TL84, xlim=c(-8, -2), ylim=c(-8, -2), main='')
abline(a=0, b=1, lty=2)
PlotBCvBR(TL86, xlim=c(-8, -2), ylim=c(-8, -2), main='')
abline(a=0, b=1, lty=2)
title(main='Jonsson et al. (2005) AER, Fig. 7 (p 47)', outer=TRUE)
###################################################
### code chunk number 59: PlotsAndStats.Rnw:900-910
###################################################
getOption("SweaveHooks")[["fig"]]()
# Abundance and biomass pyramids, with nodes binned into integer trophic
# levels via floor(TrophicHeight(...)); isolated nodes removed first.
par(mfrow=c(2,2))
TL84.no.iso <- RemoveIsolatedNodes(TL84)
TL86.no.iso <- RemoveIsolatedNodes(TL86)
tl84.levels <- floor(TrophicHeight(TL84.no.iso))
tl86.levels <- floor(TrophicHeight(TL86.no.iso))
PlotNPyramid(TL84.no.iso, level=tl84.levels, main='', ylab='Trophic height')
PlotNPyramid(TL86.no.iso, level=tl86.levels, main='')
PlotBPyramid(TL84.no.iso, level=tl84.levels, main='', ylab='Trophic height')
PlotBPyramid(TL86.no.iso, level=tl86.levels, main='')
title(main='Jonsson et al. (2005) AER, Fig. 8 (p 49)', outer=TRUE)
###################################################
### code chunk number 60: PlotsAndStats.Rnw:919-925
###################################################
getOption("SweaveHooks")[["fig"]]()
par(mfrow=c(2,2))
PlotNvRankN(TL84, xlim=c(0,60), ylim=c(-2, 10), main='')
PlotNvRankN(TL86, xlim=c(0,60), ylim=c(-2, 10), main='')
PlotBvRankB(TL84, xlim=c(0,60), ylim=c(-8, -2), main='')
PlotBvRankB(TL86, xlim=c(0,60), ylim=c(-8, -2), main='')
title(main='Jonsson et al. (2005) AER, Fig. 10 (p 57)', outer=TRUE)
###################################################
### code chunk number 61: PlotsAndStats.Rnw:934-946
###################################################
getOption("SweaveHooks")[["fig"]]()
par(mfrow=c(2,2))
# NOTE(review): ylab uses Log10NLabel(TL84)/Log10BLabel(TL84) in all four
# panels, including the TL86 ones -- presumably intentional (shared label).
PlotRankNPS(TL84, property='Log10N', rank.by='M', log10.rank=TRUE,
xlim=c(0,2), ylim=c(-2, 10), ylab=Log10NLabel(TL84), main='')
PlotRankNPS(TL86, property='Log10N', rank.by='M', log10.rank=TRUE,
xlim=c(0,2), ylim=c(-2, 10), ylab=Log10NLabel(TL84), main='')
PlotRankNPS(TL84, property='Log10Biomass', rank.by='M',
log10.rank=TRUE, xlim=c(0,2), ylim=c(-8, -2),
ylab=Log10BLabel(TL84), main='')
PlotRankNPS(TL86, property='Log10Biomass', rank.by='M',
log10.rank=TRUE, xlim=c(0,2), ylim=c(-8, -2),
ylab=Log10BLabel(TL84), main='')
title(main='Jonsson et al. (2005) AER, Fig. 11 (p 60)', outer=TRUE)
###################################################
### code chunk number 62: PlotsAndStats.Rnw:957-981
###################################################
getOption("SweaveHooks")[["fig"]]()
# Scatter one node property of community 'a' (x axis) against the same
# property of community 'b' (y axis), matching nodes by name. Nodes present
# in only one community index as NA and are positioned by
# PlaceMissingPoints() relative to the supplied limits. A dashed y = x line
# marks equal values. Extra arguments are passed on to plot().
PlotCommunityVCommunity <- function(a, b, property, xlim=NULL, ylim=NULL, ...)
{
    # Property values of one community, keyed by node name.
    GetValues <- function(community)
    {
        v <- NPS(community, property)[,property]
        names(v) <- NP(community, 'node')
        return (v)
    }
    va <- GetValues(a)
    vb <- GetValues(b)
    # Union of node names; order follows 'a' first, then nodes unique to 'b'.
    nodes <- union(names(va), names(vb))
    xy <- PlaceMissingPoints(va[nodes], xlim, vb[nodes], ylim)
    plot(xy[,1], xy[,2], xlim=xlim, ylim=ylim, ...)
    abline(a=0, b=1, lty=2)
}
par(mfrow=c(1,2))
# Compare node-by-node N and B between TL84 and TL86 using the helper above.
PlotCommunityVCommunity(TL84, TL86, 'Log10N', xlim=c(-2,10), ylim=c(-2,10),
xlab=~log[10]~(N~of~84), ylab=~log[10]~(N~of~86),pch=19)
PlotCommunityVCommunity(TL84, TL86, 'Log10Biomass',
xlim=c(-8,-2), ylim=c(-8,-2),
xlab=~log[10]~(B~of~84), ylab=~log[10]~(B~of~86),pch=19)
title(main='Jonsson et al. (2005) AER, Fig. 12 (p 61)', outer=TRUE)
###################################################
### code chunk number 63: PlotsAndStats.Rnw:994-1010
###################################################
getOption("SweaveHooks")[["fig"]]()
# For each of the first two pH webs: the N-v-M plot plus a density of the
# per-link interaction-strength proxy log10((MC/MR)^0.75).
data(pHWebs)
par(mfrow=c(2,2))
for(community in pHWebs[1:2])
{
PlotNvM(community, xlim=c(-15, 10), ylim=c(-5,15), main='',
highlight.nodes=NULL)
text(-15, 13, with(CPS(community), paste(title, ', pH ', pH, sep='')),
adj=0, cex=1.5)
tlps <- TLPS(community, node.properties='M')
# Drop links where either endpoint's mass is unknown.
tlps <- tlps[!is.na(tlps$resource.M) & !is.na(tlps$consumer.M),]
interaction.strength <- log10( (tlps$consumer.M / tlps$resource.M)^0.75 )
plot(density(interaction.strength), xlim=c(-4,14), ylim=c(0,0.6),
main='', xlab=~log[10]((M[C]/M[R])^0.75))
rug(interaction.strength)
}
title(main='Layer et al. (2010) AER, Fig. 6 (p 282)', outer=TRUE)
###################################################
### code chunk number 64: PlotsAndStats.Rnw:1022-1037
###################################################
getOption("SweaveHooks")[["fig"]]()
data(BroadstoneStream)
par(mfrow=c(1,2))
PlotMvN(BroadstoneStream, show.nodes.as='labels', label.cex=0.8,
xlim=c(-2, 4.2), ylim=c(-6,2), main='', show.na=FALSE,
highlight.links=NULL)
abline(a=0, b=-1)
# Second panel: draw only links where the resource is heavier than the
# consumer (lty 0 hides a link, lty 1 shows it).
tlps <- TLPS(BroadstoneStream, node.properties='M')
lty <- rep(0, NumberOfTrophicLinks(BroadstoneStream))
lty[tlps$resource.M > tlps$consumer.M] <- 1
PlotMvN(BroadstoneStream, show.nodes.as='labels', label.cex=0.8,
xlim=c(-2, 4.2), ylim=c(-6,2), main='', show.na=FALSE,
highlight.links=NULL, link.lty=lty)
abline(a=0, b=-1)
title(main='Woodward et al. (2005) AER, Fig. 4 (p 108)', outer=TRUE)
###################################################
### code chunk number 65: PlotsAndStats.Rnw:1052-1055
###################################################
collection <- CommunityCollection(list(TL84, TL86, YthanEstuary))
table <- NvMTriTrophicTable(collection)
print(round(table,2))
###################################################
### code chunk number 66: PlotsAndStats.Rnw:1059-1088
###################################################
# Count tri-trophic mass orderings (MR/MI/MC = resource, intermediate and
# top mass of each 3-node chain) and pairwise MR-vs-MC orderings for three
# webs, after stripping NA-mass nodes, cannibalistic links and isolates.
res <- lapply(list(TL84, TL86, YthanEstuary), function(community)
{
community <- RemoveNodes(community, remove=with(NPS(community), node[is.na(M) | is.na(N)]))
community <- RemoveCannibalisticLinks(community)
community <- RemoveIsolatedNodes(community)
chains <- ThreeNodeChains(community, node.properties='M')
MR <- chains$bottom.M
MI <- chains$intermediate.M
MC <- chains$top.M
lp <- TLPS(community, node.properties='M')
# The six mutually exclusive orderings partition 'All 2-chains'.
return (c('MR<=MI<=MC'=sum(MR<=MI & MI<=MC),
'MR<=MC<MI'=sum(MR<=MC & MC<MI),
'MI<MR<=MC'=sum(MI<MR & MR<=MC),
'MI<=MC<MR'=sum(MI<=MC & MC<MR),
'MC<MR<MI'=sum(MC<MR & MR<MI),
'MC<MI<MR'=sum(MC<MI & MI<MR),
'All 2-chains'=nrow(chains),
'MR<MC'=sum(lp$resource.M<lp$consumer.M),
'MR=MC'=sum(lp$resource.M==lp$consumer.M),
'MR>MC'=sum(lp$resource.M>lp$consumer.M),
'All links'=nrow(lp)))
})
# One column of counts per web.
res <- do.call('cbind', res)
colnames(res) <- c('TL84', 'TL86', 'Ythan Estuary')
print(round(res,2))
###################################################
### code chunk number 67: PlotsAndStats.Rnw:1095-1107
###################################################
getOption("SweaveHooks")[["fig"]]()
# Plot symbol encodes trophic role: 1 = basal (default), 20 = intermediate,
# 8 = top-level.
par(mfrow=c(3,2))
for(community in list(TL84, TL86, YthanEstuary))
{
community <- RemoveIsolatedNodes(community)
pch <- rep(1, NumberOfNodes(community))
pch[IsIntermediateNode(community)] <- 20
pch[IsTopLevelNode(community)] <- 8
PlotNvM(community, col=1, highlight.nodes=NULL, show.web=FALSE,
main='', pch=pch)
PlotAuppervAlower(community, main='')
}
title(main='Cohen et al. (2009) PNAS, Fig. 1 (p 22336)', outer=TRUE)
###################################################
### code chunk number 68: PlotsAndStats.Rnw:1116-1119
###################################################
# Quantitative descriptors for a flow-weighted web; show the first 6 nodes.
data(ChesapeakeBay)
res <- NodeQuantitativeDescriptors(ChesapeakeBay, 'biomass.flow')
print(round(res[1:6,],2))
###################################################
### code chunk number 69: PlotsAndStats.Rnw:1123-1125
###################################################
res <- QuantitativeDescriptors(ChesapeakeBay, 'biomass.flow')
print(round(res,3))
|
#Zip file downloaded and stored in project folder.
#Data extraction from zip file
# Produces plot1.png..plot4.png for 1-2 Feb 2007 of the household power data.
library(lubridate)
# NOTE(review): dplyr is loaded but never used below.
library(dplyr)
EDHfile <- "exdata_data_household_power_consumption.zip"
unzip(EDHfile)
# '?' marks missing readings in this data set, hence na.strings = "?".
EDH <- read.table("household_power_consumption.txt", header=TRUE, na.strings = "?", sep=";")
#Converting all variables to dates and times using strptime and lubridate. New variable made for datetime combined.
EDH$DT <- paste(EDH$Date, EDH$Time, sep=" ")
EDH$datetime <- strptime(EDH$DT, "%d/%m/%Y %H:%M:%S")
EDH$Date <- dmy(EDH$Date)
# Round-trip through POSIXlt to normalise Time back to "HH:MM:SS" text.
EDH$Time <- strptime(EDH$Time, format = "%H:%M:%S")
EDH$Time <- format(EDH$Time, "%H:%M:%S")
# Keep only 1-2 Feb 2007: the half-open range [2007-02-01, 2007-02-03).
EDHsubset <- subset(EDH, EDH$Date >= as.Date("2007-02-01") & EDH$Date < as.Date("2007-02-03"))
#histogram of global active power (plot1)
png("plot1.png", width=480, height=480)
hist(EDHsubset$Global_active_power, main= "Global Active Power", col="red", xlab="Global Active Power")
dev.off()
#line scatter plot (plot2)
png("plot2.png", width=480, height=480)
with(EDHsubset, plot(datetime, Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)"))
dev.off()
#line scatter plot with 3 submeters (plot3)
png("plot3.png", width=480, height=480)
with(EDHsubset, plot(datetime, Sub_metering_1, type = "l", col="black", xlab="", ylab="Energy Sub metering"))
with(EDHsubset, lines(datetime, Sub_metering_2, type="l", col="red"))
with(EDHsubset, lines(datetime, Sub_metering_3, type= "l", col="blue"))
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
dev.off()
#multiple plot graph (plot4)
png("plot4.png", width=480, height=480)
par(mfrow=c(2,2))
with(EDHsubset, plot(datetime, Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)"))
with(EDHsubset, plot(datetime, Voltage, type="l", xlab="datetime", ylab="Voltage"))
with(EDHsubset, plot(datetime, Sub_metering_1, type = "l", col="black", xlab="", ylab="Energy Sub metering"))
with(EDHsubset, lines(datetime, Sub_metering_2, type="l", col="red"))
with(EDHsubset, lines(datetime, Sub_metering_3, type= "l", col="blue"))
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
with(EDHsubset, plot(datetime, Global_reactive_power, type="l", xlab="datetime", ylab="Global_reactive_power"))
dev.off() | /Plotscript.R | no_license | ritadelphine/ExData_Plotting1 | R | false | false | 2,351 | r | #Zip file downloaded and stored in project folder.
#Data extraction from zip file
# Produces plot1.png..plot4.png for 1-2 Feb 2007 of the UCI household power
# consumption data set.
library(lubridate)
# NOTE(review): dplyr is loaded but never used below; kept for compatibility.
library(dplyr)
EDHfile <- "exdata_data_household_power_consumption.zip"
unzip(EDHfile)
# '?' marks missing readings in this data set, hence na.strings = "?".
EDH <- read.table("household_power_consumption.txt", header=TRUE, na.strings = "?", sep=";")
# Subset to the two target days BEFORE any date-time parsing: the raw Date
# column is d/m/Y text, so 1-2 Feb 2007 are exactly these two strings.
# Filtering first avoids running strptime()/dmy() over the whole file.
EDHsubset <- subset(EDH, Date %in% c("1/2/2007", "2/2/2007"))
# Combined date-time, used as the x axis of every time-series plot below.
EDHsubset$DT <- paste(EDHsubset$Date, EDHsubset$Time, sep=" ")
EDHsubset$datetime <- strptime(EDHsubset$DT, "%d/%m/%Y %H:%M:%S")
EDHsubset$Date <- dmy(EDHsubset$Date)
# Normalise Time to "HH:MM:SS" text in a single round-trip through POSIXlt.
EDHsubset$Time <- format(strptime(EDHsubset$Time, format = "%H:%M:%S"), "%H:%M:%S")
#histogram of global active power (plot1)
png("plot1.png", width=480, height=480)
hist(EDHsubset$Global_active_power, main= "Global Active Power", col="red", xlab="Global Active Power")
dev.off()
#line scatter plot (plot2)
png("plot2.png", width=480, height=480)
with(EDHsubset, plot(datetime, Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)"))
dev.off()
#line scatter plot with 3 submeters (plot3)
png("plot3.png", width=480, height=480)
with(EDHsubset, plot(datetime, Sub_metering_1, type = "l", col="black", xlab="", ylab="Energy Sub metering"))
with(EDHsubset, lines(datetime, Sub_metering_2, type="l", col="red"))
with(EDHsubset, lines(datetime, Sub_metering_3, type= "l", col="blue"))
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
dev.off()
#multiple plot graph (plot4); the device is closed by the dev.off() that
#immediately follows this block.
png("plot4.png", width=480, height=480)
par(mfrow=c(2,2))
with(EDHsubset, plot(datetime, Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)"))
with(EDHsubset, plot(datetime, Voltage, type="l", xlab="datetime", ylab="Voltage"))
with(EDHsubset, plot(datetime, Sub_metering_1, type = "l", col="black", xlab="", ylab="Energy Sub metering"))
with(EDHsubset, lines(datetime, Sub_metering_2, type="l", col="red"))
with(EDHsubset, lines(datetime, Sub_metering_3, type= "l", col="blue"))
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
with(EDHsubset, plot(datetime, Global_reactive_power, type="l", xlab="datetime", ylab="Global_reactive_power"))
dev.off() |
\name{RNAinteract-class}
\Rdversion{1.1}
\docType{class}
\alias{RNAinteract-class}
\alias{show,RNAinteract-method}
\title{Class "RNAinteract"}
\description{
A class for double perturbation experiments (genetic interaction screens, drug-drug interaction screens). There are functions for creation, analysis, and display of interaction screens.
}
\section{Objects from the Class}{
Objects can be created by calls of \code{\link{createRNAinteractFromFiles}}. See vignette("RNAinteract") for an example of creating an \code{RNAinteract} object.
}
\section{Slots}{
\describe{
\item{\code{data}:}{Object of class \code{"array"} with dimension \code{sgi@F x sgi@S x sgi@C}. The raw data of the screen. }
\item{\code{screenNames}:}{Object of class \code{"character"} with length \code{sgi@S}. }
\item{\code{channelNames}:}{Object of class \code{"character"} with length \code{sgi@C}.}
\item{\code{well}:}{Object of class \code{"character"} with length \code{sgi@F}. Well name (e.g. F04) for each measurement.}
\item{\code{plate}:}{Object of class \code{"integer"} with length \code{sgi@F}. Number of the plate for each measurement}
\item{\code{pdim}:}{Object of class \code{"integer"} of length 2. Plate dimensions.}
\item{\code{NT}:}{Object of class \code{"integer"} of length 1. Number of template reagents.}
\item{\code{NQ}:}{Object of class \code{"integer"} of length 1. Number of query reagents.}
\item{\code{C}:}{Object of class \code{"integer"} of length 1. Number of readout channels.}
\item{\code{S}:}{Object of class \code{"integer"} of length 1. Number of screens.}
\item{\code{F}:}{Object of class \code{"integer"} of length 1. Number of measurements or single experiments per screen.}
\item{\code{reagents}:}{Object of class \code{"data.frame"} describing each reagent. Obligatory columns: \code{RID} and \code{TID}.}
\item{\code{targets}:}{Object of class \code{"data.frame"} describing each target gene. Obligatory columns: \code{TID}, \code{Symbol}, \code{group}, \code{GID}.}
\item{\code{templateDesign}:}{Object of class \code{"data.frame"} with \code{sgi@NT} rows describing the template design. Obligatory columns: \code{TemplatePlate}, \code{Well}, \code{RID}, \code{QueryNr}. }
\item{\code{queryDesign}:}{Object of class \code{"data.frame"} with \code{sgi@NQ} rows describing the query design. Obligatory columns: \code{Plate}, \code{TemplatePlate}, \code{QueryNr}, \code{RID}. }
\item{\code{transformation}:}{Object of class \code{"character"} of length \code{sgi@C}. The transformation applied to the input data. }
\item{\code{mainTemplate}:}{Object of class \code{"array"} with dimension \code{sgi@NT x sgi@S x sgi@C}. The main effect of the template reagents.}
\item{\code{mainQuery}:}{Object of class \code{"array"} with dimension \code{sgi@NQ x sgi@S x sgi@C}. The main effect of the query reagents.}
\item{\code{mainSderrTemplate}:}{Object of class \code{"array"} with dimension \code{sgi@NT x sgi@S x sgi@C}. The standard error of the main effect of the template reagents.}
\item{\code{mainSderrQuery}:}{Object of class \code{"array"} with dimension \code{sgi@NQ x sgi@S x sgi@C}. The standard error of the main effect of the query reagents.}
\item{\code{mainSdTemplate}:}{Object of class \code{"array"} with dimension \code{sgi@NT x sgi@S x sgi@C}. The standard deviation of the main effect of the template reagents.}
\item{\code{mainSdQuery}:}{Object of class \code{"array"} with dimension \code{sgi@NQ x sgi@S x sgi@C}. The standard deviation of the main effect of the query reagents.}
\item{\code{mainTimeEffect}:}{Object of class \code{"array"} with dimension \code{sgi@NQ x sgi@S x sgi@C}. The systematic changes of the query main effects, e.g. decreasing cell number over time.}
\item{\code{mainSpatialEffect}:}{Object of class \code{"array"} with dimension \code{sgi@F x sgi@S x sgi@C}. The systematic spatial plate effects.}
\item{\code{mainSpatialEffectRow}:}{Object of class \code{"array"}. Spatial effects per row (as computed by Bscore).}
\item{\code{mainSpatialEffectCol}:}{Object of class \code{"array"}. Spatial effects per column (as computed by Bscore).}
\item{\code{mainNeg}:}{Object of class \code{"array"} with dimension \code{sgi@S x sgi@C}. The main effect of the negative control.}
\item{\code{mainNegTemplate}:}{Object of class \code{"array"} with dimension \code{sgi@S x sgi@C}. The template main effect of the negative control.}
\item{\code{mainNegQuery}:}{Object of class \code{"array"} with dimension \code{sgi@S x sgi@C}. The query main effect of the negative control.}
\item{\code{data2mainTemplate}:}{Object of class \code{"integer"} with dimension \code{sgi@F}. Mapping of single experiments to template reagents.}
\item{\code{data2mainQuery}:}{Object of class \code{"integer"} with dimension \code{sgi@F}. Mapping of single experiments to query reagents.}
\item{\code{ni.model}:}{Object of class \code{"array"} with dimension \code{sgi@F x sgi@S x sgi@C}. The expected values of the non-interacting model.}
\item{\code{pi}:}{Object of class \code{"array"} with dimension \code{sgi@F x sgi@S x sgi@C}. The pairwise interaction score.}
\item{\code{plateeffect}:}{Object of class \code{"array"}.}
\item{\code{p.value}:}{Object of class \code{"array"} with dimension \code{sgi@NT x sgi@NQ x sgi@S x sgi@C} describing the p.values.}
\item{\code{q.value}:}{Object of class \code{"array"} with dimension \code{sgi@NT x sgi@NQ x sgi@S x sgi@C} describing the q.values.}
}
}
\section{Methods}{
\describe{
\item{show}{\code{signature(object = "RNAinteract")}: ... }
}
}
\author{
Bernd Fischer
}
\seealso{
\code{\link{RNAinteract-package}}
}
\examples{
showClass("RNAinteract")
}
\keyword{classes}
| /man/RNAinteract-class.Rd | no_license | Huber-group-EMBL/RNAinteract | R | false | false | 5,799 | rd | \name{RNAinteract-class}
\Rdversion{1.1}
\docType{class}
\alias{RNAinteract-class}
\alias{show,RNAinteract-method}
\title{Class "RNAinteract"}
\description{
A class for double perturbation experiments (genetic interaction screens, drug-drug interaction screens). There are functions for creation, analysis, and display of interaction screens.
}
\section{Objects from the Class}{
Objects can be created by calls of \code{\link{createRNAinteractFromFiles}}. See vignette("RNAinteract") for an example of creating an \code{RNAinteract} object.
}
\section{Slots}{
\describe{
\item{\code{data}:}{Object of class \code{"array"} with dimension \code{sgi@F x sgi@S x sgi@C}. The raw data of the screen. }
\item{\code{screenNames}:}{Object of class \code{"character"} with length \code{sgi@S}. }
\item{\code{channelNames}:}{Object of class \code{"character"} with length \code{sgi@C}.}
\item{\code{well}:}{Object of class \code{"character"} with length \code{sgi@F}. Well name (e.g. F04) for each measurement.}
\item{\code{plate}:}{Object of class \code{"integer"} with length \code{sgi@F}. Number of the plate for each measurement}
\item{\code{pdim}:}{Object of class \code{"integer"} of length 2. Plate dimensions.}
\item{\code{NT}:}{Object of class \code{"integer"} of length 1. Number of template reagents.}
\item{\code{NQ}:}{Object of class \code{"integer"} of length 1. Number of query reagents.}
\item{\code{C}:}{Object of class \code{"integer"} of length 1. Number of readout channels.}
\item{\code{S}:}{Object of class \code{"integer"} of length 1. Number of screens.}
\item{\code{F}:}{Object of class \code{"integer"} of length 1. Number of measurements or single experiments per screen.}
\item{\code{reagents}:}{Object of class \code{"data.frame"} describing each reagent. Obligatory columns: \code{RID} and \code{TID}.}
\item{\code{targets}:}{Object of class \code{"data.frame"} describing each target gene. Obligatory columns: \code{TID}, \code{Symbol}, \code{group}, \code{GID}.}
\item{\code{templateDesign}:}{Object of class \code{"data.frame"} with \code{sgi@NT} rows describing the template design. Obligatory columns: \code{TemplatePlate}, \code{Well}, \code{RID}, \code{QueryNr}. }
\item{\code{queryDesign}:}{Object of class \code{"data.frame"} with \code{sgi@NQ} rows describing the query design. Obligatory columns: \code{Plate}, \code{TemplatePlate}, \code{QueryNr}, \code{RID}. }
\item{\code{transformation}:}{Object of class \code{"character"} of length \code{sgi@C}. The transformation applied to the input data. }
\item{\code{mainTemplate}:}{Object of class \code{"array"} with dimension \code{sgi@NT x sgi@S x sgi@C}. The main effect of the template reagents.}
\item{\code{mainQuery}:}{Object of class \code{"array"} with dimension \code{sgi@NQ x sgi@S x sgi@C}. The main effect of the query reagents.}
\item{\code{mainSderrTemplate}:}{Object of class \code{"array"} with dimension \code{sgi@NT x sgi@S x sgi@C}. The standard error of the main effect of the template reagents.}
\item{\code{mainSderrQuery}:}{Object of class \code{"array"} with dimension \code{sgi@NQ x sgi@S x sgi@C}. The standard error of the main effect of the query reagents.}
\item{\code{mainSdTemplate}:}{Object of class \code{"array"} with dimension \code{sgi@NT x sgi@S x sgi@C}. The standard deviation of the main effect of the template reagents.}
\item{\code{mainSdQuery}:}{Object of class \code{"array"} with dimension \code{sgi@NQ x sgi@S x sgi@C}. The standard deviation of the main effect of the query reagents.}
\item{\code{mainTimeEffect}:}{Object of class \code{"array"} with dimension \code{sgi@NQ x sgi@S x sgi@C}. The systematic changes of the query main effects, e.g. decreasing cell number over time.}
\item{\code{mainSpatialEffect}:}{Object of class \code{"array"} with dimension \code{sgi@F x sgi@S x sgi@C}. The systematic spatial plate effects.}
\item{\code{mainSpatialEffectRow}:}{Object of class \code{"array"}. Spatial effects per row (as computed by Bscore).}
\item{\code{mainSpatialEffectCol}:}{Object of class \code{"array"}. Spatial effects per column (as computed by Bscore).}
\item{\code{mainNeg}:}{Object of class \code{"array"} with dimension \code{sgi@S x sgi@C}. The main effect of the negative control.}
\item{\code{mainNegTemplate}:}{Object of class \code{"array"} with dimension \code{sgi@S x sgi@C}. The template main effect of the negative control.}
\item{\code{mainNegQuery}:}{Object of class \code{"array"} with dimension \code{sgi@S x sgi@C}. The query main effect of the negative control.}
\item{\code{data2mainTemplate}:}{Object of class \code{"integer"} with dimension \code{sgi@F}. Mapping of single experiments to template reagents.}
\item{\code{data2mainQuery}:}{Object of class \code{"integer"} with dimension \code{sgi@F}. Mapping of single experiments to query reagents.}
\item{\code{ni.model}:}{Object of class \code{"array"} with dimension \code{sgi@F x sgi@S x sgi@C}. The expected values of the non-interacting model.}
\item{\code{pi}:}{Object of class \code{"array"} with dimension \code{sgi@F x sgi@S x sgi@C}. The pairwise interaction score.}
\item{\code{plateeffect}:}{Object of class \code{"array"}.}
\item{\code{p.value}:}{Object of class \code{"array"} with dimension \code{sgi@NT x sgi@NQ x sgi@S x sgi@C} describing the p.values.}
\item{\code{q.value}:}{Object of class \code{"array"} with dimension \code{sgi@NT x sgi@NQ x sgi@S x sgi@C} describing the q.values.}
}
}
\section{Methods}{
\describe{
\item{show}{\code{signature(object = "RNAinteract")}: ... }
}
}
\author{
Bernd Fischer
}
\seealso{
\code{\link{RNAinteract-package}}
}
\examples{
showClass("RNAinteract")
}
\keyword{classes}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cloudwatchlogs_operations.R
\name{cloudwatchlogs_test_metric_filter}
\alias{cloudwatchlogs_test_metric_filter}
\title{Tests the filter pattern of a metric filter against a sample of log
event messages}
\usage{
cloudwatchlogs_test_metric_filter(filterPattern, logEventMessages)
}
\arguments{
\item{filterPattern}{[required]}
\item{logEventMessages}{[required] The log event messages to test.}
}
\description{
Tests the filter pattern of a metric filter against a sample of log
event messages. You can use this operation to validate the correctness
of a metric filter pattern.
}
\section{Request syntax}{
\preformatted{svc$test_metric_filter(
filterPattern = "string",
logEventMessages = list(
"string"
)
)
}
}
\keyword{internal}
| /cran/paws.management/man/cloudwatchlogs_test_metric_filter.Rd | permissive | johnnytommy/paws | R | false | true | 818 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cloudwatchlogs_operations.R
\name{cloudwatchlogs_test_metric_filter}
\alias{cloudwatchlogs_test_metric_filter}
\title{Tests the filter pattern of a metric filter against a sample of log
event messages}
\usage{
cloudwatchlogs_test_metric_filter(filterPattern, logEventMessages)
}
\arguments{
\item{filterPattern}{[required]}
\item{logEventMessages}{[required] The log event messages to test.}
}
\description{
Tests the filter pattern of a metric filter against a sample of log
event messages. You can use this operation to validate the correctness
of a metric filter pattern.
}
\section{Request syntax}{
\preformatted{svc$test_metric_filter(
filterPattern = "string",
logEventMessages = list(
"string"
)
)
}
}
\keyword{internal}
|
#author: Thomas Coleman
# Difference in Differences Regressions - Error Analysis and Graphs for data from Snow 1855
# See "Causality in the Time of Cholera" working paper at https://papers.ssrn.com/abstract=3262234
# and my John Snow project website, http://www.hilerun.org/econ/papers/snow
# This code is licensed under the BSD 2-Clause License, https://opensource.org/licenses/BSD-2-Clause
# This collect together some simple functions that produce a standard format graph, and are 'source'd
# into various notebooks
# **preperrdata** Before graphing, to prepare the data
# * Takes in *fittedmodel* - a regression that has been already run. From this it extracts the necessary
# parameters. Also *single*, a string which for "single" says that there is a single treatment effect.
# * Calculates the 1849 and 1854 predicted counts and rates
# * Calculates *approximate* 95% error bars around the predicted rates, based on whether the fitted model
# is Poisson or Negative Binomial
# * Produces an adjusted 1854 predicted rate, adjusting for the 1854 time effect and treatment effect, so
# that it is comparable to the 1849 predicted rate (for purposes of plotting with error bars)
# This function changes global data (the x1849 & x1854 dataframes) using the "<<-" instead of "<-"
# assignment. This is poor programming style but I could not find another easy way of doing what I wanted.
# **plot2_worker** Plots actual vs predicted, with error bars around the predicted
# * Actually does the plotting, given all the data as input arguments (sequence no. for sub-districts;
# the actual mean or rate; predicted; the 2.5% and 97.5% points; title)
# * btw, the hack for plotting error bars is from
# https://stackoverflow.com/questions/13032777/scatter-plot-with-error-bars
# **plot2** Is a cover function which unpacks the actual versus predicted mean from the appropriate dataframe
# **plot3** Plots actual 1849, 1854 (adjusted for time & treatment effects), predicted, with error bars
# **plotcomp** Plots actual 1849 versus 1854, with error bars around actual 1849
# **ploterrbars** is NOT a function you should use - I use it to print out .pdf versions of the graphs I want to use
# Prepare per-sub-district predicted counts/rates and approximate 95% confidence
# bands for graphing.
#
# fittedmodel: an already-fitted Poisson or negative-binomial DiD regression whose
#              fitted values 1:28 correspond to the 1849 rows and 29:56 to 1854.
# single:      "single" = one treatment effect, "two" = separate less/more-Lambeth
#              effects, anything else = continuous treatment (lambethperc54).
# Returns the model family string ("poisson" or the negative-binomial family name).
#
# SIDE EFFECTS: writes predcount / predrate / limdn / limup / limdnact / limupact /
# rateadjyr / rateadj columns into the GLOBAL data frames x1849 and x1854 via `<<-`
# (acknowledged below as poor style).
preperrdata <- function(fittedmodel,single = "single") { # This is not a good way to do this because I am
# changing globals from within the function (using <<-
# instead of <-)
# Function to prepare the data (error bars) for graphing
# The 2.5% and 97.5% confidence bands are calculated assuming either Poisson or Negative Binomial
# The rates are calculated by generating the counts up and down from the "expected" (from the fitted model)
expected <- exp(predict(fittedmodel)) # expected values
# theta is the NB dispersion parameter (NULL for a plain Poisson glm)
theta <- fittedmodel$theta
# Rows 1:28 of the stacked panel are 1849, rows 29:56 are 1854
x1849$predcount <<- expected[1:28]
x1854$predcount <<- expected[29:56]
# Rates are per 10,000 of the 1851 census population
x1849$predrate <<- 10000 * expected[1:28] / x1849$pop1851
x1854$predrate <<- 10000 * expected[29:56] / x1854$pop1851
xfamily <- family(fittedmodel)$family
if (xfamily == "poisson") { # Get 95% confidence bands depending on model used (Poiss vs Neg Binom)
# NOTE(review): this branch only fills the 1849 bands - x1854$limdn/limup and
# x1849$limdnact/limupact are not set, so plot3/plotcomp would reuse stale
# columns after a Poisson fit. TODO confirm this is intentional.
x1849$limdn <<- 10000 * qpois(.025,lambda=x1849$predcount) / x1849$pop1851
x1849$limup <<- 10000 * qpois(.975,lambda=x1849$predcount) / x1849$pop1851
} else {
x1849$limdn <<- 10000 * qnbinom(.025,size=theta,mu=x1849$predcount) / x1849$pop1851
x1849$limup <<- 10000 * qnbinom(.975,size=theta,mu=x1849$predcount) / x1849$pop1851
# NOTE(review): the next two lines divide the 1854 bands by x1849$pop1851 -
# confirm whether x1854$pop1851 was intended (both frames carry a pop1851 column).
x1854$limdn <<- 10000 * qnbinom(.025,size=theta,mu=x1854$predcount) / x1849$pop1851
x1854$limup <<- 10000 * qnbinom(.975,size=theta,mu=x1854$predcount) / x1849$pop1851
# Bands around the *actual* 1849 deaths (used by plotcomp)
x1849$limdnact <<- 10000 * qnbinom(.025,size=theta,mu=x1849$deaths) / x1849$pop1851
x1849$limupact <<- 10000 * qnbinom(.975,size=theta,mu=x1849$deaths) / x1849$pop1851
}
# Now adjust the 1854 predicted counts (and rates) for the time and treatment fixed effects -
# effectively netting out the 1854 effect and making it 1849-equivalent
xyr1854 <- (coef(fittedmodel)["year1854"])
# x3 starts as the inverse of the year-1854 fixed effect (multiplicative scale)
x3 <- exp(-xyr1854)
# Adjust for the year effect only - this should make 1849 & 1854 comparable net of time effect
x1854$rateadjyr <<- 10000 * (x1854$deaths*x3) / x1854$pop1851
# Adjust the 1854 actual for the estimated Treatment Effect and the estimate Year Effect
if (single == "single") { # model with single treatment effect
xdegless <- (coef(fittedmodel)["supplierSouthwarkVauxhall_Lambeth:year1854"])
xdegmore <- xdegless # Make both treatment effects the same
# The logical comparisons act as 0/1 indicators, so only treated rows are rescaled
x3 <- x3 * exp(-(x1854$lambethdegree == "less_Lambeth")*xdegless)
x3 <- x3 * exp(-(x1854$lambethdegree == "more_Lambeth")*xdegmore)
} else if (single == "two") { # model with two treatment effects
xdegless <- coef(fittedmodel)["lambethdegreeless_Lambeth:year1854"]
xdegmore <- coef(fittedmodel)["lambethdegreemore_Lambeth:year1854"]
x3 <- x3 * exp(-(x1854$lambethdegree == "less_Lambeth")*xdegless)
x3 <- x3 * exp(-(x1854$lambethdegree == "more_Lambeth")*xdegmore)
} else { # model with continuous treatment (population proportions)
xlambethperc54 <- coef(fittedmodel)["lambethperc54"]
x3 <- x3 * exp(-(x1854$lambethperc54 * xlambethperc54))
}
# Adjust for year & treatment effect - so that we can compare the actuals against each other
x1854$rateadj <<- 10000 * (x1854$deaths*x3) / x1854$pop1851
return(xfamily)
}
# "Worker" function to plot mean, predicted, and error bars
# "Worker" function: plot actual (filled red circle) vs predicted (open circle)
# mortality rates per sub-district, with dotted horizontal 95% error bars.
#
# yseq:          vertical positions (sub-district sequence numbers, plotted top-down
#                via the reversed ylim)
# xmean, xpred:  actual and predicted rates
# xlimdn, xlimup: 2.5% / 97.5% band endpoints around the prediction
# title:         plot title
# Called for its side effect of drawing; returns invisibly.
plot2_worker <- function(yseq, xmean, xlimdn, xpred, xlimup, title) {
  xrange <- range(c(xmean, xpred, xlimdn, xlimup))
  plot(xmean, yseq,
       xlim = xrange,
       ylim = rev(range(yseq)), col = "red",
       main = title,
       xlab = "Mortality rate actual (red filled) vs predicted (empty circle)",
       ylab = "sub-district",
       pch = 19)
  # Predicted values as open circles. Axes are already fixed by plot() above;
  # passing xlim/ylim to lines() only triggered
  # '"xlim" is not a graphical parameter' warnings, so they are dropped here.
  lines(xpred, yseq, type = "p", pch = 1)
  # Horizontal error bars drawn as double-headed dotted arrows; hack from
  # https://stackoverflow.com/questions/13032777/scatter-plot-with-error-bars
  invisible(arrows(xlimdn, yseq, xlimup, yseq, length = 0.05, angle = 90, code = 3, lty = 3))
}
# "Cover" function that takes in the dataframe and unpacks
# "Cover" function: restrict the data frame to one water supplier and unpack
# the columns plot2_worker() needs (sequence number, actual rate, predicted
# rate, and the 2.5%/97.5% band endpoints). Drawing happens in plot2_worker.
plot2 <- function(confidata, xsupplier, title) {
  onesupplier <- subset(confidata, supplier == xsupplier)
  invisible(plot2_worker(onesupplier[, "seq"],
                         onesupplier[, "rate"],
                         onesupplier[, "limdn"],
                         onesupplier[, "predrate"],
                         onesupplier[, "limup"],
                         title))
}
# Plot, for one supplier: 1849 actuals (filled red circle), 1854 actuals adjusted
# for the year & treatment effects ("rateadj" column, blue point, pch = 17),
# and the model prediction (open circle), with dotted 95% error bars around the
# prediction. preperrdata() must have been run first to populate the columns.
# NOTE(review): the x-axis label says "blue diamond" but pch = 17 is a filled
# triangle - confirm which was intended.
plot3 <- function(confidata1849, confidata1854, xsupplier, title) {
  d1849 <- subset(confidata1849, supplier == xsupplier)
  xmean <- d1849[, "rate"]
  xlimdn <- d1849[, "limdn"]
  xlimup <- d1849[, "limup"]
  xpred <- d1849[, "predrate"]
  y <- d1849[, "seq"]
  x1854adj <- subset(confidata1854, supplier == xsupplier)[, "rateadj"]
  plot(xmean, y,
       xlim = range(c(xmean, xpred, xlimdn, xlimup, x1854adj)),
       ylim = rev(range(y)), col = "red",
       main = title,
       xlab = "Mortality: 1849 red circle, 1854 blue diamond vs predicted (circle)",
       ylab = "sub-district",
       pch = 19)
  # Axes are set by plot() above; the xlim/ylim previously passed to lines()
  # only produced "not a graphical parameter" warnings and are dropped.
  lines(xpred, y, type = "p", pch = 1)
  lines(x1854adj, y, type = "p", col = "blue", pch = 17)
  # horizontal error bars
  invisible(arrows(xlimdn, y, xlimup, y, length = 0.05, angle = 90, code = 3, lty = 3))
}
# Plotting joint region for 1849 & 1854 with error bars for each
# Plot, for one supplier: actual 1849 rates (filled red circle) versus 1854
# rates adjusted for the year effect only ("rateadjyr", blue point, pch = 17),
# with dotted 95% error bars around the *actual* 1849 deaths
# (limdnact/limupact, i.e. not around the prediction).
# preperrdata() must have been run first (with a negative-binomial fit, which
# is the only branch that fills limdnact/limupact).
plotcomp <- function(confidata1849, confidata1854, xsupplier, title) {
  d1849 <- subset(confidata1849, supplier == xsupplier)
  xmean <- d1849[, "rate"]
  xlimdn <- d1849[, "limdnact"]
  xlimup <- d1849[, "limupact"]
  y <- d1849[, "seq"]
  xmean1854 <- subset(confidata1854, supplier == xsupplier)[, "rateadjyr"]
  plot(xmean, y,
       xlim = range(c(xmean, xmean1854, xlimdn, xlimup)),
       ylim = rev(range(y)), col = "red",
       main = title,
       xlab = "Mortality: 1849 red circle, 1854 blue diamond",
       ylab = "sub-district",
       pch = 19)
  lines(xmean1854, y, type = "p", col = "blue", pch = 17)
  # Horizontal error bars. The original call contained a stray empty argument
  # (code=3,,lty=3); it is removed here. The unused 'xpred' local is dropped too.
  invisible(arrows(xlimdn, y, xlimup, y, length = 0.05, angle = 90, code = 3, lty = 3))
}
# Batch plot-to-PDF driver (author's own use - see header comment above).
#
# fittedmodel: fitted regression handed straight to preperrdata().
# plotname:    infix used to build ../paper/figures/errbar_<plotname><a-f>.pdf.
# single:      passed through to preperrdata() to select the treatment-effect form.
#
# SIDE EFFECTS: preperrdata() mutates the global x1849/x1854 data frames, and
# up to six PDF files are written under ../paper/figures/ (that directory must
# already exist). Returns nothing useful.
ploterrbars <- function(fittedmodel,plotname,single = "single") { # This is not a good way to do this because I am
# changing globals from within the function (using <<-
# instead of <-)
xfamily <- preperrdata(fittedmodel,single) # this function modifies global data
# (a)/(b): actual vs predicted for each supplier group, 1849 only
pdf(paste("../paper/figures/errbar_",plotname,"a.pdf",sep=""))
plot2(x1849,"SouthwarkVauxhall",paste("First-12 Southwark-only ",xfamily," 1849 "))
dev.off()
pdf(paste("../paper/figures/errbar_",plotname,"b.pdf",sep=""))
plot2(x1849,"SouthwarkVauxhall_Lambeth",paste("Next-16 Jointly-Supplied ",xfamily," 1849 "))
dev.off()
# (c)/(d): 1849 vs treatment-and-year-adjusted 1854 vs predicted
pdf(paste("../paper/figures/errbar_",plotname,"c.pdf",sep=""))
plot3(x1849,x1854,"SouthwarkVauxhall",paste("First-12 Southwark-only ",xfamily," 1849vs1854 "))
dev.off()
pdf(paste("../paper/figures/errbar_",plotname,"d.pdf",sep=""))
plot3(x1849,x1854,"SouthwarkVauxhall_Lambeth",paste("Next-16 Jointly-Supplied ",xfamily," 1849vs1854 "))
dev.off()
if (xfamily != "poisson") { # Plot comparison for joint region only for Negative Binomial
# (e)/(f): plotcomp needs limdnact/limupact, which preperrdata() only fills
# in its negative-binomial branch - hence the family guard above
pdf(paste("../paper/figures/errbar_",plotname,"e.pdf",sep=""))
plotcomp(x1849,x1854,"SouthwarkVauxhall",paste("First-12 Southwark-only ",xfamily," 1849vs1854 "))
dev.off()
pdf(paste("../paper/figures/errbar_",plotname,"f.pdf",sep=""))
plotcomp(x1849,x1854,"SouthwarkVauxhall_Lambeth",paste("Next-16 Jointly-Supplied ",xfamily," 1849vs1854 "))
dev.off()
}
}
| /SnowPlotFns.r | permissive | tscoleman/SnowCholera | R | false | false | 10,364 | r | #author: Thomas Coleman
# Difference in Differences Regressions - Error Analysis and Graphs for data from Snow 1855
# See "Causality in the Time of Cholera" working paper at https://papers.ssrn.com/abstract=3262234
# and my John Snow project website, http://www.hilerun.org/econ/papers/snow
# This code is licensed under the BSD 2-Clause License, https://opensource.org/licenses/BSD-2-Clause
# This collect together some simple functions that produce a standard format graph, and are 'source'd
# into various notebooks
# **preperrdata** Before graphing, to prepare the data
# * Takes in *fittedmodel* - a regression that has been already run. From this it extracts the necessary
# parameters. Also *single*, a string which for "single" says that there is a single treatment effect.
# * Calculates the 1849 and 1854 predicted counts and rates
# * Calculates *approximate* 95% error bars around the predicted rates, based on whether the fitted model
# is Poisson or Negative Binomial
# * Produces an adjusted 1854 predicted rate, adjusting for the 1854 time effect and treatment effect, so
# that it is comparable to the 1849 predicted rate (for purposes of plotting with error bars)
# This function changes global data (the x1849 & x1854 dataframes) using the "<<-" instead of "<-"
# assignment. This is poor programming style but I could not find another easy way of doing what I wanted.
# **plot2_worker** Plots actual vs predicted, with error bars around the predicted
# * Actually does the plotting, given all the data as input arguments (sequence no. for sub-districts;
# the actual mean or rate; predicted; the 2.5% and 97.5% points; title)
# * btw, the hack for plotting error bars is from
# https://stackoverflow.com/questions/13032777/scatter-plot-with-error-bars
# **plot2_worker** Is a cover function which unpacks the actual versus predicted mean from the appropriate dataframe
# **plot3** Plots actual 1849, 1854 (adjusted for time & treatment effects), predicted, with error bars
# **plotcomp** Plots actual 1849 versus 1854, with error bars around actual 1849
# **ploterrbars** is NOT a function you should use - I use it to print out .pdf versions of the graphs I want to use
preperrdata <- function(fittedmodel,single = "single") { # This is not a good way to do this because I am
# changing globals from within the function (using <<-
# instead of <-)
# Function to prepare the data (error bars) for graphing
# The 2.5% and 97.5% confidence bands are calculated assuming either Poisson or Negative Binomial
# The rates are calculated by generating the counts up and down from the "expected" (from the fitted model)
expected <- exp(predict(fittedmodel)) # expected values
theta <- fittedmodel$theta
x1849$predcount <<- expected[1:28]
x1854$predcount <<- expected[29:56]
x1849$predrate <<- 10000 * expected[1:28] / x1849$pop1851
x1854$predrate <<- 10000 * expected[29:56] / x1854$pop1851
xfamily <- family(fittedmodel)$family
if (xfamily == "poisson") { # Get 95% confidence bands depending on model used (Poiss vs Neg Binom)
x1849$limdn <<- 10000 * qpois(.025,lambda=x1849$predcount) / x1849$pop1851
x1849$limup <<- 10000 * qpois(.975,lambda=x1849$predcount) / x1849$pop1851
} else {
x1849$limdn <<- 10000 * qnbinom(.025,size=theta,mu=x1849$predcount) / x1849$pop1851
x1849$limup <<- 10000 * qnbinom(.975,size=theta,mu=x1849$predcount) / x1849$pop1851
x1854$limdn <<- 10000 * qnbinom(.025,size=theta,mu=x1854$predcount) / x1849$pop1851
x1854$limup <<- 10000 * qnbinom(.975,size=theta,mu=x1854$predcount) / x1849$pop1851
x1849$limdnact <<- 10000 * qnbinom(.025,size=theta,mu=x1849$deaths) / x1849$pop1851
x1849$limupact <<- 10000 * qnbinom(.975,size=theta,mu=x1849$deaths) / x1849$pop1851
}
# Now adjust the 1854 predicted counts (and rates) for the time and treatment fixed effects -
# effectively netting out the 1854 effect and making it 1849-equivalent
xyr1854 <- (coef(fittedmodel)["year1854"])
x3 <- exp(-xyr1854)
# Adjust for the year effect only - this should make 1849 & 1854 comparable net of time effect
x1854$rateadjyr <<- 10000 * (x1854$deaths*x3) / x1854$pop1851
# Adjust the 1854 actual for the estimated Treatment Effect and the estimate Year Effect
if (single == "single") { # model with single treatment effect
xdegless <- (coef(fittedmodel)["supplierSouthwarkVauxhall_Lambeth:year1854"])
xdegmore <- xdegless # Make both treatment effects the same
x3 <- x3 * exp(-(x1854$lambethdegree == "less_Lambeth")*xdegless)
x3 <- x3 * exp(-(x1854$lambethdegree == "more_Lambeth")*xdegmore)
} else if (single == "two") { # model with two treatment effects
xdegless <- coef(fittedmodel)["lambethdegreeless_Lambeth:year1854"]
xdegmore <- coef(fittedmodel)["lambethdegreemore_Lambeth:year1854"]
x3 <- x3 * exp(-(x1854$lambethdegree == "less_Lambeth")*xdegless)
x3 <- x3 * exp(-(x1854$lambethdegree == "more_Lambeth")*xdegmore)
} else { # model with continuous treatment (population proportions)
xlambethperc54 <- coef(fittedmodel)["lambethperc54"]
x3 <- x3 * exp(-(x1854$lambethperc54 * xlambethperc54))
}
# Adjust for year & treatment effect - so that we can compare the actuals against each other
x1854$rateadj <<- 10000 * (x1854$deaths*x3) / x1854$pop1851
return(xfamily)
}
# "Worker" function to plot mean, predicted, and error bars
plot2_worker <- function(yseq, xmean,xlimdn,xpred,xlimup,title) {
xplot <- plot(xmean, yseq,
xlim=range(c(xmean, xpred,xlimdn,xlimup)),
ylim=rev(range(yseq)), col="red",
main=title,xlab="Mortality rate actual (red filled) vs predicted (empty circle)",ylab="sub-district",
pch=19)
lines(xpred, yseq, type="p",
xlim=range(c(xmean, xpred,xlimdn,xlimup)),
ylim=rev(range(yseq)),
pch=1)
# horizontal error bars
xplot <- arrows(xlimdn, yseq, xlimup, yseq, length=0.05, angle=90, code=3,lty=3)
xplot
}
# "Cover" function that takes in the dataframe and unpacks
plot2 <- function(confidata,xsupplier,title) {
xmean <- subset(confidata,supplier==xsupplier)[,"rate"]
xlimdn <- subset(confidata,supplier==xsupplier)[,"limdn"]
xlimup <- subset(confidata,supplier==xsupplier)[,"limup"]
xpred <- subset(confidata,supplier==xsupplier)[,"predrate"]
yseq <- subset(confidata,supplier==xsupplier)[,"seq"]
# xtitle <- paste(xsupplier,title)
xplot <- plot2_worker(yseq,xmean,xlimdn,xpred,xlimup,title)
}
plot3 <- function(confidata1849,confidata1854,xsupplier,title) {
xmean <- subset(confidata1849,supplier==xsupplier)[,"rate"]
xlimdn <- subset(confidata1849,supplier==xsupplier)[,"limdn"]
xlimup <- subset(confidata1849,supplier==xsupplier)[,"limup"]
xpred <- subset(confidata1849,supplier==xsupplier)[,"predrate"]
y <- subset(confidata1849,supplier==xsupplier)[,"seq"]
x1854adj <- subset(confidata1854,supplier==xsupplier)[,"rateadj"]
xplot <- plot(xmean, y,
xlim=range(c(xmean, xpred,xlimdn,xlimup,x1854adj)),
ylim=rev(range(y)), col="red",
main=title,xlab="Mortality: 1849 red circle, 1854 blue diamond vs predicted (circle)",ylab="sub-district",
pch=19)
lines(xpred, y, type="p",
xlim=range(c(xmean, xpred,xlimdn,xlimup)),
ylim=rev(range(y)),
pch=1)
lines(x1854adj, y, type="p",
xlim=range(c(xmean, xpred,xlimdn,xlimup)),
ylim=rev(range(y)), col="blue",
pch=17)
# horizontal error bars
xplot <- arrows(xlimdn, y, xlimup, y, length=0.05, angle=90, code=3,lty=3)
}
# Plotting joint region for 1849 & 1854 with error bars for each
# Plot, for one supplier: actual 1849 rates (filled red circle) versus 1854
# rates adjusted for the year effect only ("rateadjyr", blue point, pch = 17),
# with dotted 95% error bars around the *actual* 1849 deaths
# (limdnact/limupact, i.e. not around the prediction).
# preperrdata() must have been run first (with a negative-binomial fit, which
# is the only branch that fills limdnact/limupact).
plotcomp <- function(confidata1849, confidata1854, xsupplier, title) {
  d1849 <- subset(confidata1849, supplier == xsupplier)
  xmean <- d1849[, "rate"]
  xlimdn <- d1849[, "limdnact"]
  xlimup <- d1849[, "limupact"]
  y <- d1849[, "seq"]
  xmean1854 <- subset(confidata1854, supplier == xsupplier)[, "rateadjyr"]
  plot(xmean, y,
       xlim = range(c(xmean, xmean1854, xlimdn, xlimup)),
       ylim = rev(range(y)), col = "red",
       main = title,
       xlab = "Mortality: 1849 red circle, 1854 blue diamond",
       ylab = "sub-district",
       pch = 19)
  lines(xmean1854, y, type = "p", col = "blue", pch = 17)
  # Horizontal error bars. The original call contained a stray empty argument
  # (code=3,,lty=3); it is removed here. The unused 'xpred' local is dropped too.
  invisible(arrows(xlimdn, y, xlimup, y, length = 0.05, angle = 90, code = 3, lty = 3))
}
ploterrbars <- function(fittedmodel,plotname,single = "single") { # This is not a good way to do this because I am
# changing globals from within the function (using <<-
# instead of <-)
xfamily <- preperrdata(fittedmodel,single) # this function modifies global data
pdf(paste("../paper/figures/errbar_",plotname,"a.pdf",sep=""))
plot2(x1849,"SouthwarkVauxhall",paste("First-12 Southwark-only ",xfamily," 1849 "))
dev.off()
pdf(paste("../paper/figures/errbar_",plotname,"b.pdf",sep=""))
plot2(x1849,"SouthwarkVauxhall_Lambeth",paste("Next-16 Jointly-Supplied ",xfamily," 1849 "))
dev.off()
pdf(paste("../paper/figures/errbar_",plotname,"c.pdf",sep=""))
plot3(x1849,x1854,"SouthwarkVauxhall",paste("First-12 Southwark-only ",xfamily," 1849vs1854 "))
dev.off()
pdf(paste("../paper/figures/errbar_",plotname,"d.pdf",sep=""))
plot3(x1849,x1854,"SouthwarkVauxhall_Lambeth",paste("Next-16 Jointly-Supplied ",xfamily," 1849vs1854 "))
dev.off()
if (xfamily != "poisson") { # Plot comparison for joint region only for Negative Binomial
pdf(paste("../paper/figures/errbar_",plotname,"e.pdf",sep=""))
plotcomp(x1849,x1854,"SouthwarkVauxhall",paste("First-12 Southwark-only ",xfamily," 1849vs1854 "))
dev.off()
pdf(paste("../paper/figures/errbar_",plotname,"f.pdf",sep=""))
plotcomp(x1849,x1854,"SouthwarkVauxhall_Lambeth",paste("Next-16 Jointly-Supplied ",xfamily," 1849vs1854 "))
dev.off()
}
}
|
# Copyright 2017 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
# Simplification and aggregation for visualizations -----------------------
library(bcmaps)
library(rmapshaper)
library(feather)
library(dplyr)
library(sf)
source("fun.R")
dir.create("out", showWarnings = FALSE)
dir.create("out-shiny", showWarnings = FALSE)
## Create simplified versions of ecoregions for leaflet map.
## Pattern used throughout this script: tryCatch(readRDS(cache), error = ...)
## serves as a poor-man's cache - if the .rds exists it is loaded, otherwise
## the expensive simplification runs and the result is saved for next time.
## Inputs ecoregions_t, bec_t, ld_simp come from earlier pipeline scripts /
## fun.R (not defined in this file).
eco_leaflet_rds <- "out-shiny/ecoregions_t_leaflet.rds"
ecoregions_t_simp_leaflet <- tryCatch(readRDS(eco_leaflet_rds), error = function(e) {
# Heavy simplification (keep 0.1% of vertices), reproject to WGS84 for leaflet,
# and dissolve to one feature per ecoregion code/name
eco_t_simp_leaflet <- ms_simplify(ecoregions_t[,c("CRGNCD", "CRGNNM")], 0.001) %>%
fix_geo_problems() %>%
st_set_crs(3005) %>%
st_transform(4326) %>%
mutate(CRGNNM = tools::toTitleCase(tolower(as.character(CRGNNM)))) %>%
group_by(CRGNCD, CRGNNM) %>%
summarise()
# Cached as a Spatial (sp) object because the shiny app's leaflet code expects sp
saveRDS(as(st_cast(eco_t_simp_leaflet), "Spatial"), eco_leaflet_rds)
eco_t_simp_leaflet
})
## Create simplified versions of ecoregions for visualization
ecoregions_t_simp_rds <- "tmp/ecoregions_t_simp.rds"
ecoregions_t_simp <- tryCatch(readRDS(ecoregions_t_simp_rds), error = function(e) {
eco_t_simp <- ms_simplify(ecoregions_t, 0.01) %>%
fix_geo_problems()
saveRDS(eco_t_simp, ecoregions_t_simp_rds)
eco_t_simp
})
# ggplot2-ready fortified data frame for the shiny app
gg_ecoreg <- gg_fortify(as(ecoregions_t_simp, "Spatial")) %>%
write_feather("out-shiny/gg_ecoreg.feather")
## Simplify BEC pologyons for use in display
bec_zone_rds <- "tmp/bec_zone.rds"
bec_zone <- tryCatch(readRDS(bec_zone_rds), error = function(e) {
# Dissolve BEC polygons to one feature per ZONE and record each zone's area
bec_zone <- group_by(bec_t, ZONE) %>%
summarize() %>%
fix_geo_problems()
bec_zone$zone_area <- st_area(bec_zone)
saveRDS(bec_zone, bec_zone_rds)
bec_zone
})
bec_zone_simp_rds <- "tmp/bec_zone_simp.rds"
bec_zone_simp <- tryCatch(readRDS(bec_zone_simp_rds), error = function(e) {
bec_zone$zone_area <- as.numeric(bec_zone$zone_area) ## Of class units, need as numeric
bec_zone_simp <- bec_zone %>%
ms_simplify(keep = 0.005) %>%
fix_geo_problems()
saveRDS(bec_zone_simp, bec_zone_simp_rds)
bec_zone_simp
})
gg_bec <- as(bec_zone_simp, "Spatial") %>%
gg_fortify() %>%
write_feather("out-shiny/gg_bec.feather")
## Further simplification for BEC leaflet map
bec_zone_leaflet_rds <- "out-shiny/bec_leaflet.rds"
bec_zone_leaflet <- tryCatch(readRDS(bec_zone_leaflet_rds), error = function(e) {
bec_zone_leaflet <- bec_zone_simp %>%
ms_simplify(0.1) %>%
fix_geo_problems() %>%
st_transform(4326)
# Drop factor levels so leaflet popups/legends get plain strings
bec_zone_leaflet$ZONE <- as.character(bec_zone_leaflet$ZONE)
saveRDS(as(bec_zone_leaflet, "Spatial"), bec_zone_leaflet_rds)
bec_zone_leaflet
})
## Simplify ld a bit more for shiny plotting
## (ld_simp - the land-designations layer - is created by an earlier script)
ld_simp_more <- ms_simplify(ld_simp, keep = 0.05, explode = TRUE, keep_shapes = TRUE) %>%
fix_geo_problems() %>%
group_by(category) %>%
summarise()
## Intersect simplified ld with simplified bec to get viz object:
## NOTE(review): sf's extractor is spelled st_collection_extract() - confirm
## st_collectionextract() resolves (e.g. via an alias in fun.R), otherwise
## these two pipelines will error.
ld_bec_simp <- st_intersection(bec_zone_simp, ld_simp_more) %>%
st_collectionextract("POLYGON") %>%
select(ZONE, category) %>%
fix_geo_problems()
gg_ld_bec <- as(ld_bec_simp, "Spatial") %>%
gg_fortify() %>%
write_feather("out-shiny/gg_ld_bec.feather")
## Intersect simplified ld with simplified ecoregions to get viz object:
ld_ecoreg_simp <- st_intersection(ecoregions_t_simp, ld_simp_more) %>%
st_collectionextract("POLYGON") %>%
select(CRGNCD, category) %>%
fix_geo_problems()
gg_ld_ecoreg <- as(ld_ecoreg_simp, "Spatial") %>%
fix_geo_problems() %>%
gg_fortify() %>%
write_feather("out-shiny/gg_ld_ecoreg.feather")
# Copy gg objects from tmp needed for shiny app
file.copy(from = file.path("tmp", c("gg_ld_simp.feather", "gg_bc_bound.feather")),
to = "out-shiny")
# Copy all objects needed for shiny app to shiny app project folder
files_list <- list.files("out-shiny", pattern = "\\.feather$|\\.rds$", full.names = TRUE)
file.copy(from = files_list, to = "../land-designations-shinyapp/app/data", overwrite = TRUE)
| /04_output_shiny.R | permissive | bcgov/land-designations-indicator | R | false | false | 4,438 | r | # Copyright 2017 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
# Simplification and aggregation for visualizations -----------------------
library(bcmaps)
library(rmapshaper)
library(feather)
library(dplyr)
library(sf)
source("fun.R")
dir.create("out", showWarnings = FALSE)
dir.create("out-shiny", showWarnings = FALSE)
## Create simplified versions of ecoregions for leaflet map
eco_leaflet_rds <- "out-shiny/ecoregions_t_leaflet.rds"
ecoregions_t_simp_leaflet <- tryCatch(readRDS(eco_leaflet_rds), error = function(e) {
eco_t_simp_leaflet <- ms_simplify(ecoregions_t[,c("CRGNCD", "CRGNNM")], 0.001) %>%
fix_geo_problems() %>%
st_set_crs(3005) %>%
st_transform(4326) %>%
mutate(CRGNNM = tools::toTitleCase(tolower(as.character(CRGNNM)))) %>%
group_by(CRGNCD, CRGNNM) %>%
summarise()
saveRDS(as(st_cast(eco_t_simp_leaflet), "Spatial"), eco_leaflet_rds)
eco_t_simp_leaflet
})
## Create simplified versions of ecoregions for visualization
ecoregions_t_simp_rds <- "tmp/ecoregions_t_simp.rds"
ecoregions_t_simp <- tryCatch(readRDS(ecoregions_t_simp_rds), error = function(e) {
eco_t_simp <- ms_simplify(ecoregions_t, 0.01) %>%
fix_geo_problems()
saveRDS(eco_t_simp, ecoregions_t_simp_rds)
eco_t_simp
})
gg_ecoreg <- gg_fortify(as(ecoregions_t_simp, "Spatial")) %>%
write_feather("out-shiny/gg_ecoreg.feather")
## Simplify BEC pologyons for use in display
bec_zone_rds <- "tmp/bec_zone.rds"
bec_zone <- tryCatch(readRDS(bec_zone_rds), error = function(e) {
bec_zone <- group_by(bec_t, ZONE) %>%
summarize() %>%
fix_geo_problems()
bec_zone$zone_area <- st_area(bec_zone)
saveRDS(bec_zone, bec_zone_rds)
bec_zone
})
bec_zone_simp_rds <- "tmp/bec_zone_simp.rds"
bec_zone_simp <- tryCatch(readRDS(bec_zone_simp_rds), error = function(e) {
bec_zone$zone_area <- as.numeric(bec_zone$zone_area) ## Of class units, need as numeric
bec_zone_simp <- bec_zone %>%
ms_simplify(keep = 0.005) %>%
fix_geo_problems()
saveRDS(bec_zone_simp, bec_zone_simp_rds)
bec_zone_simp
})
gg_bec <- as(bec_zone_simp, "Spatial") %>%
gg_fortify() %>%
write_feather("out-shiny/gg_bec.feather")
## Further simplification for BEC leaflet map
bec_zone_leaflet_rds <- "out-shiny/bec_leaflet.rds"
bec_zone_leaflet <- tryCatch(readRDS(bec_zone_leaflet_rds), error = function(e) {
bec_zone_leaflet <- bec_zone_simp %>%
ms_simplify(0.1) %>%
fix_geo_problems() %>%
st_transform(4326)
bec_zone_leaflet$ZONE <- as.character(bec_zone_leaflet$ZONE)
saveRDS(as(bec_zone_leaflet, "Spatial"), bec_zone_leaflet_rds)
bec_zone_leaflet
})
## Simplify ld a bit further for shiny plotting: explode multipart shapes
## so small parts survive simplification, repair geometries, then dissolve
## back to one feature per land-designation category.
ld_simp_more <- ld_simp %>%
  ms_simplify(keep = 0.05, explode = TRUE, keep_shapes = TRUE) %>%
  fix_geo_problems() %>%
  group_by(category) %>%
  summarise()
## Intersect simplified ld with simplified bec to get viz object.
## st_intersection() can emit GEOMETRYCOLLECTIONs (stray points/lines
## along shared edges), so keep only the polygonal pieces before
## selecting the columns needed for plotting.
ld_bec_simp <- st_intersection(bec_zone_simp, ld_simp_more) %>%
  st_collection_extract("POLYGON") %>%  # fixed: sf spells this st_collection_extract()
  select(ZONE, category) %>%
  fix_geo_problems()
## Fortify the BEC x land-designation intersection for ggplot and write it
## for the shiny app (assumes write_feather() passes its input through, so
## gg_ld_bec holds the fortified data frame -- TODO confirm).
gg_ld_bec <- as(ld_bec_simp, "Spatial") %>%
  gg_fortify() %>%
  write_feather("out-shiny/gg_ld_bec.feather")
## Intersect simplified ld with simplified ecoregions to get viz object.
## st_intersection() can return GEOMETRYCOLLECTIONs along shared edges,
## so keep only the polygonal parts before selecting plotting columns.
ld_ecoreg_simp <- st_intersection(ecoregions_t_simp, ld_simp_more) %>%
  st_collection_extract("POLYGON") %>%  # fixed: the sf function is st_collection_extract()
  select(CRGNCD, category) %>%
  fix_geo_problems()
## Fortify the ecoregion x land-designation intersection for the shiny app.
## NOTE(review): fix_geo_problems() is applied again here even though
## ld_ecoreg_simp was already fixed above, and the parallel gg_ld_bec
## pipeline does not repeat it -- confirm whether this second pass is needed.
gg_ld_ecoreg <- as(ld_ecoreg_simp, "Spatial") %>%
  fix_geo_problems() %>%
  gg_fortify() %>%
  write_feather("out-shiny/gg_ld_ecoreg.feather")
# Copy gg objects built earlier into tmp/ that the shiny app needs.
# NOTE(review): unlike the copy below, this call does not pass
# overwrite = TRUE, so stale files already in out-shiny are kept --
# confirm that is intended.
file.copy(from = file.path("tmp", c("gg_ld_simp.feather", "gg_bc_bound.feather")),
          to = "out-shiny")
# Copy all objects needed for shiny app (every .feather / .rds file in
# out-shiny) into the shiny app project's data folder.
files_list <- list.files("out-shiny", pattern = "\\.feather$|\\.rds$", full.names = TRUE)
file.copy(from = files_list, to = "../land-designations-shinyapp/app/data", overwrite = TRUE)
|
# Original data: two five-element character vectors combined into a data
# frame, then each vector round-tripped through a plain text file.
left <- LETTERS[1:5]
right <- letters[1:5]
# cbind() of two character vectors yields a character matrix with columns
# named "left" and "right"; data.frame() keeps those names.
data <- cbind(left, right)
data <- data.frame(data, stringsAsFactors=FALSE)
# One value per line, no header/quotes, so read.table() can reload them.
write.table(left, "left.txt", col.names=FALSE, row.names=FALSE, quote=FALSE)
write.table(right, "right.txt", col.names=FALSE, row.names=FALSE, quote=FALSE)
# Loaded data: [,1] extracts the single column as a character vector.
left2 <- read.table("left.txt", stringsAsFactors=FALSE)[,1]
right2 <- read.table("right.txt", stringsAsFactors=FALSE)[,1]
# NOTE(review): this rebuild relies on the R >= 4.0 default
# stringsAsFactors = FALSE; on older R these columns become factors and
# identical(data, data2) would be FALSE.
data2 <- data.frame(left=left2, right=right2)
# Should be TRUE
identical(data, data2) | /src/debug5.R | no_license | kokitsuyuzaki/preprocess-exercise | R | false | false | 515 | r | # Original data
# Original data: two five-element character vectors combined into a data
# frame, then each vector round-tripped through a plain text file.
left <- LETTERS[1:5]
right <- letters[1:5]
# cbind() of two character vectors yields a character matrix with columns
# named "left" and "right"; data.frame() keeps those names.
data <- cbind(left, right)
data <- data.frame(data, stringsAsFactors=FALSE)
# One value per line, no header/quotes, so read.table() can reload them.
write.table(left, "left.txt", col.names=FALSE, row.names=FALSE, quote=FALSE)
write.table(right, "right.txt", col.names=FALSE, row.names=FALSE, quote=FALSE)
# Loaded data: [,1] extracts the single column as a character vector.
left2 <- read.table("left.txt", stringsAsFactors=FALSE)[,1]
right2 <- read.table("right.txt", stringsAsFactors=FALSE)[,1]
# NOTE(review): this rebuild relies on the R >= 4.0 default
# stringsAsFactors = FALSE; on older R these columns become factors and
# identical(data, data2) would be FALSE.
data2 <- data.frame(left=left2, right=right2)
# Should be TRUE
identical(data, data2) |
expected_values[[runno]] <- list(lik = c(-13224.64, 26471.28, 26534.33), param = c(1.3603,
4.2322, 1.3738, 3.8828, -0.082017, 0.19416), stdev_param = c(0.27956,
0.30694, 0.31952, 0.32791, 0.35965, NA), sigma = c(prop.err = 0.19416),
parFixedDf = structure(list(Estimate = c(lCl = 1.36033126394913,
lVc = 4.23215119131238, lQ = 1.37381586993406, lVp = 3.8827597757202,
lKA = -0.082017108389879, prop.err = 0.19415985496711), SE = c(lCl = 0.0285675283820851,
lVc = 0.041588730887297, lQ = 0.123258794248241, lVp = 0.0573002474574704,
lKA = 0.063924288407078, prop.err = NA), "%RSE" = c(2.10004203675741,
0.982685376946574, 8.97200250381127, 1.47576081878107, 77.9401879217756,
NA), "Back-transformed" = c(3.89748418397449, 68.8652151995417,
3.95039616939885, 48.5580396223039, 0.921256197351321, 0.19415985496711
), "CI Lower" = c(3.68525579044501, 63.4745333982472, 3.10257704947502,
43.3997358376751, 0.812770569656718, NA), "CI Upper" = c(4.12193449467372,
74.7137097444208, 5.0298927782766, 54.329436952802, 1.04422208781093,
NA), "BSV(CV%)" = c(31.4310041157679, 28.5112031118897, 33.692553031394,
32.7851395712572, 37.1600635128661, NA), "Shrink(SD)%" = c(0.703069992782979,
15.6656178045198, 62.3450408484174, 33.4881728866632, 29.0679457558813,
NA)), class = "data.frame", row.names = c("lCl", "lVc", "lQ",
"lVp", "lKA", "prop.err")), omega = structure(c(0.0781537280177815,
0, 0, 0, 0, 0, 0.0942103042288283, 0, 0, 0, 0, 0, 0.10209306726118,
0, 0, 0, 0, 0, 0.107525102927145, 0, 0, 0, 0, 0, 0.129348810836102
), .Dim = c(5L, 5L), .Dimnames = list(c("eta.Vc", "eta.Cl",
"eta.Vp", "eta.Q", "eta.KA"), c("eta.Vc", "eta.Cl", "eta.Vp",
"eta.Q", "eta.KA"))), time = structure(list(saem = 28.074,
setup = 0.316125999999988, table = 0.615999999999985,
cwres = 1.16999999999996, covariance = 0.0450000000000728,
other = 0.454874000000082), class = "data.frame", row.names = "elapsed"),
objDf = structure(list(OBJF = 22258.9179787061, AIC = 26471.2776901194,
BIC = 26534.3289280608, "Log-likelihood" = -13224.6388450597,
"Condition Number" = 24.1902633908432), row.names = "FOCEi", class = "data.frame"))
| /inst/models/values-1.1.1.3-U062_solve_saem-unix.R | no_license | nlmixrdevelopment/nlmixr.examples | R | false | false | 2,258 | r | expected_values[[runno]] <- list(lik = c(-13224.64, 26471.28, 26534.33), param = c(1.3603,
4.2322, 1.3738, 3.8828, -0.082017, 0.19416), stdev_param = c(0.27956,
0.30694, 0.31952, 0.32791, 0.35965, NA), sigma = c(prop.err = 0.19416),
parFixedDf = structure(list(Estimate = c(lCl = 1.36033126394913,
lVc = 4.23215119131238, lQ = 1.37381586993406, lVp = 3.8827597757202,
lKA = -0.082017108389879, prop.err = 0.19415985496711), SE = c(lCl = 0.0285675283820851,
lVc = 0.041588730887297, lQ = 0.123258794248241, lVp = 0.0573002474574704,
lKA = 0.063924288407078, prop.err = NA), "%RSE" = c(2.10004203675741,
0.982685376946574, 8.97200250381127, 1.47576081878107, 77.9401879217756,
NA), "Back-transformed" = c(3.89748418397449, 68.8652151995417,
3.95039616939885, 48.5580396223039, 0.921256197351321, 0.19415985496711
), "CI Lower" = c(3.68525579044501, 63.4745333982472, 3.10257704947502,
43.3997358376751, 0.812770569656718, NA), "CI Upper" = c(4.12193449467372,
74.7137097444208, 5.0298927782766, 54.329436952802, 1.04422208781093,
NA), "BSV(CV%)" = c(31.4310041157679, 28.5112031118897, 33.692553031394,
32.7851395712572, 37.1600635128661, NA), "Shrink(SD)%" = c(0.703069992782979,
15.6656178045198, 62.3450408484174, 33.4881728866632, 29.0679457558813,
NA)), class = "data.frame", row.names = c("lCl", "lVc", "lQ",
"lVp", "lKA", "prop.err")), omega = structure(c(0.0781537280177815,
0, 0, 0, 0, 0, 0.0942103042288283, 0, 0, 0, 0, 0, 0.10209306726118,
0, 0, 0, 0, 0, 0.107525102927145, 0, 0, 0, 0, 0, 0.129348810836102
), .Dim = c(5L, 5L), .Dimnames = list(c("eta.Vc", "eta.Cl",
"eta.Vp", "eta.Q", "eta.KA"), c("eta.Vc", "eta.Cl", "eta.Vp",
"eta.Q", "eta.KA"))), time = structure(list(saem = 28.074,
setup = 0.316125999999988, table = 0.615999999999985,
cwres = 1.16999999999996, covariance = 0.0450000000000728,
other = 0.454874000000082), class = "data.frame", row.names = "elapsed"),
objDf = structure(list(OBJF = 22258.9179787061, AIC = 26471.2776901194,
BIC = 26534.3289280608, "Log-likelihood" = -13224.6388450597,
"Condition Number" = 24.1902633908432), row.names = "FOCEi", class = "data.frame"))
|
# Shiny UI for a cryptocurrency price dashboard. All user-facing labels
# are in Polish (title: "Cryptocurrency prices keep rising").
library(shiny)
library(plotly)
# Loaded at UI build time: provides the `currencies` data frame whose
# Currency factor levels populate the selector below.
load("currencies.rda")
shinyUI(fluidPage(
titlePanel("Ceny kryptowalut cały czas rosną"),
sidebarLayout(
sidebarPanel(
# Multi-select of cryptocurrencies to plot
selectInput("cryptocurrency",
label = "Wybierz kryptowaluty",
choices = levels(currencies$Currency),
selected = "BitCoin",
multiple = TRUE),
# Toggle: draw a trend line?
checkboxInput("trendLine",
"Czy zaznaczyć linię trendu?",
value = TRUE),
# How many of the most recent weeks of data to show (1-4)
selectInput("weeks",
label = "Z ilu ostatnich tygodni pokazać dane?",
choices = c("1 tydzień", "2 tygodnie", "3 tygodnie", "4 tygodnie"),
selected = "4 tygodnie"
),
# Which price metric to display (lowest / highest / open / close)
selectInput("metric",
label="Wybierz metrykę",
choices=c("Najniższa cena", "Najwyższa cena", "Cena otwarcia", "Cena zamknięcia"),
selected="Najniższa cena"),
# Data-as-of note shown below the controls
p("Dane na 06.12.2017r
")
),
mainPanel(
# Tabs: static plot, interactive plotly version, and a details table
tabsetPanel(
tabPanel("Wykres",
plotOutput("currencyPlot")),
tabPanel("Interaktywny",
plotlyOutput("currencyPlotly")),
tabPanel("Szczegóły",
dataTableOutput("details")
)
)
)
)
)) | /PraceDomowe/PD_08/PiotrKrzeszewski/ui.R | no_license | vaidasmo/TechnikiWizualizacjiDanych2017 | R | false | false | 1,320 | r | library(shiny)
library(plotly)
load("currencies.rda")
shinyUI(fluidPage(
titlePanel("Ceny kryptowalut cały czas rosną"),
sidebarLayout(
sidebarPanel(
selectInput("cryptocurrency",
label = "Wybierz kryptowaluty",
choices = levels(currencies$Currency),
selected = "BitCoin",
multiple = TRUE),
checkboxInput("trendLine",
"Czy zaznaczyć linię trendu?",
value = TRUE),
selectInput("weeks",
label = "Z ilu ostatnich tygodni pokazać dane?",
choices = c("1 tydzień", "2 tygodnie", "3 tygodnie", "4 tygodnie"),
selected = "4 tygodnie"
),
selectInput("metric",
label="Wybierz metrykę",
choices=c("Najniższa cena", "Najwyższa cena", "Cena otwarcia", "Cena zamknięcia"),
selected="Najniższa cena"),
p("Dane na 06.12.2017r
")
),
mainPanel(
tabsetPanel(
tabPanel("Wykres",
plotOutput("currencyPlot")),
tabPanel("Interaktywny",
plotlyOutput("currencyPlotly")),
tabPanel("Szczegóły",
dataTableOutput("details")
)
)
)
)
)) |
\name{mcga-package}
\alias{mcga-package}
\docType{package}
\title{
Machine Coded Genetic Algorithms for Real-valued Optimization Problems
}
\description{
Machine coded genetic algorithm (MCGA) is a fast tool for real-valued optimization problems. It uses the byte representation of variables rather than real values and performs the classical (uniform) crossover operations on these byte representations. The mutation operator is likewise similar to its classical counterpart: it changes a randomly selected byte value of a chromosome by +1 or -1 with probability 1/2. In MCGAs there is no need for an encoding-decoding process, and the classical operators are directly applicable to real values. The algorithm is fast and can handle a wide search space with high precision. Using a 256-symbol alphabet is the main disadvantage of this algorithm, but a moderately sized population is convenient for many problems.
}
\author{
Mehmet Hakan Satman
Maintainer: Mehmet Hakan Satman <mhsatman@istanbul.edu.tr>
}
\examples{
\dontrun{
# A sample optimization problem
# Min f(xi) = (x1-7)^2 + (x2-77)^2 + (x3-777)^2 + (x4-7777)^2 + (x5-77777)^2
# The range of xi is unknown. The solution is
# x1 = 7
# x2 = 77
# x3 = 777
# x4 = 7777
# x5 = 77777
# Min f(xi) = 0
require("mcga")
f<-function(x){
return ((x[1]-7)^2 + (x[2]-77)^2 +(x[3]-777)^2 +(x[4]-7777)^2 +(x[5]-77777)^2)
}
m <- mcga( popsize=200,
chsize=5,
minval=0.0,
maxval=999999999.9,
maxiter=2500,
crossprob=1.0,
mutateprob=0.01,
evalFunc=f)
cat("Best chromosome:\n")
print(m$population[1,])
cat("Cost: ",m$costs[1],"\n")
}
}
| /man/mcga-package.Rd | no_license | cran/mcga | R | false | false | 1,621 | rd | \name{mcga-package}
\alias{mcga-package}
\docType{package}
\title{
Machine Coded Genetic Algorithms for Real-valued Optimization Problems
}
\description{
Machine coded genetic algorithm (MCGA) is a fast tool for real-valued optimization problems. It uses the byte representation of variables rather than real values and performs the classical (uniform) crossover operations on these byte representations. The mutation operator is likewise similar to its classical counterpart: it changes a randomly selected byte value of a chromosome by +1 or -1 with probability 1/2. In MCGAs there is no need for an encoding-decoding process, and the classical operators are directly applicable to real values. The algorithm is fast and can handle a wide search space with high precision. Using a 256-symbol alphabet is the main disadvantage of this algorithm, but a moderately sized population is convenient for many problems.
}
\author{
Mehmet Hakan Satman
Maintainer: Mehmet Hakan Satman <mhsatman@istanbul.edu.tr>
}
\examples{
\dontrun{
# A sample optimization problem
# Min f(xi) = (x1-7)^2 + (x2-77)^2 + (x3-777)^2 + (x4-7777)^2 + (x5-77777)^2
# The range of xi is unknown. The solution is
# x1 = 7
# x2 = 77
# x3 = 777
# x4 = 7777
# x5 = 77777
# Min f(xi) = 0
require("mcga")
f<-function(x){
return ((x[1]-7)^2 + (x[2]-77)^2 +(x[3]-777)^2 +(x[4]-7777)^2 +(x[5]-77777)^2)
}
m <- mcga( popsize=200,
chsize=5,
minval=0.0,
maxval=999999999.9,
maxiter=2500,
crossprob=1.0,
mutateprob=0.01,
evalFunc=f)
cat("Best chromosome:\n")
print(m$population[1,])
cat("Cost: ",m$costs[1],"\n")
}
}
|
library(shinytest2)

test_that("Migrated shinytest test: mytest.R", {
  app <- AppDriver$new(variant = shinycoreci::platform_rversion(),
    seed = 100, shiny_args = list(display.mode = "normal"))

  # The refresh/wait/snapshot sequence was repeated verbatim three times;
  # factor it into a local helper. Click the reactlog refresh button, give
  # the app time to re-render, then snapshot values and a screenshot.
  refresh_and_snapshot <- function() {
    app$set_inputs(`reactlog_module-refresh` = "click")
    Sys.sleep(4)  # allow the reactlog refresh to complete before snapshotting
    app$expect_values()
    app$expect_screenshot()
  }

  refresh_and_snapshot()

  app$set_inputs(bins = 8)
  refresh_and_snapshot()

  app$set_inputs(bins = 5)
  app$set_inputs(bins = 22)
  refresh_and_snapshot()
})
| /apps/192-reactlog-hello/tests/testthat/test-mytest.R | permissive | rstudio/shinycoreci-apps | R | false | false | 636 | r | library(shinytest2)
test_that("Migrated shinytest test: mytest.R", {
  app <- AppDriver$new(variant = shinycoreci::platform_rversion(),
    seed = 100, shiny_args = list(display.mode = "normal"))

  # The refresh/wait/snapshot sequence was repeated verbatim three times;
  # factor it into a local helper. Click the reactlog refresh button, give
  # the app time to re-render, then snapshot values and a screenshot.
  refresh_and_snapshot <- function() {
    app$set_inputs(`reactlog_module-refresh` = "click")
    Sys.sleep(4)  # allow the reactlog refresh to complete before snapshotting
    app$expect_values()
    app$expect_screenshot()
  }

  refresh_and_snapshot()

  app$set_inputs(bins = 8)
  refresh_and_snapshot()

  app$set_inputs(bins = 5)
  app$set_inputs(bins = 22)
  refresh_and_snapshot()
})
|
#! /usr/bin/R
#setwd("/home/schimar/ui/ta/ta_dna_snakemake_pbs/vars/taSubInDel/stats/gemma/output/")
## Load GEMMA (BSLMM) hyperparameter samples from the file given on the
## command line and prepare an output directory named after the input file.
## ========================================================================
argv <- commandArgs()
## NOTE(review): argv[6] assumes a fixed invocation layout; the 6th element
## of commandArgs() is only the input path for one particular way of
## calling R. commandArgs(trailingOnly = TRUE)[1] would be more robust --
## confirm how the pipeline invokes this script.
infile <- argv[6]
hyp <- read.table(infile, header=T)
## ========================================================================
# from http://romainvilloutreix.alwaysdata.net/romainvilloutreix/wp-content/uploads/2017/01/gwas_gemma-2017-01-17.pdf
#path <- dirname(infile)
mainDir <- dirname(infile)
## subDir is everything before the FIRST dot in the basename (not merely
## the extension stripped), e.g. "run.hyp.txt" -> "run"
subDir <- unlist(strsplit(basename(infile), '.', fixed= T))[1]
#substr(basename(infile), 1, nchar(basename(infile))-4)
outDir <- file.path(mainDir, subDir)
if (!dir.exists(outDir)){
dir.create(outDir)
} else {
print(paste0(outDir, " already exists"))
}
outfile1 <- paste0(file.path(outDir, subDir), '.hyp.pve.pge.tiff')
# Plot MCMC traces and posterior distributions of PVE and PGE
# (per parameter: a full-width trace row, then histogram + density).
# ==============================================================================
tiff(file=outfile1, compression= 'zip+p', width = 800, height = 800)#, units = "px", res = 800, type= 'Xlib')
# layout: panel 1 (trace) spans the top row, panels 2-3 share the next
# row; same pattern repeated for the second parameter.
layout(matrix(c(1,1,2,3,4,4,5,6), 4, 2, byrow = TRUE))
# PVE: proportion of phenotypic variance explained by variants
# ------------------------------------------------------------------------------
plot(hyp$pve, type="l", ylab="PVE", main="PVE - trace")
hist(hyp$pve, main="PVE - posterior distribution", xlab="PVE")
plot(density(hyp$pve), main="PVE - posterior distribution", xlab="PVE")
# ------------------------------------------------------------------------------
# PGE: proportion of genetic variance explained by major-effect variants
# ------------------------------------------------------------------------------
plot(hyp$pge, type="l", ylab="PGE", main="PGE - trace")
hist(hyp$pge, main="PGE - posterior distribution", xlab="PGE")
plot(density(hyp$pge), main="PGE - posterior distribution", xlab="PGE")
# ------------------------------------------------------------------------------
dev.off()
outfile2 <- paste0(file.path(outDir, subDir), '.hyp.pi.n_gamma.tiff')
# Plot MCMC traces and posterior distributions of pi and n_gamma
# ==============================================================================
tiff(file=outfile2, compression= 'zip+p', width = 800, height = 800)#, units = "px", res = 800, type= 'Xlib')
layout(matrix(c(1,1,2,3,4,4,5,6), 4, 2, byrow = TRUE))
# pi: proportion of variants with non-zero effects
# ------------------------------------------------------------------------------
plot(hyp$pi, type="l", ylab="pi", main="pi")
hist(hyp$pi, main="pi", xlab="pi")
plot(density(hyp$pi), main="pi", xlab="pi")
# ------------------------------------------------------------------------------
# n_gamma: number of variants with major effect
# ------------------------------------------------------------------------------
plot(hyp$n_gamma, type="l", ylab="n_gamma", main="n_gamma - trace")
hist(hyp$n_gamma, main="n_gamma - posterior distribution", xlab="n_gamma")
# Bug fix: this panel previously plotted density(hyp$pi) while being
# titled "n_gamma"; it now plots the n_gamma posterior as labelled.
plot(density(hyp$n_gamma), main="n_gamma - posterior distribution", xlab="n_gamma")
# ------------------------------------------------------------------------------
dev.off()
# ==============================================================================
# Hyperparameter summary table: mean, median, and 95% ETPI of each
# hyperparameter in the GEMMA BSLMM output.
# ==============================================================================
# One row of the summary: label, posterior mean, then the 50%, 2.5% and
# 97.5% quantiles of the sampled values (the same pattern was previously
# written out six times; factored into a helper).
param_summary <- function(label, values) {
  c(label, mean(values), quantile(values, probs=c(0.5,0.025,0.975)))
}
# h -> approximation to proportion of phenotypic variance
# explained by variants (PVE)
h <- param_summary("h", hyp$h)
# pve -> PVE
pve <- param_summary("PVE", hyp$pve)
# rho -> approximation to proportion of genetic variance explained by
# variants with major effect (PGE)
# rho=0 -> pure LMM, highly polygenic basis
# rho=1 -> pure BVSR, few major effect loci
rho <- param_summary("rho", hyp$rho)
# pge -> PGE
pge <- param_summary("PGE", hyp$pge)
# pi -> proportion of variants with non-zero effects
pi <- param_summary("pi", hyp$pi)
# n.gamma -> number of variants with major effect
n.gamma <- param_summary("n.gamma", hyp$n_gamma)
# Assemble the table (row names dropped) and save it to a file
# ==============================================================================
hyp.table <- as.data.frame(rbind(h,pve,rho,pge,pi,n.gamma),row.names=F)
colnames(hyp.table) <- c("hyperparam", "mean","median","2.5%", "97.5%")
# show table (echoed when run via Rscript / interactively)
hyp.table
# write table to file
write.table(hyp.table, file= file.path(outDir, "hyperparameters.dsv"), sep="\t", quote=F)
| /script/bslmm_hyp.r | no_license | tholtzem/ta_dna_snakemake_pbs | R | false | false | 4,468 | r | #! /usr/bin/R
#setwd("/home/schimar/ui/ta/ta_dna_snakemake_pbs/vars/taSubInDel/stats/gemma/output/")
## Load hyperparameters
## ========================================================================
argv <- commandArgs()
infile <- argv[6]
hyp <- read.table(infile, header=T)
## ========================================================================
# from http://romainvilloutreix.alwaysdata.net/romainvilloutreix/wp-content/uploads/2017/01/gwas_gemma-2017-01-17.pdf
#path <- dirname(infile)
mainDir <- dirname(infile)
subDir <- unlist(strsplit(basename(infile), '.', fixed= T))[1]
#substr(basename(infile), 1, nchar(basename(infile))-4)
outDir <- file.path(mainDir, subDir)
if (!dir.exists(outDir)){
dir.create(outDir)
} else {
print(paste0(outDir, " already exists"))
}
outfile1 <- paste0(file.path(outDir, subDir), '.hyp.pve.pge.tiff')
# plot traces and distributions of hyperparameters
# ==============================================================================
tiff(file=outfile1, compression= 'zip+p', width = 800, height = 800)#, units = "px", res = 800, type= 'Xlib')
layout(matrix(c(1,1,2,3,4,4,5,6), 4, 2, byrow = TRUE))
# PVE
# ------------------------------------------------------------------------------
plot(hyp$pve, type="l", ylab="PVE", main="PVE - trace")
hist(hyp$pve, main="PVE - posterior distribution", xlab="PVE")
plot(density(hyp$pve), main="PVE - posterior distribution", xlab="PVE")
# ------------------------------------------------------------------------------
# PGE
# ------------------------------------------------------------------------------
plot(hyp$pge, type="l", ylab="PGE", main="PGE - trace")
hist(hyp$pge, main="PGE - posterior distribution", xlab="PGE")
plot(density(hyp$pge), main="PGE - posterior distribution", xlab="PGE")
# ------------------------------------------------------------------------------
dev.off()
outfile2 <- paste0(file.path(outDir, subDir), '.hyp.pi.n_gamma.tiff')
# Plot MCMC traces and posterior distributions of pi and n_gamma
# ==============================================================================
tiff(file=outfile2, compression= 'zip+p', width = 800, height = 800)#, units = "px", res = 800, type= 'Xlib')
layout(matrix(c(1,1,2,3,4,4,5,6), 4, 2, byrow = TRUE))
# pi: proportion of variants with non-zero effects
# ------------------------------------------------------------------------------
plot(hyp$pi, type="l", ylab="pi", main="pi")
hist(hyp$pi, main="pi", xlab="pi")
plot(density(hyp$pi), main="pi", xlab="pi")
# ------------------------------------------------------------------------------
# n_gamma: number of variants with major effect
# ------------------------------------------------------------------------------
plot(hyp$n_gamma, type="l", ylab="n_gamma", main="n_gamma - trace")
hist(hyp$n_gamma, main="n_gamma - posterior distribution", xlab="n_gamma")
# Bug fix: this panel previously plotted density(hyp$pi) while being
# titled "n_gamma"; it now plots the n_gamma posterior as labelled.
plot(density(hyp$n_gamma), main="n_gamma - posterior distribution", xlab="n_gamma")
# ------------------------------------------------------------------------------
dev.off()
# ==============================================================================
# Hyperparameter summary table: mean, median, and 95% ETPI of each
# hyperparameter in the GEMMA BSLMM output.
# ==============================================================================
# One row of the summary: label, posterior mean, then the 50%, 2.5% and
# 97.5% quantiles of the sampled values (the same pattern was previously
# written out six times; factored into a helper).
param_summary <- function(label, values) {
  c(label, mean(values), quantile(values, probs=c(0.5,0.025,0.975)))
}
# h -> approximation to proportion of phenotypic variance
# explained by variants (PVE)
h <- param_summary("h", hyp$h)
# pve -> PVE
pve <- param_summary("PVE", hyp$pve)
# rho -> approximation to proportion of genetic variance explained by
# variants with major effect (PGE)
# rho=0 -> pure LMM, highly polygenic basis
# rho=1 -> pure BVSR, few major effect loci
rho <- param_summary("rho", hyp$rho)
# pge -> PGE
pge <- param_summary("PGE", hyp$pge)
# pi -> proportion of variants with non-zero effects
pi <- param_summary("pi", hyp$pi)
# n.gamma -> number of variants with major effect
n.gamma <- param_summary("n.gamma", hyp$n_gamma)
# Assemble the table (row names dropped) and save it to a file
# ==============================================================================
hyp.table <- as.data.frame(rbind(h,pve,rho,pge,pi,n.gamma),row.names=F)
colnames(hyp.table) <- c("hyperparam", "mean","median","2.5%", "97.5%")
# show table (echoed when run via Rscript / interactively)
hyp.table
# write table to file
write.table(hyp.table, file= file.path(outDir, "hyperparameters.dsv"), sep="\t", quote=F)
|
# Auto-extracted example script for cobalt's set.cobalt.options() /
# get.cobalt.options(); each call below has a session-wide side effect
# on cobalt's stored options.
library(cobalt)
### Name: set.cobalt.options
### Title: Set options in cobalt
### Aliases: set.cobalt.options get.cobalt.options
### ** Examples
# Set un to be TRUE to always display unadjusted
# balance measures and set binary to "std" to
# produce standardized mean differences for
# binary variables.
set.cobalt.options(un = TRUE, binary = "std")
# Note: the above is equivalent to:
# options(cobalt_un = TRUE, cobalt_binary = "std")
# but performs some additional checks
get.cobalt.options("un", "binary")
# Note: the above is equivalent to:
# getOption("cobalt_un")
# getOption("cobalt_binary")
# Return all cobalt options to their defaults
set.cobalt.options(default = TRUE)
# View all available options
get.cobalt.options()
| /data/genthat_extracted_code/cobalt/examples/set.cobalt.options.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 748 | r | library(cobalt)
### Name: set.cobalt.options
### Title: Set options in cobalt
### Aliases: set.cobalt.options get.cobalt.options
### ** Examples
# Set un to be TRUE to always display unadjusted
# balance measures and set binary to "std" to
# produce standardized mean differences for
# binary variables.
set.cobalt.options(un = TRUE, binary = "std")
# Note: the above is equivalent to:
# options(cobalt_un = TRUE, cobalt_binary = "std")
# but performs some additional checks
get.cobalt.options("un", "binary")
# Note: the above is equivalent to:
# getOption("cobalt_un")
# getOption("cobalt_binary")
# Return all cobalt options to their defaults
set.cobalt.options(default = TRUE)
# View all available options
get.cobalt.options()
|
library(tidyverse)
library(lubridate)
### LOADING AND CLEANING DATA
landings_data <- read_csv("_data/sample_landings_data_raw.csv")
landings_data
#start with the landings_data data frame
landings_data <- landings_data %>%
#rename the columns
rename(Year = yy,
Date = dat,
Trip_ID = trip,
Effort_Hours = effort,
Gear = gr,
Species = sp,
Length_cm = l_cm,
Weight_g = w_cm) %>%
#turn the date column into a date format that R recognizes
mutate(Date = mdy(Date))
landings_data
#checking for missing values
landings_data[!complete.cases(landings_data),]
#removing observations with missing values
landings_data <- na.omit(landings_data)
landings_data
#checking for typos
unique(landings_data$Gear)
#fixing this by changing all to lowercase letters
landings_data <- landings_data %>%
mutate(Gear = tolower(Gear))
unique(landings_data$Gear)
#checking another variable
unique(landings_data$Species)
#checking how many times each of the 2 species spellings occurs
landings_data %>%
filter(Species == "Caesoi cunning") %>%
nrow()
landings_data %>%
filter(Species == "Caesio cuning") %>%
nrow()
#replacing misspelled values
landings_data <- landings_data %>%
mutate(Species = replace(Species,Species == "Caesoi cunning", "Caesio cuning"))
unique(landings_data$Species)
#looking at the range and distribution of a numeric variable
summary(landings_data$Length_cm)
#visualising data to identify errors
plot(landings_data$Length_cm)
#removing error
landings_data <- landings_data %>%
filter(Length_cm < 100)
plot(landings_data$Length_cm)
#try with Weight_g and Effort_Hours
#saving this dataset using a new name so we have a copy of raw and clean data
write_csv(landings_data,"_data/sample_landings_data_clean.csv")
### BASIC FISHERIES STATISTICS
#start with the landings data frame
annual_landings <- landings_data %>%
#adding column for kilograms by dividing gram column by 1000
mutate(Weight_kg = Weight_g / 1000) %>%
#grouping data by year
group_by(Year) %>%
#summarising the total annual landings per year
summarize(Annual_Landings_kg = sum(Weight_kg,na.rm=TRUE))
#displaying a table of the annual landings data
annual_landings
#na.rm = TRUE tells R what to do with NA values in your data
#here we are removing NA values before summing Weight_kg
#many functions will return NA if any value is NA
#starting with the landings data frame
annual_gear_landings <- landings_data %>%
#adding column for kilograms by dividing gram column by 1000
mutate(Weight_kg = Weight_g / 1000) %>%
#grouping data by year and gear type
group_by(Year,Gear) %>%
#summarising the total annual landings per year and gear type
summarize(Annual_Landings_kg = sum(Weight_kg,na.rm=TRUE))
#displaying a table of the annual landings data by gear type
annual_gear_landings
#calculating catch-per-unit-effort (CPUE)
#CPUE is calculated by dividing the catch of each fishing trip by the number of
#hours fished during that trip
#unit of CPUE is kilograms per hour
#the median for every year is then calculated in order to remove outliers
#starting with the landings data frame
cpue_data <- landings_data %>%
#adding column for kilograms by dividing gram column by 1000
mutate(Weight_kg = Weight_g / 1000) %>%
#grouping by year and Trip ID to calculate CPUE for every trip in every year
group_by(Year,Trip_ID) %>%
#for each year and trip ID, calculating the CPUE for each trip by
#dividing the sum of the catch, converted from grams to kilograms,
#by the trip by the number of fishing hours
summarize(Trip_CPUE = sum(Weight_kg) / mean(Effort_Hours)) %>%
#group by year so we can calculate median CPUE for each year across all trips
#in the year
group_by(Year) %>%
#calculating median CPUE for each year
summarize(Median_CPUE_kg_hour = median(Trip_CPUE))
#displaying a table of the CPUE data
cpue_data
#determining the percentage of mature fish in the catch in every year of the
#data frame
#defining m95, the length at which 95% of fish are mature
m95 = 15.9
#starting with the landings data frame
landings_data %>%
#adding a column to the data that indicates whether each length measurement
#is from a mature or immature fish.
#If it's mature, this value should be TRUE; if immature, FALSE.
mutate(Mature = Length_cm > m95) %>%
#grouping by year so we can see the percent mature for every year
group_by(Year) %>%
#the percentage mature is equal to the number of mature fish divided by
#the total number of fish and multiplied by 100
summarize(Percent_Mature = sum(Mature) / n() * 100)
### PLOTTING FISHERIES DATA
#starting with the annual_landings data frame you created in the last step
annual_landings %>%
#initialising a ggplot of annual landings versus year
ggplot(aes(x=Year,y=Annual_Landings_kg)) +
#telling ggplot that the plot type should be a scatter plot
geom_point() +
#adding a line connecting the points
geom_line() +
#changing the y-axis title
ylab("Annual Landings [kg/year]") +
#adding figure title
ggtitle("Annual landings of Caesio cuning")
#starting with the annual landings by gear type
#(note: the group_by() that was here is not needed -- ggplot2 ignores
#dplyr groups, and facet_wrap() below does the per-gear split)
annual_gear_landings %>%
  #initialising a ggplot of annual landings versus year
  ggplot(aes(x=Year,y=Annual_Landings_kg)) +
  #telling ggplot that the plot type should be a scatter plot
  geom_point() +
  #adding a line connecting the points
  geom_line() +
  #y-axis title: these are raw (not normalized) annual totals, so the
  #previous "Normalized" label was misleading
  ylab("Annual landings [kg/year]") +
  #adding figure title
  ggtitle("Annual landings of Caesio cuning by gear type") +
  #telling the figure to plot by all different gear types
  facet_wrap(~Gear)
#starting with the CPUE data frame
cpue_data %>%
#initialising a ggplot of median CPUE versus year
ggplot(aes(x=Year,y=Median_CPUE_kg_hour)) +
#telling ggplot that the plot type should be a scatter plot
geom_point() +
#adding a line connecting the points
geom_line() +
#changing the y-axis title
ylab("Median CPUE [kg/hour]") +
#adding a figure title
ggtitle("Median CPUE for Caesio cuning")
#starting with the landings data frame
landings_data %>%
#filtering data to only look at length measurements from 2014
filter(Year == 2014) %>%
#initialising ggplot of data using the length column
ggplot(aes(Length_cm)) +
#telling ggplot that the plot type should be a histogram
geom_histogram() +
#changing x-axis label
xlab("Length [cm]") +
#adding figure title
ggtitle("Length histogram of Caesio cuning in the catch\nLength at 95% maturity shown as a red line.") +
#adding a red vertical line for m95,
#the length at which 95% of fish are mature.
#Any fish below this length may be immature.
#Use the m95 value defined in the previous section
geom_vline(aes(xintercept=m95),color="red")
#starting with the landings data frame
landings_data %>%
#filtering data to only look at length measurements from 2014
filter(Year == 2014) %>%
  #initialising ggplot of data using the length column
ggplot(aes(Length_cm)) +
#telling ggplot that the plot type should be a histogram
geom_histogram() +
  #changing x-axis label
xlab("Length [cm]") +
#adding figure title
ggtitle("Length histogram of Caesio cuning in the catch by gear type\nLength at 95% maturity shown as a red line.") +
#adding a red line for m95
geom_vline(aes(xintercept=m95),color="red") +
#telling the figure to plot by all different gear types, known as facetting
facet_wrap(~Gear)
| /intro-to-r/intro-to-r.R | no_license | jacobpassfield/third-year-project | R | false | false | 7,568 | r | library(tidyverse)
library(lubridate)
### LOADING AND CLEANING DATA
landings_data <- read_csv("_data/sample_landings_data_raw.csv")
landings_data
#start with the landings_data data frame
landings_data <- landings_data %>%
#rename the columns
rename(Year = yy,
Date = dat,
Trip_ID = trip,
Effort_Hours = effort,
Gear = gr,
Species = sp,
Length_cm = l_cm,
Weight_g = w_cm) %>%
#turn the date column into a date format that R recognizes
mutate(Date = mdy(Date))
landings_data
#checking for missing values
landings_data[!complete.cases(landings_data),]
#removing observations with missing values
landings_data <- na.omit(landings_data)
landings_data
#checking for typos
unique(landings_data$Gear)
#fixing this by changing all to lowercase letters
landings_data <- landings_data %>%
mutate(Gear = tolower(Gear))
unique(landings_data$Gear)
#checking another variable
unique(landings_data$Species)
#checking how many times each of the 2 species spellings occurs
landings_data %>%
filter(Species == "Caesoi cunning") %>%
nrow()
landings_data %>%
filter(Species == "Caesio cuning") %>%
nrow()
#replacing misspelled values
landings_data <- landings_data %>%
mutate(Species = replace(Species,Species == "Caesoi cunning", "Caesio cuning"))
unique(landings_data$Species)
#looking at the range and distribution of a numeric variable
summary(landings_data$Length_cm)
#visualising the data to identify errors (outliers show up as extreme points)
plot(landings_data$Length_cm)
#removing the value flagged as an error: keep only lengths below 100 cm
landings_data <- landings_data %>%
filter(Length_cm < 100)
plot(landings_data$Length_cm)
#exercise: repeat the same checks with Weight_g and Effort_Hours
#saving this dataset under a new name so we keep both the raw and the clean copy
write_csv(landings_data,"_data/sample_landings_data_clean.csv")
### BASIC FISHERIES STATISTICS
#start with the landings data frame
annual_landings <- landings_data %>%
#adding column for kilograms by dividing gram column by 1000
mutate(Weight_kg = Weight_g / 1000) %>%
#grouping data by year
group_by(Year) %>%
#summarising the total annual landings per year
summarize(Annual_Landings_kg = sum(Weight_kg,na.rm=TRUE))
#displaying a table of the annual landings data
annual_landings
#na.rm = TRUE tells R what to do with NA values in your data
#here we are remobing the values before summing Weight_kg
#many functions will return NA if any value is NA
#starting with the landings data frame
annual_gear_landings <- landings_data %>%
#adding column for kilograms by dividing gram column by 1000
mutate(Weight_kg = Weight_g / 1000) %>%
#grouping data by year and gear type
group_by(Year,Gear) %>%
#summarising the total annual landings per year and gear type
summarize(Annual_Landings_kg = sum(Weight_kg,na.rm=TRUE))
#displaying a table of the annual landings data by gear type
annual_gear_landings
#calculating catch-per-unit-effort (CPUE)
#CPUE is calculated by dividing the catch of each fishing trip by the number of
#hours fished during that trip
#unit of CPUE is kilograms per hour
#the median for every year is then calculated in order to remove outliers
#starting with the landings data frame
cpue_data <- landings_data %>%
#adding column for kilograms by dividing gram column by 1000
mutate(Weight_kg = Weight_g / 1000) %>%
#grouping by year and Trip ID to calculate CPUE for every trip in every year
group_by(Year,Trip_ID) %>%
#for each year and trip ID, calculating the CPUE for each trip by
#dividing the sum of the catch, converted from grams to kilograms,
#by the trip by the number of fishing hours
summarize(Trip_CPUE = sum(Weight_kg) / mean(Effort_Hours)) %>%
#group by year so we can calculate median CPUE for each year across all trips
#in the year
group_by(Year) %>%
#calculating median CPUE for each year
summarize(Median_CPUE_kg_hour = median(Trip_CPUE))
#displaying a table of the CPUE data
cpue_data
#determining the percentage of mature fish in the catch in every year of the
#data frame
#defining m95, the length at which 95% of fish are mature
m95 = 15.9
#starting with the landings data frame
landings_data %>%
#adding a column to the data that indicates whether each length measurement
#is from a mature or immature fish.
#If it's mature, this value should be TRUE; if immature, FALSE.
mutate(Mature = Length_cm > m95) %>%
#grouping by year so we can see the percent mature for every year
group_by(Year) %>%
#the percentage mature is equal to the number of mature fish divided by
#the total number of fish and multiplied by 100
summarize(Percent_Mature = sum(Mature) / n() * 100)
### PLOTTING FISHERIES DATA
#starting with the annual_landings data frame you created in the last step
annual_landings %>%
#initialising a ggplot of annual landings versus year
ggplot(aes(x=Year,y=Annual_Landings_kg)) +
#telling ggplot that the plot type should be a scatter plot
geom_point() +
#adding a line connecting the points
geom_line() +
#changing the y-axis title
ylab("Annual Landings [kg/year]") +
#adding figure title
ggtitle("Annual landings of Caesio cuning")
#starting with the landings data frame
annual_gear_landings %>%
#grouping the data by year
group_by(Year,Gear) %>%
#initialising a ggplot of annual landings versus year
ggplot(aes(x=Year,y=Annual_Landings_kg)) +
#telling ggplot that the plot type should be a scatter plot
geom_point() +
#adding a line connecting the points
geom_line() +
#changing the y-axis title
ylab("Normalized annual Landings [kg/year]") +
#adding figure title
ggtitle("Normalized annual landings of Caesio cuning") +
#telling the figure to plot by all different gear types
facet_wrap(~Gear)
#starting with the CPUE data frame
cpue_data %>%
#initialising a ggplot of median CPUE versus year
ggplot(aes(x=Year,y=Median_CPUE_kg_hour)) +
#telling ggplot that the plot type should be a scatter plot
geom_point() +
#adding a line connecting the points
geom_line() +
#changing the y-axis title
ylab("Median CPUE [kg/hour]") +
#adding a figure title
ggtitle("Median CPUE for Caesio cuning")
#starting with the landings data frame
landings_data %>%
#filtering data to only look at length measurements from 2014
filter(Year == 2014) %>%
#initialising ggplot of data using the length column
ggplot(aes(Length_cm)) +
#telling ggplot that the plot type should be a histogram
geom_histogram() +
#changing x-axis label
xlab("Length [cm]") +
#adding figure title
ggtitle("Length histogram of Caesio cuning in the catch\nLength at 95% maturity shown as a red line.") +
#adding a red vertical line for m95,
#the length at which 95% of fish are mature.
#Any fish below this length may be immature.
#Use the m95 value defined in the previous section
geom_vline(aes(xintercept=m95),color="red")
#starting with the landings data frame
landings_data %>%
#filtering the data to only length measurements recorded in 2014
filter(Year == 2014) %>%
#initialising a ggplot of the data using the length column
ggplot(aes(Length_cm)) +
#telling ggplot that the plot type should be a histogram
geom_histogram() +
#changing the x-axis label
xlab("Length [cm]") +
#adding figure title
ggtitle("Length histogram of Caesio cuning in the catch by gear type\nLength at 95% maturity shown as a red line.") +
#adding a red vertical line at m95 (length at 95% maturity, defined earlier);
#fish shorter than this may be immature
geom_vline(aes(xintercept=m95),color="red") +
#plotting one panel per gear type, known as faceting
facet_wrap(~Gear)
|
#' @rdname create_dataset
#' @title Create or update a dataset
#' @description Create or update dataset within a Dataverse
#' @details \code{create_dataset} creates a Dataverse dataset. In Dataverse, a \dQuote{dataset} is the lowest-level structure in which to organize files. For example, a Dataverse dataset might contain the files used to reproduce a published article, including data, analysis code, and related materials. Datasets can be organized into \dQuote{Dataverse} objects, which can be further nested within other Dataverses. For someone creating an archive, this would be the first step to producing said archive (after creating a Dataverse, if one does not already exist). Once files and metadata have been added, the dataset can be published (i.e., made public) using \code{\link{publish_dataset}}.
#'
#' \code{update_dataset} updates a Dataverse dataset that has already been created using \code{\link{create_dataset}}. This creates a draft version of the dataset or modifies the current draft if one is already in-progress. It does not assign a new version number to the dataset nor does it make it publicly visible (which can be done with \code{\link{publish_dataset}}).
#'
#' @template dv
#' @template ds
#' @param body A list describing the dataset.
#' @template envvars
#' @template dots
#' @return An object of class \dQuote{dataverse_dataset}.
#' @seealso \code{\link{get_dataset}}, \code{\link{delete_dataset}}, \code{\link{publish_dataset}}
#' @examples
#' \dontrun{
#' meta <- list()
#' ds <- create_dataset("mydataverse", body = meta)
#'
#' meta2 <- list()
#' update_dataset(ds, body = meta2)
#'
#' # cleanup
#' delete_dataset(ds)
#' }
#' @export
create_dataset <- function(dataverse, body, key = Sys.getenv("DATAVERSE_KEY"), server = Sys.getenv("DATAVERSE_SERVER"), ...) {
  # Resolve the dataverse argument (name, alias, or object) to its id.
  dataverse <- dataverse_id(dataverse, key = key, server = server, ...)
  # Build the "create dataset" endpoint for that dataverse.
  endpoint <- paste0(api_url(server), "dataverses/", dataverse, "/datasets/")
  # POST the metadata list as a JSON body, authenticating via the API-key header.
  resp <- httr::POST(endpoint,
                     httr::add_headers("X-Dataverse-key" = key),
                     body = body, encode = "json", ...)
  # Fail loudly with the server-supplied message on HTTP errors.
  httr::stop_for_status(resp, task = httr::content(resp)$message)
  # Return the parsed response describing the new dataset.
  httr::content(resp)
}
#' @rdname create_dataset
#' @export
update_dataset <- function(dataset, body, key = Sys.getenv("DATAVERSE_KEY"), server = Sys.getenv("DATAVERSE_SERVER"), ...) {
#resolve the dataset argument to its id via dataset_id()
dataset <- dataset_id(dataset, key = key, server = server, ...)
#endpoint: PUT /datasets/{id}/versions/:draft - creates or modifies the draft version
u <- paste0(api_url(server), "datasets/", dataset, "/versions/:draft")
r <- httr::PUT(u, httr::add_headers("X-Dataverse-key" = key), body = body, encode = "json", ...)
#surface the server-side error message if the request failed
httr::stop_for_status(r, task = httr::content(r)$message)
#NOTE(review): unlike create_dataset, this returns the raw JSON response as
#UTF-8 text rather than a parsed list - confirm the asymmetry is intentional
httr::content(r, as = "text", encoding = "UTF-8")
}
| /R/create_dataset.R | no_license | IQSS/dataverse-client-r | R | false | false | 2,661 | r | #' @rdname create_dataset
#' @title Create or update a dataset
#' @description Create or update dataset within a Dataverse
#' @details \code{create_dataset} creates a Dataverse dataset. In Dataverse, a \dQuote{dataset} is the lowest-level structure in which to organize files. For example, a Dataverse dataset might contain the files used to reproduce a published article, including data, analysis code, and related materials. Datasets can be organized into \dQuote{Dataverse} objects, which can be further nested within other Dataverses. For someone creating an archive, this would be the first step to producing said archive (after creating a Dataverse, if one does not already exist). Once files and metadata have been added, the dataset can be published (i.e., made public) using \code{\link{publish_dataset}}.
#'
#' \code{update_dataset} updates a Dataverse dataset that has already been created using \code{\link{create_dataset}}. This creates a draft version of the dataset or modifies the current draft if one is already in-progress. It does not assign a new version number to the dataset nor does it make it publicly visible (which can be done with \code{\link{publish_dataset}}).
#'
#' @template dv
#' @template ds
#' @param body A list describing the dataset.
#' @template envvars
#' @template dots
#' @return An object of class \dQuote{dataverse_dataset}.
#' @seealso \code{\link{get_dataset}}, \code{\link{delete_dataset}}, \code{\link{publish_dataset}}
#' @examples
#' \dontrun{
#' meta <- list()
#' ds <- create_dataset("mydataverse", body = meta)
#'
#' meta2 <- list()
#' update_dataset(ds, body = meta2)
#'
#' # cleanup
#' delete_dataset(ds)
#' }
#' @export
create_dataset <- function(dataverse, body, key = Sys.getenv("DATAVERSE_KEY"), server = Sys.getenv("DATAVERSE_SERVER"), ...) {
#resolve the dataverse argument (name, alias, or object) to its id
dataverse <- dataverse_id(dataverse, key = key, server = server, ...)
#endpoint: POST /dataverses/{id}/datasets/
u <- paste0(api_url(server), "dataverses/", dataverse, "/datasets/")
#send the metadata list as a JSON body, authenticating with the API-key header
r <- httr::POST(u, httr::add_headers("X-Dataverse-key" = key), body = body, encode = "json", ...)
#surface the server-side error message if the request failed
httr::stop_for_status(r, task = httr::content(r)$message)
#return the parsed response describing the newly created dataset
httr::content(r)
}
#' @rdname create_dataset
#' @export
update_dataset <- function(dataset, body, key = Sys.getenv("DATAVERSE_KEY"), server = Sys.getenv("DATAVERSE_SERVER"), ...) {
  # Resolve the dataset argument to its id.
  dataset <- dataset_id(dataset, key = key, server = server, ...)
  # Target the draft-version endpoint for that dataset.
  endpoint <- paste0(api_url(server), "datasets/", dataset, "/versions/:draft")
  # PUT the metadata list as a JSON body with the API key in the header.
  resp <- httr::PUT(endpoint,
                    httr::add_headers("X-Dataverse-key" = key),
                    body = body, encode = "json", ...)
  # Fail loudly with the server-supplied message on HTTP errors.
  httr::stop_for_status(resp, task = httr::content(resp)$message)
  # Return the raw JSON response as UTF-8 text.
  httr::content(resp, as = "text", encoding = "UTF-8")
}
|
###############################
# analysis script
#
#this script loads the processed, cleaned data, does a simple analysis
#and saves the results to the results folder
#Install additional packages
install.packages(c("ggpubr", "AICcmodavg"))
#load needed packages. make sure they are installed.
library(ggplot2) #for plotting
library(broom) #for cleaning up output from lm()
library(here) #for data loading/saving
library(tidyverse)
library(ggpubr)
library(AICcmodavg)
#path to data
#note the use of the here() package and not absolute paths
data_location <- here::here("data","processed_data","processeddata.rds")
#load data.
NYC_Virus_Deaths <- readRDS(data_location)
#re-label missing data (NA) as 0. It is fair to assume no reported deaths as 0, as it is assumed death data is coming from hospitals, who must report all deaths.
#BUG FIX: the result must be assigned back to NYC_Virus_Deaths;
#the original pipe only printed the result and left the NAs in place.
NYC_Virus_Deaths <- NYC_Virus_Deaths %>%
  replace_na(list(COVID.19.Deaths = 0, Influenza.Deaths = 0, Pneumonia.Deaths = 0))
######################################
#Data exploration/description
######################################
#I'm using basic R commands here.
#Lots of good packages exist to do more.
#For instance check out the tableone or skimr packages
#summarize data
data_summary = summary(NYC_Virus_Deaths)
#look at summary
print(data_summary)
#do the same, but with a bit of trickery to get things into the
#shape of a data frame (for easier saving/showing in manuscript)
summary_df = data.frame(do.call(cbind, lapply(NYC_Virus_Deaths, summary)))
#save data frame table to file for later use in manuscript
summarytable_file = here("results", "summarytable.rds")
saveRDS(summary_df, file = summarytable_file)
#Once the data is loaded, we will want to alter the month variable a bit to make it "month during pandemic". This will create a sequential variable data set, irrespective of year. This will help up later on.
NYC_Virus_Deaths$Month[NYC_Virus_Deaths$Year == "2021"& NYC_Virus_Deaths$Month == 1] <- 13
NYC_Virus_Deaths$Month[NYC_Virus_Deaths$Year == "2021"& NYC_Virus_Deaths$Month == 2] <- 14
NYC_Virus_Deaths$Month[NYC_Virus_Deaths$Year == "2021"& NYC_Virus_Deaths$Month == 3] <- 15
NYC_Virus_Deaths$Month[NYC_Virus_Deaths$Year == "2021"& NYC_Virus_Deaths$Month == 4] <- 16
NYC_Virus_Deaths$Month[NYC_Virus_Deaths$Year == "2021"& NYC_Virus_Deaths$Month == 5] <- 17
NYC_Virus_Deaths$Month[NYC_Virus_Deaths$Year == "2021"& NYC_Virus_Deaths$Month == 6] <- 18
NYC_Virus_Deaths$Month[NYC_Virus_Deaths$Year == "2021"& NYC_Virus_Deaths$Month == 7] <- 19
NYC_Virus_Deaths$Month[NYC_Virus_Deaths$Year == "2021"& NYC_Virus_Deaths$Month == 8] <- 20
NYC_Virus_Deaths$Month[NYC_Virus_Deaths$Year == "2021"& NYC_Virus_Deaths$Month == 9] <- 21
#make a scatterplot of data (Covid-19 Deaths, Months)
#Make a separate data set for each of the sex-specific variables.
NVD_All <- NYC_Virus_Deaths %>% dplyr::filter(Sex == "All Sexes")
NVD_Female <- NYC_Virus_Deaths %>% dplyr::filter(Sex == "Female")
NVD_Male <- NYC_Virus_Deaths %>% dplyr::filter(Sex == "Male")
######################################
#Plotting Virus Deaths as a function of
#Time, separated by gender and by total population (ALL)
######################################
#Plot newly formed data sets for covid deaths with line of best fit for each gender and combination.
Covid_Month_All <- NVD_All %>% ggplot(aes(x=Month, y=COVID.19.Deaths)) + geom_point() + geom_smooth(method='lm') + ggtitle("Covid Deaths per Month Since March 2020: all genders")
Covid_Month_Female <- NVD_Female %>% ggplot(aes(x=Month, y=COVID.19.Deaths)) + geom_point() + geom_smooth(method='lm')+ ggtitle("Covid Deaths per Month Since March 2020: Females")
Covid_Month_Male <- NVD_Male %>% ggplot(aes(x=Month, y=COVID.19.Deaths)) + geom_point() + geom_smooth(method='lm')+ ggtitle("Covid Deaths per Month Since March 2020: Males")
#look at each figure
plot(Covid_Month_All)
plot(Covid_Month_Female)
plot(Covid_Month_Male)
##save figures
#Total population
figure_file = here("results","COVID_ALL_figure.png")
ggsave(filename = figure_file, plot=Covid_Month_All)
#Females
figure_file = here("results","COVID_FEMALE_figure.png")
ggsave(filename = figure_file, plot=Covid_Month_Female)
#Males
figure_file = here("results","COVID_MALE_figure.png")
ggsave(filename = figure_file, plot=Covid_Month_Male)
##Repeat Previous steps for Influenza
#Plot newly formed data sets for Influenza deaths with line of best fit for each gender and combination.
Influenza_Month_All <- NVD_All %>% ggplot(aes(x=Month, y=Influenza.Deaths)) + geom_point() + geom_smooth(method='lm') + ggtitle("Flu Deaths per Month Since March 2020: all genders")
Influenza_Month_Female <- NVD_Female %>% ggplot(aes(x=Month, y=Influenza.Deaths)) + geom_point() + geom_smooth(method='lm')+ ggtitle("Flu Deaths per Month Since March 2020: Females")
Influenza_Month_Male <- NVD_Male %>% ggplot(aes(x=Month, y=Influenza.Deaths)) + geom_point() + geom_smooth(method='lm')+ ggtitle("Flu Deaths per Month Since March 2020: Males")
#look at each figure
plot(Influenza_Month_All)
plot(Influenza_Month_Female)
plot(Influenza_Month_Male)
##save figures
#Total population
figure_file = here("results","Flu_ALL_figure.png")
ggsave(filename = figure_file, plot=Influenza_Month_All)
#Females
figure_file = here("results","Flu_FEMALE_figure.png")
ggsave(filename = figure_file, plot=Influenza_Month_Female)
#Males
figure_file = here("results","Flu_MALE_figure.png")
ggsave(filename = figure_file, plot=Influenza_Month_Male)
##Repeat Previous steps for Pneumonia
#Plot newly formed data sets for Pneumonia deaths with line of best fit for each gender and combination.
Pneumonia_Month_All <- NVD_All %>% ggplot(aes(x=Month, y=Pneumonia.Deaths)) + geom_point() + geom_smooth(method='lm') + ggtitle("Pneumonia Deaths per Month Since March 2020: all genders")
Pneumonia_Month_Female <- NVD_Female %>% ggplot(aes(x=Month, y=Pneumonia.Deaths)) + geom_point() + geom_smooth(method='lm')+ ggtitle("Pneumonia Deaths per Month Since March 2020: Females")
#BUG FIX: the male plot previously used y=Influenza.Deaths; it must plot Pneumonia.Deaths to match its title.
Pneumonia_Month_Male <- NVD_Male %>% ggplot(aes(x=Month, y=Pneumonia.Deaths)) + geom_point() + geom_smooth(method='lm')+ ggtitle("Pneumonia Deaths per Month Since March 2020: Males")
#look at each figure
plot(Pneumonia_Month_All)
plot(Pneumonia_Month_Female)
plot(Pneumonia_Month_Male)
##save figures
#Total population
figure_file = here("results","Pneumonia_ALL_figure.png")
ggsave(filename = figure_file, plot=Pneumonia_Month_All)
#Females
figure_file = here("results","Pneumonia_FEMALE_figure.png")
ggsave(filename = figure_file, plot=Pneumonia_Month_Female)
#Males
figure_file = here("results","Pneumonia_MALE_figure.png")
ggsave(filename = figure_file, plot=Pneumonia_Month_Male)
##Finally, we will want to visualize how the three different virus deaths compare over time.
NVD_ALL_2 <- pivot_longer(NVD_All, cols=6:8, names_to = "Virus_Type", values_to = "Deaths")
Virus_Death_Plot <- NVD_ALL_2 %>%
ggplot(aes(x=Month, y=Deaths, color = Virus_Type)) +
geom_point() +
geom_smooth(method = 'lm') +
ggtitle("Comparison of Virus Deaths Over Time")
#View combined virus death plot
plot(Virus_Death_Plot)
#Save the figue
figure_file = here("results","Virus_Deaths_figure.png")
ggsave(filename = figure_file, plot=Virus_Death_Plot)
######################################
#Data fitting/statistical analysis
######################################
# Run an ANOVA test for the three different virus deaths over the entire provided time period.
anova_one_way <- aov(Deaths~Virus_Type, data = NVD_ALL_2)
summary(anova_one_way)
# place results from fit into a data frame with the tidy function
aov_table <- broom::tidy(anova_one_way)
#look at fit results
print(aov_table)
# save the tidied ANOVA results table
#BUG FIX: the object created above is `aov_table`; `lmtable_Covid` is never
#defined in this script and saveRDS() would error at runtime.
table_file_Covid <- here("results", "Covid_resulttable.rds")
saveRDS(aov_table, file = table_file_Covid)
#Based on the reported p-value of 0.382, there is no significant difference between deaths resulting from COVID, Influenza, or Pneumonia.
#Based on previous knowledge about the high virulence of COVID-19 compared to Influenza and Pneumonia and the results of the produced figures, one possible explanation for this finding is reporting bias.
#COVID was the main focus of reported deaths during the pandemic. If you compare the created figures, n for Influenza and Pneumonia deaths is quite low.
#Low n suggests that the ANOVA performed was biased due to low sample size.
#Additionally, Pneumonia is a known secondary infection of both Influenza and Covid-19 and usually the cause of death when an individual dies from Covid. It is possible that there is a conflation of cause of death.
| /code/analysis_code/analysisscript.R | no_license | CarterColeman/MONICACHAN-MADA-analysis2 | R | false | false | 8,594 | r | ###############################
# analysis script
#
#this script loads the processed, cleaned data, does a simple analysis
#and saves the results to the results folder
#Install additional packages
install.packages(c("ggpubr", "AICcmodavg"))
#load needed packages. make sure they are installed.
library(ggplot2) #for plotting
library(broom) #for cleaning up output from lm()
library(here) #for data loading/saving
library(tidyverse)
library(ggpubr)
library(AICcmodavg)
#path to data
#note the use of the here() package and not absolute paths
data_location <- here::here("data","processed_data","processeddata.rds")
#load data.
NYC_Virus_Deaths <- readRDS(data_location)
#re-label missing data (NA) as 0. It is fair to assume no reported deaths as 0, as it is assumed death data is coming from hospitals, who must report all deaths.
#BUG FIX: the result must be assigned back to NYC_Virus_Deaths;
#the original pipe only printed the result and left the NAs in place.
NYC_Virus_Deaths <- NYC_Virus_Deaths %>%
  replace_na(list(COVID.19.Deaths = 0, Influenza.Deaths = 0, Pneumonia.Deaths = 0))
######################################
#Data exploration/description
######################################
#I'm using basic R commands here.
#Lots of good packages exist to do more.
#For instance check out the tableone or skimr packages
#summarize data
data_summary = summary(NYC_Virus_Deaths)
#look at summary
print(data_summary)
#do the same, but with a bit of trickery to get things into the
#shape of a data frame (for easier saving/showing in manuscript)
summary_df = data.frame(do.call(cbind, lapply(NYC_Virus_Deaths, summary)))
#save data frame table to file for later use in manuscript
summarytable_file = here("results", "summarytable.rds")
saveRDS(summary_df, file = summarytable_file)
#Once the data is loaded, we will want to alter the month variable a bit to make it "month during pandemic". This will create a sequential variable data set, irrespective of year. This will help up later on.
NYC_Virus_Deaths$Month[NYC_Virus_Deaths$Year == "2021"& NYC_Virus_Deaths$Month == 1] <- 13
NYC_Virus_Deaths$Month[NYC_Virus_Deaths$Year == "2021"& NYC_Virus_Deaths$Month == 2] <- 14
NYC_Virus_Deaths$Month[NYC_Virus_Deaths$Year == "2021"& NYC_Virus_Deaths$Month == 3] <- 15
NYC_Virus_Deaths$Month[NYC_Virus_Deaths$Year == "2021"& NYC_Virus_Deaths$Month == 4] <- 16
NYC_Virus_Deaths$Month[NYC_Virus_Deaths$Year == "2021"& NYC_Virus_Deaths$Month == 5] <- 17
NYC_Virus_Deaths$Month[NYC_Virus_Deaths$Year == "2021"& NYC_Virus_Deaths$Month == 6] <- 18
NYC_Virus_Deaths$Month[NYC_Virus_Deaths$Year == "2021"& NYC_Virus_Deaths$Month == 7] <- 19
NYC_Virus_Deaths$Month[NYC_Virus_Deaths$Year == "2021"& NYC_Virus_Deaths$Month == 8] <- 20
NYC_Virus_Deaths$Month[NYC_Virus_Deaths$Year == "2021"& NYC_Virus_Deaths$Month == 9] <- 21
#make a scatterplot of data (Covid-19 Deaths, Months)
#Make a separate data set for each of the sex-specific variables.
NVD_All <- NYC_Virus_Deaths %>% dplyr::filter(Sex == "All Sexes")
NVD_Female <- NYC_Virus_Deaths %>% dplyr::filter(Sex == "Female")
NVD_Male <- NYC_Virus_Deaths %>% dplyr::filter(Sex == "Male")
######################################
#Plotting Virus Deaths as a function of
#Time, separated by gender and by total population (ALL)
######################################
#Plot newly formed data sets for covid deaths with line of best fit for each gender and combination.
Covid_Month_All <- NVD_All %>% ggplot(aes(x=Month, y=COVID.19.Deaths)) + geom_point() + geom_smooth(method='lm') + ggtitle("Covid Deaths per Month Since March 2020: all genders")
Covid_Month_Female <- NVD_Female %>% ggplot(aes(x=Month, y=COVID.19.Deaths)) + geom_point() + geom_smooth(method='lm')+ ggtitle("Covid Deaths per Month Since March 2020: Females")
Covid_Month_Male <- NVD_Male %>% ggplot(aes(x=Month, y=COVID.19.Deaths)) + geom_point() + geom_smooth(method='lm')+ ggtitle("Covid Deaths per Month Since March 2020: Males")
#look at each figure
plot(Covid_Month_All)
plot(Covid_Month_Female)
plot(Covid_Month_Male)
##save figures
#Total population
figure_file = here("results","COVID_ALL_figure.png")
ggsave(filename = figure_file, plot=Covid_Month_All)
#Females
figure_file = here("results","COVID_FEMALE_figure.png")
ggsave(filename = figure_file, plot=Covid_Month_Female)
#Males
figure_file = here("results","COVID_MALE_figure.png")
ggsave(filename = figure_file, plot=Covid_Month_Male)
##Repeat Previous steps for Influenza
#Plot newly formed data sets for Influenza deaths with line of best fit for each gender and combination.
Influenza_Month_All <- NVD_All %>% ggplot(aes(x=Month, y=Influenza.Deaths)) + geom_point() + geom_smooth(method='lm') + ggtitle("Flu Deaths per Month Since March 2020: all genders")
Influenza_Month_Female <- NVD_Female %>% ggplot(aes(x=Month, y=Influenza.Deaths)) + geom_point() + geom_smooth(method='lm')+ ggtitle("Flu Deaths per Month Since March 2020: Females")
Influenza_Month_Male <- NVD_Male %>% ggplot(aes(x=Month, y=Influenza.Deaths)) + geom_point() + geom_smooth(method='lm')+ ggtitle("Flu Deaths per Month Since March 2020: Males")
#look at each figure
plot(Influenza_Month_All)
plot(Influenza_Month_Female)
plot(Influenza_Month_Male)
##save figures
#Total population
figure_file = here("results","Flu_ALL_figure.png")
ggsave(filename = figure_file, plot=Influenza_Month_All)
#Females
figure_file = here("results","Flu_FEMALE_figure.png")
ggsave(filename = figure_file, plot=Influenza_Month_Female)
#Males
figure_file = here("results","Flu_MALE_figure.png")
ggsave(filename = figure_file, plot=Influenza_Month_Male)
##Repeat Previous steps for Pneumonia
#Plot newly formed data sets for Pneumonia deaths with line of best fit for each gender and combination.
Pneumonia_Month_All <- NVD_All %>% ggplot(aes(x=Month, y=Pneumonia.Deaths)) + geom_point() + geom_smooth(method='lm') + ggtitle("Pneumonia Deaths per Month Since March 2020: all genders")
Pneumonia_Month_Female <- NVD_Female %>% ggplot(aes(x=Month, y=Pneumonia.Deaths)) + geom_point() + geom_smooth(method='lm')+ ggtitle("Pneumonia Deaths per Month Since March 2020: Females")
#BUG FIX: the male plot previously used y=Influenza.Deaths; it must plot Pneumonia.Deaths to match its title.
Pneumonia_Month_Male <- NVD_Male %>% ggplot(aes(x=Month, y=Pneumonia.Deaths)) + geom_point() + geom_smooth(method='lm')+ ggtitle("Pneumonia Deaths per Month Since March 2020: Males")
#look at each figure
plot(Pneumonia_Month_All)
plot(Pneumonia_Month_Female)
plot(Pneumonia_Month_Male)
##save figures
#Total population
figure_file = here("results","Pneumonia_ALL_figure.png")
ggsave(filename = figure_file, plot=Pneumonia_Month_All)
#Females
figure_file = here("results","Pneumonia_FEMALE_figure.png")
ggsave(filename = figure_file, plot=Pneumonia_Month_Female)
#Males
figure_file = here("results","Pneumonia_MALE_figure.png")
ggsave(filename = figure_file, plot=Pneumonia_Month_Male)
##Finally, we will want to visualize how the three different virus deaths compare over time.
NVD_ALL_2 <- pivot_longer(NVD_All, cols=6:8, names_to = "Virus_Type", values_to = "Deaths")
Virus_Death_Plot <- NVD_ALL_2 %>%
ggplot(aes(x=Month, y=Deaths, color = Virus_Type)) +
geom_point() +
geom_smooth(method = 'lm') +
ggtitle("Comparison of Virus Deaths Over Time")
#View combined virus death plot
plot(Virus_Death_Plot)
#Save the figue
figure_file = here("results","Virus_Deaths_figure.png")
ggsave(filename = figure_file, plot=Virus_Death_Plot)
######################################
#Data fitting/statistical analysis
######################################
# Run an ANOVA test for the three different virus deaths over the entire provided time period.
anova_one_way <- aov(Deaths~Virus_Type, data = NVD_ALL_2)
summary(anova_one_way)
# place results from fit into a data frame with the tidy function
aov_table <- broom::tidy(anova_one_way)
#look at fit results
print(aov_table)
# save the tidied ANOVA results table
#BUG FIX: the object created above is `aov_table`; `lmtable_Covid` is never
#defined in this script and saveRDS() would error at runtime.
table_file_Covid <- here("results", "Covid_resulttable.rds")
saveRDS(aov_table, file = table_file_Covid)
#Based on the reported p-value of 0.382, there is no significant difference between deaths resulting from COVID, Influenza, or Pneumonia.
#Based on previous knowledge about the high virulence of COVID-19 compared to Influenza and Pneumonia and the results of the produced figures, one possible explanation for this finding is reporting bias.
#COVID was the main focus of reported deaths during the pandemic. If you compare the created figures, n for Influenza and Pneumonia deaths is quite low.
#Low n suggests that the ANOVA performed was biased due to low sample size.
#Additionally, Pneumonia is a known secondary infection of both Influenza and Covid-19 and usually the cause of death when an individual dies from Covid. It is possible that there is a conflation of cause of death.
|
#initiate multicore cluster and load packages
if (!require("pacman")) install.packages("pacman")
pacman::p_load(raster, rgdal, doParallel, foreach,tcltk)
cores <- 7
#BUG FIX: the argument name is `outfile`, not `output`; outfile="" redirects
#worker stdout/stderr to the master console so errors are visible.
cl <- makeCluster(cores, outfile = "")
registerDoParallel(cl)
#Tabulate the raster cell values that fall inside every polygon, in parallel.
#Arguments:
#  cores       - number of parallel chunks; should match the registered cluster size
#  polygonlist - list of length `cores`; each element holds the polygons for one worker
#  rasterlayer - the raster whose values are tabulated within each polygon
#Value: a data.frame (rbind-ed across all polygons) of polygon OWNER, raster value, count.
#NOTE(review): the temp-directory paths below are machine-specific - confirm before reuse.
multicore.tabulate.intersect<- function(cores, polygonlist, rasterlayer){
foreach(i=1:cores, .packages= c("raster","tcltk","foreach"), .combine = rbind) %dopar% {
#one Tk progress bar per worker, sized to that worker's polygon count
mypb <- tkProgressBar(title = "R progress bar", label = "", min = 0, max = length(polygonlist[[i]]), initial = 0, width = 300)
foreach(j = 1:length(polygonlist[[i]]), .combine = rbind) %do% {
#start each iteration with an empty result so a caught error contributes 0 rows
final<-data.frame()
tryCatch({ #not sure if this is necessary now that I'm using foreach, but it is useful for loops.
single <- polygonlist[[i]][j,] #pull out individual polygon to be tabulated
dir.create (file.path("/media/sarvision/InternshipFilesAraza/test/",i,j,single@data$OWNER), showWarnings = FALSE) #creates unique filepath for temp directory
rasterOptions(tmpdir=file.path("/media/sarvision/InternshipFilesAraza/test/",i,j, single@data$OWNER)) #sets temp directory - this is important b/c it can fill up a hard drive if you're doing a lot of polygons
clip1 <- crop(rasterlayer, extent(single)) #crop to extent of polygon
clip2 <- rasterize(single, clip1, mask=TRUE) #crops to polygon edge & converts to raster
ext <- getValues(clip2) #much faster than extract
tab<-table(ext) #tabulates the values of the raster in the polygon
mat<- as.data.frame(tab)
final<-cbind(single@data$OWNER,mat) #combines it with the name of the polygon
unlink(file.path("/media/sarvision/InternshipFilesAraza/test/",i,j,single@data$OWNER), recursive = TRUE,force = TRUE) #delete temporary files
setTkProgressBar(mypb, j, title = "number complete", label = j)
}, error=function(e){cat("ERROR :",conditionMessage(e), "\n")}) #trycatch error so it doesn't kill the loop
return(final)
}
#close(mypb) #not sure why but closing the pb while operating causes it to return an empty final dataset... dunno why.
}
}
msk <- readOGR(dsn =paste0(mydir,'/data/'), layer = "Country")
myoutput <- multicore.tabulate.intersect(7, polygonlist, rasterlayer) | /zz_test_mask_ver2.R | no_license | arnanaraza/internship | R | false | false | 2,341 | r | #initiate multicore cluster and load packages
if (!require("pacman")) install.packages("pacman")
pacman::p_load(raster, rgdal, doParallel, foreach,tcltk)
cores <- 7
#BUG FIX: the argument name is `outfile`, not `output`; outfile="" redirects
#worker stdout/stderr to the master console so errors are visible.
cl <- makeCluster(cores, outfile = "")
registerDoParallel(cl)
#Tabulate the raster cell values that fall inside every polygon, in parallel.
#Arguments:
#  cores       - number of parallel chunks; should match the registered cluster size
#  polygonlist - list of length `cores`; each element holds the polygons for one worker
#  rasterlayer - the raster whose values are tabulated within each polygon
#Value: a data.frame (rbind-ed across all polygons) of polygon OWNER, raster value, count.
#NOTE(review): the temp-directory paths below are machine-specific - confirm before reuse.
multicore.tabulate.intersect<- function(cores, polygonlist, rasterlayer){
foreach(i=1:cores, .packages= c("raster","tcltk","foreach"), .combine = rbind) %dopar% {
#one Tk progress bar per worker, sized to that worker's polygon count
mypb <- tkProgressBar(title = "R progress bar", label = "", min = 0, max = length(polygonlist[[i]]), initial = 0, width = 300)
foreach(j = 1:length(polygonlist[[i]]), .combine = rbind) %do% {
#start each iteration with an empty result so a caught error contributes 0 rows
final<-data.frame()
tryCatch({ #not sure if this is necessary now that I'm using foreach, but it is useful for loops.
single <- polygonlist[[i]][j,] #pull out individual polygon to be tabulated
dir.create (file.path("/media/sarvision/InternshipFilesAraza/test/",i,j,single@data$OWNER), showWarnings = FALSE) #creates unique filepath for temp directory
rasterOptions(tmpdir=file.path("/media/sarvision/InternshipFilesAraza/test/",i,j, single@data$OWNER)) #sets temp directory - this is important b/c it can fill up a hard drive if you're doing a lot of polygons
clip1 <- crop(rasterlayer, extent(single)) #crop to extent of polygon
clip2 <- rasterize(single, clip1, mask=TRUE) #crops to polygon edge & converts to raster
ext <- getValues(clip2) #much faster than extract
tab<-table(ext) #tabulates the values of the raster in the polygon
mat<- as.data.frame(tab)
final<-cbind(single@data$OWNER,mat) #combines it with the name of the polygon
unlink(file.path("/media/sarvision/InternshipFilesAraza/test/",i,j,single@data$OWNER), recursive = TRUE,force = TRUE) #delete temporary files
setTkProgressBar(mypb, j, title = "number complete", label = j)
}, error=function(e){cat("ERROR :",conditionMessage(e), "\n")}) #trycatch error so it doesn't kill the loop
return(final)
}
#close(mypb) #not sure why but closing the pb while operating causes it to return an empty final dataset... dunno why.
}
}
# Read the "Country" polygon layer via rgdal::readOGR.
# NOTE(review): `mydir` is not defined in this script -- presumably set in
# the calling environment; confirm before sourcing.
msk <- readOGR(dsn =paste0(mydir,'/data/'), layer = "Country")
myoutput <- multicore.tabulate.intersect(7, polygonlist, rasterlayer) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculate_variable_importance.R
\name{base_model_loss_}
\alias{base_model_loss_}
\title{Internal function for use by calculate_marginal_vimp}
\usage{
base_model_loss_(method, x, y, resampling_indices, trControl, tuneGrid,
loss_metric, ...)
}
\arguments{
\item{method}{character string defining method to pass to caret}
\item{x}{data.table containing predictor variables}
\item{y}{vector containing target variable}
\item{resampling_indices}{a list of integer vectors corresponding to the row indices
used for each resampling iteration}
\item{trControl}{trainControl object to be passed to caret train.}
\item{tuneGrid}{a data.frame containing hyperparameter values for caret.
Should only contain one value for each hyperparameter. Set to NULL
if caret method does not have any hyperparameter values.}
\item{loss_metric}{character. Loss metric to evaluate accuracy of model}
\item{...}{additional arguments to pass to caret train}
}
\description{
Calculate baseline accuracy using all variables in training data
}
| /man/base_model_loss_.Rd | permissive | breather/brightbox | R | false | true | 1,101 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculate_variable_importance.R
\name{base_model_loss_}
\alias{base_model_loss_}
\title{Internal function for use by calculate_marginal_vimp}
\usage{
base_model_loss_(method, x, y, resampling_indices, trControl, tuneGrid,
loss_metric, ...)
}
\arguments{
\item{method}{character string defining method to pass to caret}
\item{x}{data.table containing predictor variables}
\item{y}{vector containing target variable}
\item{resampling_indices}{a list of integer vectors corresponding to the row indices
used for each resampling iteration}
\item{trControl}{trainControl object to be passed to caret train.}
\item{tuneGrid}{a data.frame containing hyperparameter values for caret.
Should only contain one value for each hyperparameter. Set to NULL
if caret method does not have any hyperparameter values.}
\item{loss_metric}{character. Loss metric to evaluate accuracy of model}
\item{...}{additional arguments to pass to caret train}
}
\description{
Calculate baseline accuracy using all variables in training data
}
|
# This file is auto-generated by h2o-3/h2o-bindings/bin/gen_R.py
# Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
#'
# -------------------------- KMeans Model in H2O -------------------------- #
#'
#' Performs k-means clustering on an H2O dataset
#'
#' @param x A vector containing the \code{character} names of the predictors in the model.
#' @param model_id Destination id for this model; auto-generated if not specified.
#' @param training_frame Id of the training data frame.
#' @param validation_frame Id of the validation data frame.
#' @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2). Defaults to 0.
#' @param keep_cross_validation_models \code{Logical}. Whether to keep the cross-validation models. Defaults to TRUE.
#' @param keep_cross_validation_predictions \code{Logical}. Whether to keep the predictions of the cross-validation models. Defaults to FALSE.
#' @param keep_cross_validation_fold_assignment \code{Logical}. Whether to keep the cross-validation fold assignment. Defaults to FALSE.
#' @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified' option will
#' stratify the folds based on the response variable, for classification problems. Must be one of: "AUTO",
#' "Random", "Modulo", "Stratified". Defaults to AUTO.
#' @param fold_column Column with cross-validation fold index assignment per observation.
#' @param ignore_const_cols \code{Logical}. Ignore constant columns. Defaults to TRUE.
#' @param score_each_iteration \code{Logical}. Whether to score during each iteration of model training. Defaults to FALSE.
#' @param k The max. number of clusters. If estimate_k is disabled, the model will find k centroids, otherwise it will
#' find up to k centroids. Defaults to 1.
#' @param estimate_k \code{Logical}. Whether to estimate the number of clusters (<=k) iteratively and deterministically. Defaults
#' to FALSE.
#' @param user_points This option allows you to specify a dataframe, where each row represents an initial cluster center. The user-
#' specified points must have the same number of columns as the training observations. The number of rows must
#' equal the number of clusters
#' @param max_iterations Maximum training iterations (if estimate_k is enabled, then this is for each inner Lloyds iteration) Defaults
#' to 10.
#' @param standardize \code{Logical}. Standardize columns before computing distances Defaults to TRUE.
#' @param seed Seed for random numbers (affects certain parts of the algo that are stochastic and those might or might not be enabled by default)
#' Defaults to -1 (time-based random number).
#' @param init Initialization mode Must be one of: "Random", "PlusPlus", "Furthest", "User". Defaults to Furthest.
#' @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable. Defaults to 0.
#' @param categorical_encoding Encoding scheme for categorical features Must be one of: "AUTO", "Enum", "OneHotInternal", "OneHotExplicit",
#' "Binary", "Eigen", "LabelEncoder", "SortByResponse", "EnumLimited". Defaults to AUTO.
#' @param export_checkpoints_dir Automatically export generated models to this directory.
#' @return Returns an object of class \linkS4class{H2OClusteringModel}.
#' @seealso \code{\link{h2o.cluster_sizes}}, \code{\link{h2o.totss}}, \code{\link{h2o.num_iterations}},
#' \code{\link{h2o.betweenss}}, \code{\link{h2o.tot_withinss}}, \code{\link{h2o.withinss}},
#' \code{\link{h2o.centersSTD}}, \code{\link{h2o.centers}}
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.uploadFile(path = prostate_path)
#' h2o.kmeans(training_frame = prostate, k = 10, x = c("AGE", "RACE", "VOL", "GLEASON"))
#' }
#' @export
h2o.kmeans <- function(training_frame, x,
                       model_id = NULL,
                       validation_frame = NULL,
                       nfolds = 0,
                       keep_cross_validation_models = TRUE,
                       keep_cross_validation_predictions = FALSE,
                       keep_cross_validation_fold_assignment = FALSE,
                       fold_assignment = c("AUTO", "Random", "Modulo", "Stratified"),
                       fold_column = NULL,
                       ignore_const_cols = TRUE,
                       score_each_iteration = FALSE,
                       k = 1,
                       estimate_k = FALSE,
                       user_points = NULL,
                       max_iterations = 10,
                       standardize = TRUE,
                       seed = -1,
                       init = c("Random", "PlusPlus", "Furthest", "User"),
                       max_runtime_secs = 0,
                       categorical_encoding = c("AUTO", "Enum", "OneHotInternal", "OneHotExplicit", "Binary", "Eigen", "LabelEncoder", "SortByResponse", "EnumLimited"),
                       export_checkpoints_dir = NULL
                       )
{
  # NOTE(review): this file is auto-generated (gen_R.py); the fixes below
  # (redundant init assignment, stray brace in a trailing comment) should
  # also be ported back to the generator template.
  # Validate required training_frame first and other frame args: should be a valid key or an H2OFrame object
  training_frame <- .validate.H2OFrame(training_frame, required=TRUE)
  validation_frame <- .validate.H2OFrame(validation_frame)
  # Parameter list to send to the model builder. Only arguments the caller
  # actually supplied are forwarded, so backend defaults stay authoritative.
  parms <- list()
  parms$training_frame <- training_frame
  if (!missing(x)) {
    parms$ignored_columns <- .verify_datacols(training_frame, x)$cols_ignore
    # The fold column must never be treated as an ignored predictor.
    if (!missing(fold_column)) {
      parms$ignored_columns <- setdiff(parms$ignored_columns, fold_column)
    }
  }
  if (!missing(model_id))
    parms$model_id <- model_id
  if (!missing(validation_frame))
    parms$validation_frame <- validation_frame
  if (!missing(nfolds))
    parms$nfolds <- nfolds
  if (!missing(keep_cross_validation_models))
    parms$keep_cross_validation_models <- keep_cross_validation_models
  if (!missing(keep_cross_validation_predictions))
    parms$keep_cross_validation_predictions <- keep_cross_validation_predictions
  if (!missing(keep_cross_validation_fold_assignment))
    parms$keep_cross_validation_fold_assignment <- keep_cross_validation_fold_assignment
  if (!missing(fold_assignment))
    parms$fold_assignment <- fold_assignment
  if (!missing(fold_column))
    parms$fold_column <- fold_column
  if (!missing(ignore_const_cols))
    parms$ignore_const_cols <- ignore_const_cols
  if (!missing(score_each_iteration))
    parms$score_each_iteration <- score_each_iteration
  if (!missing(k))
    parms$k <- k
  if (!missing(estimate_k))
    parms$estimate_k <- estimate_k
  if (!missing(user_points))
    parms$user_points <- user_points
  if (!missing(max_iterations))
    parms$max_iterations <- max_iterations
  if (!missing(standardize))
    parms$standardize <- standardize
  if (!missing(seed))
    parms$seed <- seed
  if (!missing(init))
    parms$init <- init
  if (!missing(max_runtime_secs))
    parms$max_runtime_secs <- max_runtime_secs
  if (!missing(categorical_encoding))
    parms$categorical_encoding <- categorical_encoding
  if (!missing(export_checkpoints_dir))
    parms$export_checkpoints_dir <- export_checkpoints_dir
  # Check if user_points is an acceptable set of user-specified starting points
  if (is.data.frame(user_points) || is.matrix(user_points) || is.list(user_points) || is.H2OFrame(user_points)) {
    # init is always forced to "User" when starting points are given; warn
    # only if the caller explicitly asked for a different init mode.
    # (Fix: the original assigned parms[["init"]] <- "User" twice, once
    # redundantly inside the if-branch; behavior is unchanged.)
    if (!(length(init) > 1 || init == 'User')) {
      warning(paste0("Parameter init must equal 'User' when user_points is set. Ignoring init = '", init, "'. Setting init = 'User'."))
    }
    parms[["init"]] <- "User"
    # Convert user-specified starting points to H2OFrame
    if (is.data.frame(user_points) || is.matrix(user_points) || is.list(user_points)) {
      if (!is.data.frame(user_points) && !is.matrix(user_points)) user_points <- t(as.data.frame(user_points))
      user_points <- as.h2o(user_points)
    }
    parms[["user_points"]] <- user_points
    # k is always taken from the number of starting points; warn on conflict.
    if (!(missing(k)) && k != as.integer(nrow(user_points))) {
      warning("Parameter k is not equal to the number of user-specified starting points. Ignoring k. Using specified starting points.")
    }
    parms[["k"]] <- as.numeric(nrow(user_points))
  } else if (is.character(init)) { # Furthest, Random, PlusPlus
    parms[["user_points"]] <- NULL
  } else {
    stop("argument init must be set to Furthest, Random, PlusPlus, or a valid set of user-defined starting points.")
  }
  # Error check and build model
  .h2o.modelJob('kmeans', parms, h2oRestApiVersion = 3)
}
| /h2o-r/h2o-package/R/kmeans.R | permissive | mirekphd/h2o-3 | R | false | false | 8,663 | r | # This file is auto-generated by h2o-3/h2o-bindings/bin/gen_R.py
# Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
#'
# -------------------------- KMeans Model in H2O -------------------------- #
#'
#' Performs k-means clustering on an H2O dataset
#'
#' @param x A vector containing the \code{character} names of the predictors in the model.
#' @param model_id Destination id for this model; auto-generated if not specified.
#' @param training_frame Id of the training data frame.
#' @param validation_frame Id of the validation data frame.
#' @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2). Defaults to 0.
#' @param keep_cross_validation_models \code{Logical}. Whether to keep the cross-validation models. Defaults to TRUE.
#' @param keep_cross_validation_predictions \code{Logical}. Whether to keep the predictions of the cross-validation models. Defaults to FALSE.
#' @param keep_cross_validation_fold_assignment \code{Logical}. Whether to keep the cross-validation fold assignment. Defaults to FALSE.
#' @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified' option will
#' stratify the folds based on the response variable, for classification problems. Must be one of: "AUTO",
#' "Random", "Modulo", "Stratified". Defaults to AUTO.
#' @param fold_column Column with cross-validation fold index assignment per observation.
#' @param ignore_const_cols \code{Logical}. Ignore constant columns. Defaults to TRUE.
#' @param score_each_iteration \code{Logical}. Whether to score during each iteration of model training. Defaults to FALSE.
#' @param k The max. number of clusters. If estimate_k is disabled, the model will find k centroids, otherwise it will
#' find up to k centroids. Defaults to 1.
#' @param estimate_k \code{Logical}. Whether to estimate the number of clusters (<=k) iteratively and deterministically. Defaults
#' to FALSE.
#' @param user_points This option allows you to specify a dataframe, where each row represents an initial cluster center. The user-
#' specified points must have the same number of columns as the training observations. The number of rows must
#' equal the number of clusters
#' @param max_iterations Maximum training iterations (if estimate_k is enabled, then this is for each inner Lloyds iteration) Defaults
#' to 10.
#' @param standardize \code{Logical}. Standardize columns before computing distances Defaults to TRUE.
#' @param seed Seed for random numbers (affects certain parts of the algo that are stochastic and those might or might not be enabled by default)
#' Defaults to -1 (time-based random number).
#' @param init Initialization mode Must be one of: "Random", "PlusPlus", "Furthest", "User". Defaults to Furthest.
#' @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable. Defaults to 0.
#' @param categorical_encoding Encoding scheme for categorical features Must be one of: "AUTO", "Enum", "OneHotInternal", "OneHotExplicit",
#' "Binary", "Eigen", "LabelEncoder", "SortByResponse", "EnumLimited". Defaults to AUTO.
#' @param export_checkpoints_dir Automatically export generated models to this directory.
#' @return Returns an object of class \linkS4class{H2OClusteringModel}.
#' @seealso \code{\link{h2o.cluster_sizes}}, \code{\link{h2o.totss}}, \code{\link{h2o.num_iterations}},
#' \code{\link{h2o.betweenss}}, \code{\link{h2o.tot_withinss}}, \code{\link{h2o.withinss}},
#' \code{\link{h2o.centersSTD}}, \code{\link{h2o.centers}}
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.uploadFile(path = prostate_path)
#' h2o.kmeans(training_frame = prostate, k = 10, x = c("AGE", "RACE", "VOL", "GLEASON"))
#' }
#' @export
h2o.kmeans <- function(training_frame, x,
model_id = NULL,
validation_frame = NULL,
nfolds = 0,
keep_cross_validation_models = TRUE,
keep_cross_validation_predictions = FALSE,
keep_cross_validation_fold_assignment = FALSE,
fold_assignment = c("AUTO", "Random", "Modulo", "Stratified"),
fold_column = NULL,
ignore_const_cols = TRUE,
score_each_iteration = FALSE,
k = 1,
estimate_k = FALSE,
user_points = NULL,
max_iterations = 10,
standardize = TRUE,
seed = -1,
init = c("Random", "PlusPlus", "Furthest", "User"),
max_runtime_secs = 0,
categorical_encoding = c("AUTO", "Enum", "OneHotInternal", "OneHotExplicit", "Binary", "Eigen", "LabelEncoder", "SortByResponse", "EnumLimited"),
export_checkpoints_dir = NULL
)
{
# Validate required training_frame first and other frame args: should be a valid key or an H2OFrame object
training_frame <- .validate.H2OFrame(training_frame, required=TRUE)
validation_frame <- .validate.H2OFrame(validation_frame)
# Handle other args
# Parameter list to send to model builder
parms <- list()
parms$training_frame <- training_frame
if(!missing(x)){
parms$ignored_columns <- .verify_datacols(training_frame, x)$cols_ignore
if(!missing(fold_column)){
parms$ignored_columns <- setdiff(parms$ignored_columns, fold_column)
}
}
if (!missing(model_id))
parms$model_id <- model_id
if (!missing(validation_frame))
parms$validation_frame <- validation_frame
if (!missing(nfolds))
parms$nfolds <- nfolds
if (!missing(keep_cross_validation_models))
parms$keep_cross_validation_models <- keep_cross_validation_models
if (!missing(keep_cross_validation_predictions))
parms$keep_cross_validation_predictions <- keep_cross_validation_predictions
if (!missing(keep_cross_validation_fold_assignment))
parms$keep_cross_validation_fold_assignment <- keep_cross_validation_fold_assignment
if (!missing(fold_assignment))
parms$fold_assignment <- fold_assignment
if (!missing(fold_column))
parms$fold_column <- fold_column
if (!missing(ignore_const_cols))
parms$ignore_const_cols <- ignore_const_cols
if (!missing(score_each_iteration))
parms$score_each_iteration <- score_each_iteration
if (!missing(k))
parms$k <- k
if (!missing(estimate_k))
parms$estimate_k <- estimate_k
if (!missing(user_points))
parms$user_points <- user_points
if (!missing(max_iterations))
parms$max_iterations <- max_iterations
if (!missing(standardize))
parms$standardize <- standardize
if (!missing(seed))
parms$seed <- seed
if (!missing(init))
parms$init <- init
if (!missing(max_runtime_secs))
parms$max_runtime_secs <- max_runtime_secs
if (!missing(categorical_encoding))
parms$categorical_encoding <- categorical_encoding
if (!missing(export_checkpoints_dir))
parms$export_checkpoints_dir <- export_checkpoints_dir
# Check if user_points is an acceptable set of user-specified starting points
if( is.data.frame(user_points) || is.matrix(user_points) || is.list(user_points) || is.H2OFrame(user_points) ) {
if ( length(init) > 1 || init == 'User') {
parms[["init"]] <- "User"
} else {
warning(paste0("Parameter init must equal 'User' when user_points is set. Ignoring init = '", init, "'. Setting init = 'User'."))
}
parms[["init"]] <- "User"
# Convert user-specified starting points to H2OFrame
if( is.data.frame(user_points) || is.matrix(user_points) || is.list(user_points) ) {
if( !is.data.frame(user_points) && !is.matrix(user_points) ) user_points <- t(as.data.frame(user_points))
user_points <- as.h2o(user_points)
}
parms[["user_points"]] <- user_points
# Set k
if( !(missing(k)) && k!=as.integer(nrow(user_points)) ) {
warning("Parameter k is not equal to the number of user-specified starting points. Ignoring k. Using specified starting points.")
}
parms[["k"]] <- as.numeric(nrow(user_points))
} else if ( is.character(init) ) { # Furthest, Random, PlusPlus{
parms[["user_points"]] <- NULL
} else{
stop ("argument init must be set to Furthest, Random, PlusPlus, or a valid set of user-defined starting points.")
}
# Error check and build model
.h2o.modelJob('kmeans', parms, h2oRestApiVersion = 3)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Heston.R
\name{HestonRealizedVariance}
\alias{HestonRealizedVariance}
\title{Heston Realized Variance}
\usage{
HestonRealizedVariance(realizedVar_0, time, kappa, theta)
}
\arguments{
\item{realizedVar_0}{Initial realized variance of a stock repeated T times}
\item{time}{Number of realized variance observations}
\item{kappa}{Mean reverting parameter.}
\item{theta}{The long term average.}
}
\value{
Realized variance according to the Heston model when the correct kappa and theta values are used
}
\description{
The Heston model's approximation for realized variance.
}
| /man/HestonRealizedVariance.Rd | no_license | FinTrek/FinMod | R | false | true | 652 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Heston.R
\name{HestonRealizedVariance}
\alias{HestonRealizedVariance}
\title{Heston Realized Variance}
\usage{
HestonRealizedVariance(realizedVar_0, time, kappa, theta)
}
\arguments{
\item{realizedVar_0}{Initial realized variance of a stock repeated T times}
\item{time}{Number of realized variance observations}
\item{kappa}{Mean reverting parameter.}
\item{theta}{The long term average.}
}
\value{
Realized variance according to the Heston model when the correct kappa and theta values are used
}
\description{
The Heston model's approximation for realized variance.
}
|
# --------------- NEWFOUNDLAND_AND_LABRADOR_PROVINCE ------------------
# (Fix: the original header was a bare '---...' line, which is a syntax
# error when this script is sourced; it must be a comment.)
install.packages("ggplot2", dependencies = TRUE)
library(ggplot2)
barchart <- read.csv("C:\\Users\\Vimohi\\Desktop\\Work_Challenge\\Employement_By_Industry\\Data_Of_Provinces\\Newfoundland and Labrador_Province.csv")
barchart
# NOTE(review): the employment counts are taken from the 'Dates' column --
# presumably the CSV's count column is mis-named; confirm against the file.
dat <- data.frame(Industry = barchart$Industry,
                  Number_of_People_Employed = as.numeric(barchart$Dates))
# Bar chart of employment per industry (typo "Employement" fixed in title).
ggplot(data = dat, aes(x = Industry, y = Number_of_People_Employed, fill = Industry)) +
  geom_bar(stat = "identity") +
  ggtitle("Newfoundland and Labrador's Employment By Industries")
| /R_Studio_Code/Newfoundland_and_Labrador_P_R_Code.R | no_license | Vimohi/Vimohi_Shah_Machine_Learning_Work_Challenge | R | false | false | 610 | r |
---------------NEWFOUNDLAND_AND_LABRADOR_PROVINCE------------------
install.packages("ggplot2", dependencies = TRUE)
require("ggplot2")
barchart = read.csv("C:\\Users\\Vimohi\\Desktop\\Work_Challenge\\Employement_By_Industry\\Data_Of_Provinces\\Newfoundland and Labrador_Province.csv")
barchart
dat <- data.frame(Industry= barchart$Industry,
Number_of_People_Employed = as.numeric(barchart$Dates))
ggplot(data=dat, aes(x=Industry, y=Number_of_People_Employed, fill=Industry))+
geom_bar(stat="identity")+
ggtitle("Newfoundland and Labrador's Employement By Industries")
|
\name{gtcpsem}
\alias{gtcpsem-function}
\alias{gtcpsem}
\docType{package}
\title{Generate a low-rank semi-symmetric tensor characterizing the form of CP decomposition}
\description{
This function generates a low-rank semi-symmetric tensor characterizing the form of CANDECOMP/PARAFAC (CP) decomposition with
the dimension \code{dims} and factors \code{lambda}.
The semi-symmetric tensor means that both mode-\code{r1} and mode-\code{r2} unfoldings are equal, that is,
\eqn{T_{(r1)} = T_{(r2)}} for the output tensor \eqn{T}.
}
\usage{
gtcpsem(dims, lambda=NULL, r1=1, r2=2, d0=NULL, dr=NULL, seed_id=2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dims}{The size of the tensor, which is a vector \eqn{(n_1,\cdots,n_d)}.
\code{dims} must be specified.}
\item{lambda}{The factors of the CP decomposition. It is a vector. Factors \code{lambda} will be generated randomly if it is \code{NULL}.}
\item{r1}{Both \code{r1} and \code{r2} are the user-specified modes, which means
that both mode-\code{r1} and mode-\code{r2} unfoldings are equal. Default is \code{r1 = 1} and \code{r2 = 2}.}
\item{r2}{Both \code{r1} and \code{r2} are the user-specified modes, which means
that both mode-\code{r1} and mode-\code{r2} unfoldings are equal. Default is \code{r1 = 1} and \code{r2 = 2}.}
\item{d0}{\code{d0} is the mode. The output tensor is the mode-\code{d0} unfolding of the tensor. \code{d0} can be NULL (the default)
if the output tensor is an array with dimension \code{dims}.}
\item{dr}{The user-specified rank. Default is \code{10}.}
\item{seed_id}{A positive integer, the seed for generating the random numbers. Default is 2.}
}
\details{
This function generates a low-rank semi-symmetric tensor characterizing the form of CP decomposition with dimension \code{dims} and factors \code{lambda}.
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
\item{Dn}{The output mode-\code{d0}-unfolding, \eqn{D_{(d_0)}}, or an array with dimension \code{dims} if \code{d0} is \code{NULL}.}
}
%\author{
%Your Name, email optional.
%Maintainer: Xu Liu <liu.xu@sufe.edu.cn>
%}
\keyword{ CP decomposition; HOSVD; Tucker decomposition}
\seealso{
gtcp, gttsem
}
\examples{
dims <- c(8,6,10,6,7)
N <- length(dims)
lambda <- seq(6,1,by=-1)
r1 <- 2
r2 <- 4
dr <- 5
T1 <- gtcpsem(dims=dims,lambda=lambda,r1=r1,r2=r2,d0=r1,dr=dr)
T2 <- ttu(T1,r1,r2,dims)
}
| /man/gtcpsem.Rd | no_license | xliusufe/tensorApp | R | false | false | 2,587 | rd | \name{gtcpsem}
\alias{gtcpsem-function}
\alias{gtcpsem}
\docType{package}
\title{Generate a low-rank semi-symmetric tensor characterizing the form of CP decomposition}
\description{
This function generates a low-rank semi-symmetric tensor characterizing the form of CANDECOMP/PARAFAC (CP) decomposition with
the dimension \code{dims} and factors \code{lambda}.
The semi-symmetric tensor means that both mode-\code{r1} and mode-\code{r2} unfoldings are equal, that is,
\eqn{T_{(r1)} = T_{(r2)}} for the output tensor \eqn{T}.
}
\usage{
gtcpsem(dims, lambda=NULL, r1=1, r2=2, d0=NULL, dr=NULL, seed_id=2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dims}{The size of the tensor, which is a vector \eqn{(n_1,\cdots,n_d)}.
\code{dims} must be specified.}
\item{lambda}{The factors of CP decomposition. It is an vector. Factors \code{lambda} will be given randomly if it is \code{NULL}.}
\item{r1}{Both \code{r1} and \code{r2} are the user-specified modes, which means
that both mode-\code{r1} and mode-\code{r2} unfoldings are equal. Default is \code{r1 = 1} and \code{r2 = 2}.}
\item{r2}{Both \code{r1} and \code{r2} are the user-specified modes, which means
that both mode-\code{r1} and mode-\code{r2} unfoldings are equal. Default is \code{r1 = 1} and \code{r2 = 2}.}
\item{d0}{\code{d0} is the mode. The output tensor is the mode-\code{d0} unfolding of the tensor. \code{d0} can be NULL (the default)
if the output tensor is an array with dimension \code{dims}.}
\item{dr}{The user-specified rank. Default is \code{10}.}
\item{seed_id}{A positive integer, the seed for generating the random numbers. Default is 2.}
}
\details{
This function generates a low-rank semi-symmetric tensor characterizing the form of CP decomposition with dimension \code{dims} and factors \code{lambda}.
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
\item{Dn}{the output mode-\code{d0}-unfolding, \eqn{D_{(d_0)}}. Or an array with dimesion \code{dims} if \code{d0} is \code{NULL}.}
}
%\author{
%Your Name, email optional.
%Maintainer: Xu Liu <liu.xu@sufe.edu.cn>
%}
\keyword{ CP decomposition; HOSVD; Tucker decomposition}
\seealso{
gtcp, gttsem
}
\examples{
dims <- c(8,6,10,6,7)
N <- length(dims)
lambda <- seq(6,1,by=-1)
r1 <- 2
r2 <- 4
dr <- 5
T1 <- gtcpsem(dims=dims,lambda=lambda,r1=r1,r2=r2,d0=r1,dr=dr)
T2 <- ttu(T1,r1,r2,dims)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/race_urls.R
\name{fill_missing_url}
\alias{fill_missing_url}
\title{Fill Missing URLs}
\usage{
fill_missing_url()
}
\description{
Utility function to fill missing race URLs.
}
| /man/fill_missing_url.Rd | no_license | joranE/statskier2 | R | false | true | 254 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/race_urls.R
\name{fill_missing_url}
\alias{fill_missing_url}
\title{Fill Missing URLs}
\usage{
fill_missing_url()
}
\description{
Utility function to fill missing race URLs.
}
|
# sensitivity_analysis.R
# Run sensitivity analysis, i.e.
# check what happens if one changes the parameters
# OVERALL SENSITIVITY ANALYSIS IS BASED ON ALL_LIMMA (ALL_G)
#############################
## Change pathway threshold
############################
library(foreach)
library(doParallel)
library(parallel)
# Leave two cores free for the OS / interactive session.
numCores <- detectCores()-2
numCores
registerDoParallel(numCores) # use multicore, set to the number of our cores
# Edge-weight thresholds to sweep over (0.9 .. 1.8 in steps of 0.1).
threshold_range <- seq(0.9, 1.8, 0.1)
# NOTE(review): `neuro_g`, `nonneuro_g`, `wpid2name`, `RESULT_DIR` and
# `calculatePathwayScore()` are defined elsewhere; this script assumes
# they are already in the workspace.
g <- neuro_g
thresh_score_sum <- numeric()
# All candidate WikiPathways except WP4657 itself.
candidate_wp <- wpid2name[wpid2name$wpid != "WP4657",]$wpid
wp_list <- candidate_wp
# Sweep the weight threshold for the neuro network; for each threshold,
# append the total (summed over pathways) of inverse path scores.
for (thresh in threshold_range){
#res_score <- list()
# We'll only use pathways having a significant path for thresh==0.9.
# Update and fix wp_list accordingly, starting from thresh==1.0
# NOTE(review): `thresh == 1.0` compares doubles produced by seq(); it
# happens to hold here but is fragile -- consider isTRUE(all.equal(...)).
if (thresh == 1.0){
wp_list <- one_names
}
# One parallel task per candidate pathway at this threshold.
reses <- foreach (one_name=wp_list) %dopar% {
one_res <- calculatePathwayScore(input_g=g,
                                 another_pathway_node=one_name,
                                 WEIGHT_THRESHOLD=thresh,
                                 print_path = FALSE)
}
# Per-pathway score = sum of inverse path values; keep the names of the
# pathways that produced at least one significant path (reused above).
one_sums <- unlist(lapply(reses, function(x) if (length(x[[1]])>0) return(sum(1/x[[1]]))))
one_names <- unlist(lapply(reses, function(x) if (length(x[[1]])>0) return(names(x[[1]])[1])))
print(paste("threshold is", thresh))
print(paste("The score sum is:", sum(one_sums)))
thresh_score_sum <- c(thresh_score_sum, sum(one_sums))
}
neuro_thresh_score_sum <- thresh_score_sum
# Same sweep as above, for the non-neuro network.
g <- nonneuro_g
thresh_score_sum <- numeric()
candidate_wp <- wpid2name[wpid2name$wpid != "WP4657",]$wpid
wp_list <- candidate_wp
for (thresh in threshold_range){
#res_score <- list()
# We'll only use pathways having a significant path for thresh==0.9.
# Update and fix wp_list accordingly, starting from thresh==1.0
# NOTE(review): same fragile float equality as in the neuro sweep.
if (thresh == 1.0){
wp_list <- one_names
}
# One parallel task per candidate pathway at this threshold.
reses <- foreach (one_name=wp_list) %dopar% {
one_res <- calculatePathwayScore(input_g=g,
                                 another_pathway_node=one_name,
                                 WEIGHT_THRESHOLD=thresh,
                                 print_path = FALSE)
}
one_sums <- unlist(lapply(reses, function(x) if (length(x[[1]])>0) return(sum(1/x[[1]]))))
one_names <- unlist(lapply(reses, function(x) if (length(x[[1]])>0) return(names(x[[1]])[1])))
print(paste("threshold is", thresh))
print(paste("The score sum is:", sum(one_sums)))
thresh_score_sum <- c(thresh_score_sum, sum(one_sums))
}
nonneuro_thresh_score_sum <- thresh_score_sum
# sensitivity_scores <- list(neuro_thresh_score_sum, nonneuro_thresh_score_sum)
# save(sensitivity_scores, file=file.path(RESULT_DIR, "sensitivity_scores.rda"))
splitVecSum <- function(inp_vec, n) {
  # Split `inp_vec` into consecutive chunks of length `n`.
  #
  # Args:
  #   inp_vec: vector to split.
  #   n: chunk length; a single positive number.
  #
  # Returns:
  #   A list of consecutive sub-vectors of `inp_vec`. The last chunk is
  #   shorter when length(inp_vec) is not a multiple of n.
  #
  # Fixes vs. the original: the original padded a non-multiple final chunk
  # with NAs via out-of-range indexing, and produced nonsense on empty
  # input because seq(0, num_subvec - 1) counts backwards when
  # num_subvec < 1. Behavior is unchanged when n divides the length
  # exactly (the only case used in this script).
  stopifnot(length(n) == 1, n >= 1)
  len <- length(inp_vec)
  if (len == 0) {
    return(list())
  }
  num_subvec <- ceiling(len / n)
  lapply(seq_len(num_subvec), function(k) {
    inp_vec[((k - 1) * n + 1):min(k * n, len)]
  })
}
# Re-chunk the flat score vectors and total each chunk.
# NOTE(review): as written, `thresh_score_sum` collects ONE value per
# threshold (sum(one_sums)), so splitting into chunks of 167 / 255 looks
# inconsistent -- those sizes only make sense if the per-pathway scores
# (`one_sums`) had been concatenated instead. Verify before trusting the
# output.
neuro_thresh_score_list <- splitVecSum(neuro_thresh_score_sum, 167)
neuro_sensitivity_sum <- unlist(lapply(neuro_thresh_score_list, sum))
nonneuro_thresh_score_list <- splitVecSum(nonneuro_thresh_score_sum, 255)
nonneuro_sensitivity_sum <- unlist(lapply(nonneuro_thresh_score_list, sum))
# One row per threshold: total score for each network, written to CSV.
compare_sensitivity <- data.frame(val=threshold_range,
                                  neuro_score=neuro_sensitivity_sum,
                                  nonneuro_score=nonneuro_sensitivity_sum)
write.csv(compare_sensitivity, file.path(RESULT_DIR, "compare_sensitivity.csv"), row.names = F)
stopImplicitCluster()
| /R/sensitivity_analysis.R | permissive | woosubs/PathwayInteraction | R | false | false | 3,558 | r | # sensitivity_analysis.R
# Run sensitivity analysis, i.e.
# check what happens if one changes the parameters
# OVERALL SENSITIVITY ANALYSIS IS BASED ON ALL_LIMMA (ALL_G)
#############################
## Change pathway threshold
############################
library(foreach)
library(doParallel)
library(parallel)
numCores <- detectCores()-2
numCores
registerDoParallel(numCores) # use multicore, set to the number of our cores
threshold_range <- seq(0.9, 1.8, 0.1)
g <- neuro_g
thresh_score_sum <- numeric()
candidate_wp <- wpid2name[wpid2name$wpid != "WP4657",]$wpid
wp_list <- candidate_wp
for (thresh in threshold_range){
#res_score <- list()
# We'll only use pathways having a significant path for thresh==0.9.
# Update and fix wp_list accordingly, starting from thresh==1.0
if (thresh == 1.0){
wp_list <- one_names
}
reses <- foreach (one_name=wp_list) %dopar% {
one_res <- calculatePathwayScore(input_g=g,
another_pathway_node=one_name,
WEIGHT_THRESHOLD=thresh,
print_path = FALSE)
}
one_sums <- unlist(lapply(reses, function(x) if (length(x[[1]])>0) return(sum(1/x[[1]]))))
one_names <- unlist(lapply(reses, function(x) if (length(x[[1]])>0) return(names(x[[1]])[1])))
print(paste("threshold is", thresh))
print(paste("The score sum is:", sum(one_sums)))
thresh_score_sum <- c(thresh_score_sum, sum(one_sums))
}
neuro_thresh_score_sum <- thresh_score_sum
g <- nonneuro_g
thresh_score_sum <- numeric()
candidate_wp <- wpid2name[wpid2name$wpid != "WP4657",]$wpid
wp_list <- candidate_wp
for (thresh in threshold_range){
#res_score <- list()
# We'll only use pathways having a significant path for thresh==0.9.
# Update and fix wp_list accordingly, starting from thresh==1.0
if (thresh == 1.0){
wp_list <- one_names
}
reses <- foreach (one_name=wp_list) %dopar% {
one_res <- calculatePathwayScore(input_g=g,
another_pathway_node=one_name,
WEIGHT_THRESHOLD=thresh,
print_path = FALSE)
}
one_sums <- unlist(lapply(reses, function(x) if (length(x[[1]])>0) return(sum(1/x[[1]]))))
one_names <- unlist(lapply(reses, function(x) if (length(x[[1]])>0) return(names(x[[1]])[1])))
print(paste("threshold is", thresh))
print(paste("The score sum is:", sum(one_sums)))
thresh_score_sum <- c(thresh_score_sum, sum(one_sums))
}
nonneuro_thresh_score_sum <- thresh_score_sum
# sensitivity_scores <- list(neuro_thresh_score_sum, nonneuro_thresh_score_sum)
# save(sensitivity_scores, file=file.path(RESULT_DIR, "sensitivity_scores.rda"))
splitVecSum <- function(inp_vec, n){
  # Split inp_vec into consecutive sub-vectors of equal length n.
  #
  # Returns a list of num_subvec sub-vectors; trailing elements beyond the
  # last complete sub-vector are dropped, matching the original
  # length(inp_vec)/n behaviour. Uses integer division and seq_len() so an
  # input shorter than n yields an empty list instead of NA-padded indexing.
  num_subvec <- length(inp_vec) %/% n
  lapply(seq_len(num_subvec), function(i) inp_vec[((i - 1) * n + 1):(i * n)])
}
neuro_thresh_score_list <- splitVecSum(neuro_thresh_score_sum, 167)
neuro_sensitivity_sum <- unlist(lapply(neuro_thresh_score_list, sum))
nonneuro_thresh_score_list <- splitVecSum(nonneuro_thresh_score_sum, 255)
nonneuro_sensitivity_sum <- unlist(lapply(nonneuro_thresh_score_list, sum))
compare_sensitivity <- data.frame(val=threshold_range,
neuro_score=neuro_sensitivity_sum,
nonneuro_score=nonneuro_sensitivity_sum)
write.csv(compare_sensitivity, file.path(RESULT_DIR, "compare_sensitivity.csv"), row.names = F)
stopImplicitCluster()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/expandTaxonTree.R
\name{expandTaxonTree}
\alias{expandTaxonTree}
\title{Extrapolating Lower-Level Taxon Phylogenies from Higher-Level Taxon Trees}
\usage{
expandTaxonTree(taxonTree, taxaData, collapse = NULL, keepBrLen = FALSE,
plot = FALSE)
}
\arguments{
\item{taxonTree}{A phylo object where tips represent higher taxa}
\item{taxaData}{Character vector of higher taxa, with element names equal
to the lower taxa. See below.}
\item{collapse}{Character vector of non-monophyletic higher taxa to be
collapsed}
\item{keepBrLen}{Logical, decides if branch lengths should be kept or
discarded. FALSE by default. See details below.}
\item{plot}{If true, plots a comparison between input and output trees}
}
\value{
Outputs the modified tree as an object of class phylo, with the
higher-level taxa expanded into polytomies and the lower-level taxa as the
tip labels.
}
\description{
This function takes a tree composed of higher-level taxa and a vector of
lower-level taxa belonging to the set of higher-level taxa included in the
input tree and produces a tree composed of the lower-level taxa, by treating
the higher-level taxa as unresolved monophyletic polytomies. A user can also
mark higher taxa as paraphyletic such that these are secondarily collapsed
and do not form monophyletic clades in the output tree.
}
\details{
The output tree will probably be a rough unresolved view of the
relationships among the taxa, due to the treatment of higher-level taxa as
polytomies. This is similar to the methods used in Webb and Donoghue (2005)
and Friedman (2009). Any analyses should be done by resolving this tree with
\code{\link{multi2di}} in the ape package or via the various time-scaling
functions found in this package (paleotree).
The taxaData vector should have one element per lower-level taxon that is to
be added to the tree. The name of each element in the vector should be the
names of the lower-level taxa, which will be used as new tip labels of the
output lower-taxon tree. There should be no empty elements! expandTaxonTree
won't know what to do with taxa that don't go anywhere.
By default, all higher-level taxa are treated as monophyletic clades if not
otherwise specified. The collapse vector can (and probably should) be used
if there is doubt about the monophyly of any higher-level taxa included in
the input taxon-tree, so that such a group would be treated as a paraphyletic
group in the output tree.
Also by default, the output tree will lack branch lengths and thus will not be
time-scaled. If keepBrLen is true, then the tree's edge lengths are kept and
new taxa are added as zero length branches attaching to a node that
represents the previous higher-taxon. This tree is probably not useful for
most applications, and may even strongly bias some analyses. USE WITH
CAUTION! The 'collapse' vector will cause such edges to be replaced by
zero-length branches rather than fully collapsing them, which could have odd
effects. If 'collapse' is not null and keepBrLen is true, a warning is
issued that the output probably won't make much sense at all.
}
\examples{
set.seed(444)
#lets make our hypothetical simulated tree of higher taxa
taxtr <- rtree(10)
taxd <- sample(taxtr$tip.label,30,replace=TRUE) #taxa to place within higher taxa
names(taxd) <- paste(taxd,"_x",1:30,sep="")
coll <- sample(taxtr$tip.label,3) #what to collapse?
expandTaxonTree(taxonTree=taxtr,taxaData=taxd,collapse=coll,plot=TRUE)
}
\author{
David W. Bapst
}
\references{
Friedman, M. 2009 Ecomorphological selectivity among marine
teleost fishes during the end-Cretaceous extinction. \emph{Proceedings of
the National Academy of Sciences} \bold{106}(13):5218--5223.
Webb, C. O., and M. J. Donoghue. 2005 Phylomatic: tree assembly for applied
phylogenetics. \emph{Molecular Ecology Notes} \bold{5}(1):181--183.
}
\seealso{
\code{\link{multi2di}}, \code{\link{bind.tree}}
}
| /man/expandTaxonTree.Rd | permissive | KlausVigo/paleotree | R | false | true | 3,961 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/expandTaxonTree.R
\name{expandTaxonTree}
\alias{expandTaxonTree}
\title{Extrapolating Lower-Level Taxon Phylogenies from Higher-Level Taxon Trees}
\usage{
expandTaxonTree(taxonTree, taxaData, collapse = NULL, keepBrLen = FALSE,
plot = FALSE)
}
\arguments{
\item{taxonTree}{A phylo object where tips represent higher taxa}
\item{taxaData}{Character vector of higher taxa, with elements names equal
to the lower taxa. See below.}
\item{collapse}{Character vector of non-monophyletic higher taxa to be
collapsed}
\item{keepBrLen}{Logical, decides if branch lengths should be kept or
discarded. FALSE by default. See details below.}
\item{plot}{If true, plots a comparison between input and output trees}
}
\value{
Outputs the modified tree as an object of class phylo, with the
higher-level taxa expanded into polytomies and the lower-level taxa as the
tip labels.
}
\description{
This function takes a tree composed of higher-level taxa and a vector of
lower-level taxa belonging to the set of higher-level taxa included in the
input tree and produces a tree composed of the lower-level taxa, by treating
the higher-level taxa as unresolved monophyletic polytomies. A user can also
mark higher taxa as paraphyletic such that these are secondarily collapsed
and do not form monophyletic clades in the output tree.
}
\details{
The output tree will probably be a rough unresolved view of the
relationships among the taxa, due to the treatment of higher-level taxa as
polytomies. This is similar to the methods used in Webb and Donoghue (2005)
and Friedman (2009). Any analyses should be done by resolving this tree with
\code{\link{multi2di}} in the ape package or via the various time-scaling
functions found in this package (paleotree).
The taxaData vector should have one element per lower-level taxon that is to
be added to the tree. The name of each element in the vector should be the
names of the lower-level taxa, which will be used as new tip labels of the
output lower-taxon tree. There should be no empty elements! expandTaxonTree
won't know what to do with taxa that don't go anywhere.
By default, all higher-level taxa are treated as monophyletic clades if not
otherwise specified. The collapse vector can (and probably should) be used
if there is doubt about the monophyly of any higher-level taxa included in
the input taxon-tree, so that such a group would be treated as a paraphyletic
group in the output tree.
Also by default, the output tree will lack branch lengths and thus will not be
time-scaled. If keepBrLen is true, then the tree's edge lengths are kept and
new taxa are added as zero length branches attaching to a node that
represents the previous higher-taxon. This tree is probably not useful for
most applications, and may even strongly bias some analyses. USE WITH
CAUTION! The 'collapse' vector will cause such edges to be replaced by
zero-length branches rather than fully collapsing them, which could have odd
effects. If 'collapse' is not null and keepBrLen is true, a warning is
issued that the output probably won't make much sense at all.
}
\examples{
set.seed(444)
#lets make our hypothetical simulated tree of higher taxa
taxtr <- rtree(10)
taxd <- sample(taxtr$tip.label,30,replace=TRUE) #taxa to place within higher taxa
names(taxd) <- paste(taxd,"_x",1:30,sep="")
coll <- sample(taxtr$tip.label,3) #what to collapse?
expandTaxonTree(taxonTree=taxtr,taxaData=taxd,collapse=coll,plot=TRUE)
}
\author{
David W. Bapst
}
\references{
Friedman, M. 2009 Ecomorphological selectivity among marine
teleost fishes during the end-Cretaceous extinction. \emph{Proceedings of
the National Academy of Sciences} \bold{106}(13):5218--5223.
Webb, C. O., and M. J. Donoghue. 2005 Phylomatic: tree assembly for applied
phylogenetics. \emph{Molecular Ecology Notes} \bold{5}(1):181--183.
}
\seealso{
\code{\link{multi2di}}, \code{\link{bind.tree}}
}
|
#' Function to deal with nulls inside of glue
#'
#' @param str The string to treat as NULL
#'
#' @return The new string
#' @keywords internal
null_transformer <- function(str = "null") {
  # Returns a glue transformer: evaluate the expression as usual, but
  # substitute `str` whenever the result is NULL.
  function(text, envir) {
    result <- glue::identity_transformer(text, envir)
    if (is.null(result)) {
      str
    } else {
      result
    }
  }
}
| /R/null_transformer.R | permissive | ian-flores/metadata | R | false | false | 328 | r | #' Function to deal with nulls inside of glue
#'
#' @param str The string to treat as NULL
#'
#' @return The new string
#' @keywords internal
null_transformer <- function(str = "null") {
function(text, envir) {
out <- glue::identity_transformer(text, envir)
if (is.null(out)) {
return(str)
}
out
}
}
|
context("my_range")
# my_range() is expected to format the minimum and maximum of a numeric
# vector as a single "min--max" string with one decimal place.
test_that("numerics", {
# integer input
expect_equal(my_range(1:10), "1.0--10.0")
# a double vector with the same values must format identically
expect_equal(my_range(c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)), "1.0--10.0")
# scaling the input scales both endpoints
expect_equal(my_range(1:10 * 2), "2.0--20.0")
})
| /tests/testthat/test-my_range.R | permissive | fshin3/wsdevops | R | false | false | 212 | r | context("my_range")
test_that("numerics", {
expect_equal(my_range(1:10), "1.0--10.0")
expect_equal(my_range(c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)), "1.0--10.0")
expect_equal(my_range(1:10 * 2), "2.0--20.0")
})
|
# Print the current working directory (interactive sanity check).
getwd()
# NOTE(review): rm(list=ls()) and setwd() in a script wipe the user's
# workspace and mutate global state; prefer project-relative paths.
rm(list=ls())
setwd("/Users/sungjinpark/Desktop/OneDrive - konkuk.ac.kr/datamining/midterm")
# Regression fit / prediction sample data (paths relative to the midterm dir).
xy.df = read.csv("../old.sam.for.reg.fit.csv")
y.df = read.csv("../old.sam.for.reg.pred.csv")
## make dummy variable
# Draw 10 random category labels to demonstrate dummy (one-hot) encoding.
x = sample(x=c('Vision','NLP','ML'),size=10,replace = TRUE)
ux = unique(x)
ux=ux[-1] #baseline
# One indicator column per non-baseline level: rep() tiles x once per level,
# the double-transpose compares each column against its level, and *1
# converts the logical matrix to 0/1.
result = t(t(matrix(rep(x,length(ux)),ncol = length(ux)))==ux)*1
colnames(result)
colnames(result)=ux
result
cbind(result,x)
| /3wk/dummy.R | no_license | Chad4545/datamining | R | false | false | 442 | r | getwd()
rm(list=ls())
setwd("/Users/sungjinpark/Desktop/OneDrive - konkuk.ac.kr/datamining/midterm")
xy.df = read.csv("../old.sam.for.reg.fit.csv")
y.df = read.csv("../old.sam.for.reg.pred.csv")
## make dummy variable
x = sample(x=c('Vision','NLP','ML'),size=10,replace = TRUE)
ux = unique(x)
ux=ux[-1] #baseline
result = t(t(matrix(rep(x,length(ux)),ncol = length(ux)))==ux)*1
colnames(result)
colnames(result)=ux
result
cbind(result,x)
|
#Define custom scripts functions
#------------------------------
#Function that simplifies loading .RData objects
assignLoad <- function(filename){
  # Load an .RData file and return its stored object (the first one saved)
  # instead of assigning it into the caller's environment.
  #
  # Loading into a private environment fixes two flaws of the original
  # ls()-based lookup: a saved object named "filename" no longer collides
  # with the argument, and the function's own locals can never be returned
  # by mistake. For multi-object files the first saved object is returned.
  env <- new.env(parent = emptyenv())
  loaded <- load(filename, envir = env)
  get(loaded[[1]], envir = env)
}
#Items from Data Dictionary
######################################
#Load User Type sheet---
Data_Dictionary.. <- read_excel("Documentation/Data Dictionary.xlsx", sheet = "User_Type_Code")
Eco_User_Type. <- Data_Dictionary..$User_Type
names(Eco_User_Type.) <- Data_Dictionary..$Eco_User_Type_Desc
Eco_User_Type. <- Eco_User_Type.[!(is.na(names(Eco_User_Type.)))]
#Define additional user types codes since Eco Counter does not correctly define users when bike/ped collected together---
User_Type. <- Data_Dictionary..$User_Type
names(User_Type.) <- Data_Dictionary..$User_Type_Desc
User_Type. <- User_Type.[!(is.na(names(User_Type.)))]
#Load Direction Code sheet---
Data_Dictionary.. <- read_excel("Documentation/Data Dictionary.xlsx", sheet = "Direction")
#Create a direction description vector
Direction_Desc. <- Data_Dictionary..$Direction_Code
names(Direction_Desc.) <- Data_Dictionary..$Direction_Desc
#Load Collection Type Code sheet---
Data_Dictionary.. <- read_excel("Documentation/Data Dictionary.xlsx", sheet = "Collection_Type_Code")
#Collection Type Codes---
Collection_Type. <- Data_Dictionary..$Collection_Type
names(Collection_Type.) <- Data_Dictionary..$Collection_Type_Desc
#Load Facility Type Code sheet---
Data_Dictionary.. <- read_excel("Documentation/Data Dictionary.xlsx", sheet = "Facility_Type_Code")
#Create facility type desc vector
Facility_Type. <- Data_Dictionary..$Facility_Type
names(Facility_Type.) <-Data_Dictionary..$Facility_Type_Desc
#Load Facility Type Code sheet---
Data_Dictionary.. <- read_excel("Documentation/Data Dictionary.xlsx", sheet = "Device_Type")
#Create device type desc vector
Device_Type. <- Data_Dictionary..$Device_Type
names(Device_Type.) <- Data_Dictionary..$Device_Type_Desc
#Load Error Code sheet---
Data_Dictionary.. <- read_excel("Documentation/Data Dictionary.xlsx", sheet = "Error_Codes")
#Create device type desc vector
Error_Code. <- Data_Dictionary..$Error_Code
names(Error_Code.) <- Data_Dictionary..$Error_Name
#Load Data
#--------------------
#Step 3 Data ---------------------------------
All_Files. <- file.info(dir(paste(getwd(),"/Counts Data/Step 3 - Counts with Errors",sep=""), full.names=TRUE))$ctime
names(All_Files.) <- dir(paste(getwd(),"/Counts Data/Step 3 - Counts with Errors",sep=""), full.names=TRUE)
All_Files.<- All_Files.[!grepl(".csv", names(All_Files.))]
#Sub_Location_Id
##########
#Daily Sub Location---
File <- All_Files.[grep("Daily_Sub_Location_Id", names(All_Files.))]
File <- names(File[File%in%max(File)])
Load_Daily_Sub_Location_Id.. <- assignLoad(File)
#Hourly---
#File <- All_Files.[grep("Hourly_Sub_Location_Id", names(All_Files.))]
#File <- names(File[File%in%max(File)])
#Load_Hourly_Sub_Location_Id.. <- assignLoad(File)
#Select and rename data
Daily_Sub_Location_Id.. <- Load_Daily_Sub_Location_Id..[ Load_Daily_Sub_Location_Id..$Direction%in%"Total" & Load_Daily_Sub_Location_Id..$Obs_Hours%in%24,]
#Create a location Id summary table
#############################
Daily_Location_Id.. <- Daily_Sub_Location_Id.. %>% group_by(Device_Name, User_Type_Desc,Date) %>% summarise(Counts = sum(Counts,na.rm=T))
#Add columns
Daily_Location_Id..$Month <- months(Daily_Location_Id..$Date)
Daily_Location_Id..$Year <- year(Daily_Location_Id..$Date)
Daily_Location_Id..$Weekday <- weekdays(Daily_Location_Id..$Date)
Daily_Location_Id.. <- Daily_Location_Id.. %>% mutate(Is_Weekday = ifelse(Weekday%in%c("Saturday","Sunday"),"Weekend","Weekday"))
#Error code - if any code is not 0 assign a new error flag
# Collapse sub-location error codes to a single code per
# device / user-type / day. case_when() matches top to bottom, so a day
# carrying several distinct non-zero codes is labelled with the
# lowest-numbered code present (after the all-zero check).
Error_Codes.. <- Daily_Sub_Location_Id.. %>% group_by(Device_Name,User_Type_Desc,Date) %>%
summarise(Error_Code = case_when(
all(Error_Code == 0) ~ 0,
any(Error_Code == 1) ~ 1,
any(Error_Code == 2) ~ 2,
any(Error_Code == 3) ~ 3,
any(Error_Code == 4) ~ 4,
any(Error_Code == 5) ~ 5))
#Join the rolled-up daily error codes back onto the daily counts
Daily_Location_Id.. <- left_join(Daily_Location_Id.., Error_Codes.., by = c("Device_Name","User_Type_Desc","Date"))
#Append some columns
#############
#Descriptive of Error Code
Daily_Sub_Location_Id..$Error_Code_Desc <- names(Error_Code.)[match(Daily_Sub_Location_Id..$Error_Code, Error_Code.)]
Daily_Location_Id..$Error_Code_Desc <- names(Error_Code.)[match(Daily_Location_Id..$Error_Code, Error_Code.)]
#Load projection file from file or download if not on file
#Download spatial data for select counties for defining project
if(!(any(list.files(paste(getwd(),"/Supporting Data/Spatial/Census/",sep=""))%in%"County_Spatial_Data.RData"))){
County_Sp <- counties("Oregon")
save(County_Spatial, file = paste(getwd(),"/Supporting Data/Spatial/Census/County_Spatial_Data.RData",sep=""))
} else {County_Sp <- assignLoad(file = paste(getwd(),"/Supporting Data/Spatial/Census/County_Spatial_Data.RData",sep=""))}
| /scripts/viz_app_setup.r | permissive | UrbanStudy/Counts-Processor-in-R | R | false | false | 5,077 | r |
#Define custom scripts functions
#------------------------------
#Function that simplifies loading .RData objects
assignLoad <- function(filename){
load(filename)
get(ls()[ls() != "filename"])
}
#Items from Data Dictionary
######################################
#Load User Type sheet---
Data_Dictionary.. <- read_excel("Documentation/Data Dictionary.xlsx", sheet = "User_Type_Code")
Eco_User_Type. <- Data_Dictionary..$User_Type
names(Eco_User_Type.) <- Data_Dictionary..$Eco_User_Type_Desc
Eco_User_Type. <- Eco_User_Type.[!(is.na(names(Eco_User_Type.)))]
#Define additional user types codes since Eco Counter does not correctly define users when bike/ped collected together---
User_Type. <- Data_Dictionary..$User_Type
names(User_Type.) <- Data_Dictionary..$User_Type_Desc
User_Type. <- User_Type.[!(is.na(names(User_Type.)))]
#Load Direction Code sheet---
Data_Dictionary.. <- read_excel("Documentation/Data Dictionary.xlsx", sheet = "Direction")
#Create a direction description vector
Direction_Desc. <- Data_Dictionary..$Direction_Code
names(Direction_Desc.) <- Data_Dictionary..$Direction_Desc
#Load Collection Type Code sheet---
Data_Dictionary.. <- read_excel("Documentation/Data Dictionary.xlsx", sheet = "Collection_Type_Code")
#Collection Type Codes---
Collection_Type. <- Data_Dictionary..$Collection_Type
names(Collection_Type.) <- Data_Dictionary..$Collection_Type_Desc
#Load Facility Type Code sheet---
Data_Dictionary.. <- read_excel("Documentation/Data Dictionary.xlsx", sheet = "Facility_Type_Code")
#Create facility type desc vector
Facility_Type. <- Data_Dictionary..$Facility_Type
names(Facility_Type.) <-Data_Dictionary..$Facility_Type_Desc
#Load Facility Type Code sheet---
Data_Dictionary.. <- read_excel("Documentation/Data Dictionary.xlsx", sheet = "Device_Type")
#Create device type desc vector
Device_Type. <- Data_Dictionary..$Device_Type
names(Device_Type.) <- Data_Dictionary..$Device_Type_Desc
#Load Error Code sheet---
Data_Dictionary.. <- read_excel("Documentation/Data Dictionary.xlsx", sheet = "Error_Codes")
#Create device type desc vector
Error_Code. <- Data_Dictionary..$Error_Code
names(Error_Code.) <- Data_Dictionary..$Error_Name
#Load Data
#--------------------
#Step 3 Data ---------------------------------
All_Files. <- file.info(dir(paste(getwd(),"/Counts Data/Step 3 - Counts with Errors",sep=""), full.names=TRUE))$ctime
names(All_Files.) <- dir(paste(getwd(),"/Counts Data/Step 3 - Counts with Errors",sep=""), full.names=TRUE)
All_Files.<- All_Files.[!grepl(".csv", names(All_Files.))]
#Sub_Location_Id
##########
#Daily Sub Location---
File <- All_Files.[grep("Daily_Sub_Location_Id", names(All_Files.))]
File <- names(File[File%in%max(File)])
Load_Daily_Sub_Location_Id.. <- assignLoad(File)
#Hourly---
#File <- All_Files.[grep("Hourly_Sub_Location_Id", names(All_Files.))]
#File <- names(File[File%in%max(File)])
#Load_Hourly_Sub_Location_Id.. <- assignLoad(File)
#Select and rename data
Daily_Sub_Location_Id.. <- Load_Daily_Sub_Location_Id..[ Load_Daily_Sub_Location_Id..$Direction%in%"Total" & Load_Daily_Sub_Location_Id..$Obs_Hours%in%24,]
#Create a location Id summary table
#############################
Daily_Location_Id.. <- Daily_Sub_Location_Id.. %>% group_by(Device_Name, User_Type_Desc,Date) %>% summarise(Counts = sum(Counts,na.rm=T))
#Add columns
Daily_Location_Id..$Month <- months(Daily_Location_Id..$Date)
Daily_Location_Id..$Year <- year(Daily_Location_Id..$Date)
Daily_Location_Id..$Weekday <- weekdays(Daily_Location_Id..$Date)
Daily_Location_Id.. <- Daily_Location_Id.. %>% mutate(Is_Weekday = ifelse(Weekday%in%c("Saturday","Sunday"),"Weekend","Weekday"))
#Error code - if any code is not 0 assign a new error falg
Error_Codes.. <- Daily_Sub_Location_Id.. %>% group_by(Device_Name,User_Type_Desc,Date) %>%
summarise(Error_Code = case_when(
all(Error_Code == 0) ~ 0,
any(Error_Code == 1) ~ 1,
any(Error_Code == 2) ~ 2,
any(Error_Code == 3) ~ 3,
any(Error_Code == 4) ~ 4,
any(Error_Code == 5) ~ 5))
#Join error codes to dailky counts
Daily_Location_Id.. <- left_join(Daily_Location_Id.., Error_Codes.., by = c("Device_Name","User_Type_Desc","Date"))
#Append some columns
#############
#Descriptive of Error Code
Daily_Sub_Location_Id..$Error_Code_Desc <- names(Error_Code.)[match(Daily_Sub_Location_Id..$Error_Code, Error_Code.)]
Daily_Location_Id..$Error_Code_Desc <- names(Error_Code.)[match(Daily_Location_Id..$Error_Code, Error_Code.)]
#Load projection file from file or download if not on file
#Download spatial data for select counties for defining project
if(!(any(list.files(paste(getwd(),"/Supporting Data/Spatial/Census/",sep=""))%in%"County_Spatial_Data.RData"))){
County_Sp <- counties("Oregon")
save(County_Spatial, file = paste(getwd(),"/Supporting Data/Spatial/Census/County_Spatial_Data.RData",sep=""))
} else {County_Sp <- assignLoad(file = paste(getwd(),"/Supporting Data/Spatial/Census/County_Spatial_Data.RData",sep=""))}
|
## version: 1.27
## method: get
## path: /plugins
## code: 200
## response: [{"Id":"5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078","Name":"tiborvass/sample-volume-plugin","Tag":"latest","Active":true,"Settings":{"Env":"DEBUG=0","Args":{},"Devices":null},"Config":{"Description":"A sample volume plugin for Docker","Documentation":"https://docs.docker.com/engine/extend/plugins/","Interface":{"Types":"docker.volumedriver/1.0","Socket":"plugins.sock"},"Entrypoint":["/usr/bin/sample-volume-plugin","/data"],"WorkDir":"","User":{},"Network":{"Type":""},"Linux":{"Capabilities":{},"AllowAllDevices":false,"Devices":null},"Mounts":null,"PropagatedMount":"/data","Env":[{"Name":"DEBUG","Description":"If set, prints debug messages","Settable":{},"Value":"0"}],"Args":{"Name":"args","Description":"command line arguments","Settable":{},"Value":[]}}}]
NULL
## NOTE: compared with the version in the spec, I have set
## "Devices":null (both places it is used), and "Mounts":null - all
## were :{} which does not agree with the spec. The example for 1.33
## shows something more sensible as output but that's using the new
## pattern (see plugin_inspect.R) - if you don't do this then you get
## an error like "Was handed the wrong sort of thing" because the
## response is incorrect.
# Small helper: build a data.frame without converting character columns to
# factors (pre-R-4.0 default behaviour made this wrapper necessary).
data_frame <- function(...) data.frame(..., stringsAsFactors = FALSE)
settings <- list(
mounts = data_frame(
name = character(),
description = character(),
settable = I(list()),
source = character(),
destination = character(),
type = character(),
options = I(list())),
env = "DEBUG=0",
args = character(),
devices = data_frame(
name = character(),
description = character(),
settable = I(list()),
path = character()))
config <- list(
description = "A sample volume plugin for Docker",
documentation = "https://docs.docker.com/engine/extend/plugins/",
interface = list(
types = data_frame(
prefix = NA_character_,
capability = NA_character_,
version = NA_character_),
socket = "plugins.sock"),
entrypoint = c("/usr/bin/sample-volume-plugin", "/data"),
work_dir = "",
user = list(uid = NA_integer_, gid = NA_integer_),
network = list(type = ""),
linux = list(
capabilities = character(0),
allow_all_devices = FALSE,
devices = data_frame(
name = character(0),
description = character(0),
settable = I(list()),
path = character(0))),
propagated_mount = "/data",
mounts = data_frame(
name = character(0),
description = character(0),
settable = I(list()),
source = character(0),
destination = character(0),
type = character(0),
options = I(list())),
env = data_frame(
name = "DEBUG",
description = "If set, prints debug messages",
settable = I(list(character(0))),
value = "0"),
args = list(
name = "args",
description = "command line arguments",
settable = character(0),
value = character(0)),
rootfs = NULL)
data_frame(
id = "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078",
name = "tiborvass/sample-volume-plugin",
enabled = NA,
settings = I(list(settings)),
plugin_reference = NA_character_,
config = I(list(config)))
| /tests/testthat/sample_responses/v1.27/plugin_list.R | no_license | cran/stevedore | R | false | false | 3,253 | r | ## version: 1.27
## method: get
## path: /plugins
## code: 200
## response: [{"Id":"5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078","Name":"tiborvass/sample-volume-plugin","Tag":"latest","Active":true,"Settings":{"Env":"DEBUG=0","Args":{},"Devices":null},"Config":{"Description":"A sample volume plugin for Docker","Documentation":"https://docs.docker.com/engine/extend/plugins/","Interface":{"Types":"docker.volumedriver/1.0","Socket":"plugins.sock"},"Entrypoint":["/usr/bin/sample-volume-plugin","/data"],"WorkDir":"","User":{},"Network":{"Type":""},"Linux":{"Capabilities":{},"AllowAllDevices":false,"Devices":null},"Mounts":null,"PropagatedMount":"/data","Env":[{"Name":"DEBUG","Description":"If set, prints debug messages","Settable":{},"Value":"0"}],"Args":{"Name":"args","Description":"command line arguments","Settable":{},"Value":[]}}}]
NULL
## NOTE: compared with the version in the spec, I have set
## "Devices":null (both places it is used), and "Mounts":null - all
## were :{} which does not agree with the spec. The example for 1.33
## shows something more sensible as output but that's using the new
## pattern (see plugin_inspect.R) - if you don't do this then you get
## an error like "Was handed the wrong sort of thing" because the
## response is incorrect.
data_frame <- function(...) {
data.frame(..., stringsAsFactors = FALSE)
}
settings <- list(
mounts = data_frame(
name = character(),
description = character(),
settable = I(list()),
source = character(),
destination = character(),
type = character(),
options = I(list())),
env = "DEBUG=0",
args = character(),
devices = data_frame(
name = character(),
description = character(),
settable = I(list()),
path = character()))
config <- list(
description = "A sample volume plugin for Docker",
documentation = "https://docs.docker.com/engine/extend/plugins/",
interface = list(
types = data_frame(
prefix = NA_character_,
capability = NA_character_,
version = NA_character_),
socket = "plugins.sock"),
entrypoint = c("/usr/bin/sample-volume-plugin", "/data"),
work_dir = "",
user = list(uid = NA_integer_, gid = NA_integer_),
network = list(type = ""),
linux = list(
capabilities = character(0),
allow_all_devices = FALSE,
devices = data_frame(
name = character(0),
description = character(0),
settable = I(list()),
path = character(0))),
propagated_mount = "/data",
mounts = data_frame(
name = character(0),
description = character(0),
settable = I(list()),
source = character(0),
destination = character(0),
type = character(0),
options = I(list())),
env = data_frame(
name = "DEBUG",
description = "If set, prints debug messages",
settable = I(list(character(0))),
value = "0"),
args = list(
name = "args",
description = "command line arguments",
settable = character(0),
value = character(0)),
rootfs = NULL)
data_frame(
id = "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078",
name = "tiborvass/sample-volume-plugin",
enabled = NA,
settings = I(list(settings)),
plugin_reference = NA_character_,
config = I(list(config)))
|
# Fit a cross-validated elastic net for the central nervous system tissue
# and append the fitted path summary to the model log file.
library(glmnet)
mydata = read.table("./TrainingSet/Correlation/central_nervous_system.csv",head=T,sep=",")
# Column 1 is the response; columns 4+ are the predictors.
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
# Fix the CV fold assignment for reproducibility.
set.seed(123)
# 10-fold CV, MSE loss, mostly-ridge penalty (alpha = 0.05), standardized
# predictors. NOTE(review): naming the result `glm` shadows stats::glm for
# the rest of the session.
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.05,family="gaussian",standardize=TRUE)
# NOTE(review): sink() has no on.exit() guard — if print() errors, output
# stays redirected to the file.
sink('./Model/EN/Correlation/central_nervous_system/central_nervous_system_021.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/Correlation/central_nervous_system/central_nervous_system_021.R | no_license | leon1003/QSMART | R | false | false | 408 | r | library(glmnet)
mydata = read.table("./TrainingSet/Correlation/central_nervous_system.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.05,family="gaussian",standardize=TRUE)
sink('./Model/EN/Correlation/central_nervous_system/central_nervous_system_021.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
context("plotBMRBoxplots")
# End-to-end check of plotBMRBoxplots() on a two-learner / two-task
# benchmark: each plot is written to SVG and the XML is inspected for the
# expected facet panels and label strings.
test_that("BenchmarkResult", {
lrns = list(makeLearner("classif.nnet"), makeLearner("classif.rpart"))
tasks = list(multiclass.task, binaryclass.task)
rdesc = makeResampleDesc("CV", iters = 2L)
meas = list(acc, mmce, ber, featperc)
res = benchmark(lrns, tasks, rdesc, meas)
plotBMRBoxplots(res)
dir = tempdir()
path = paste0(dir, "/test.svg")
ggsave(path)
doc = XML::xmlParse(path)
# One facet (grey panel rectangle) is drawn per benchmark task.
expect_equal(length(XML::getNodeSet(doc, grey.rect.xpath, ns.svg)), length(getBMRTaskIds(res)))
# facetting works:
q = plotBMRBoxplots(res, facet.wrap.nrow = 2L)
testFacetting(q, 2L)
q = plotBMRBoxplots(res, facet.wrap.ncol = 2L)
testFacetting(q, ncol = 2L)
q = plotBMRBoxplots(res, facet.wrap.nrow = 2L, facet.wrap.ncol = 2L)
testFacetting(q, 2L, 2L)
# pretty names works
# With pretty.names = TRUE (the default) the labels are the learner short
# names and the first measure's long name ...
plotBMRBoxplots(res)
dir = tempdir()
path = paste0(dir, "/test.svg")
ggsave(path)
doc = XML::xmlParse(path)
testDocForStrings(doc, getBMRLearnerShortNames(res), grid.size = 2L)
testDocForStrings(doc, getBMRMeasures(res)[[1L]]$name)
# ... and with pretty.names = FALSE the raw learner and measure ids appear.
plotBMRBoxplots(res, pretty.names = FALSE)
dir = tempdir()
path = paste0(dir, "/test.svg")
ggsave(path)
doc = XML::xmlParse(path)
testDocForStrings(doc, getBMRLearnerIds(res), grid.size = 2L)
testDocForStrings(doc, getBMRMeasureIds(res)[[1L]])
})
# Regression test: names containing spaces must not break plotting. AUC
# requires probability predictions, hence predict.type = "prob".
test_that("BenchmarkResult allows spaces", {
cv = makeResampleDesc("CV", iters = 2L)
measures = list(mlr::auc)
learners = list(
makeLearner("classif.rpart", predict.type = "prob")
)
res = benchmark(learners, sonar.task, cv, measures)
plotBMRBoxplots(res, measure=auc)
ggsave(tempfile(fileext = ".png"))
})
| /tests/testthat/test_base_plotBMRBoxplots.R | no_license | JackStat/mlr | R | false | false | 1,659 | r | context("plotBMRBoxplots")
test_that("BenchmarkResult", {
lrns = list(makeLearner("classif.nnet"), makeLearner("classif.rpart"))
tasks = list(multiclass.task, binaryclass.task)
rdesc = makeResampleDesc("CV", iters = 2L)
meas = list(acc, mmce, ber, featperc)
res = benchmark(lrns, tasks, rdesc, meas)
plotBMRBoxplots(res)
dir = tempdir()
path = paste0(dir, "/test.svg")
ggsave(path)
doc = XML::xmlParse(path)
expect_equal(length(XML::getNodeSet(doc, grey.rect.xpath, ns.svg)), length(getBMRTaskIds(res)))
# facetting works:
q = plotBMRBoxplots(res, facet.wrap.nrow = 2L)
testFacetting(q, 2L)
q = plotBMRBoxplots(res, facet.wrap.ncol = 2L)
testFacetting(q, ncol = 2L)
q = plotBMRBoxplots(res, facet.wrap.nrow = 2L, facet.wrap.ncol = 2L)
testFacetting(q, 2L, 2L)
# pretty names works
plotBMRBoxplots(res)
dir = tempdir()
path = paste0(dir, "/test.svg")
ggsave(path)
doc = XML::xmlParse(path)
testDocForStrings(doc, getBMRLearnerShortNames(res), grid.size = 2L)
testDocForStrings(doc, getBMRMeasures(res)[[1L]]$name)
plotBMRBoxplots(res, pretty.names = FALSE)
dir = tempdir()
path = paste0(dir, "/test.svg")
ggsave(path)
doc = XML::xmlParse(path)
testDocForStrings(doc, getBMRLearnerIds(res), grid.size = 2L)
testDocForStrings(doc, getBMRMeasureIds(res)[[1L]])
})
test_that("BenchmarkResult allows spaces", {
cv = makeResampleDesc("CV", iters = 2L)
measures = list(mlr::auc)
learners = list(
makeLearner("classif.rpart", predict.type = "prob")
)
res = benchmark(learners, sonar.task, cv, measures)
plotBMRBoxplots(res, measure=auc)
ggsave(tempfile(fileext = ".png"))
})
|
# Multiple Linear Regression
# Import the dataset
dataset = read.csv('50_Startups.csv')
#dataset = dataset[, 2:3]
# Encode the categorical variable (State) as a factor
dataset$State = factor(dataset$State,
levels = c("New York", "California", "Florida"),
labels = c(1, 2, 3))
# Split the data into training and test sets
# install.packages("caTools")
library(caTools)
set.seed(123)
split = sample.split(dataset$Profit, SplitRatio = 0.8)
training_set = subset(dataset, split == TRUE)
testing_set = subset(dataset, split == FALSE)
# Feature scaling (left commented out: lm() handles unscaled predictors)
# training_set[,2:3] = scale(training_set[,2:3])
# testing_set[,2:3] = scale(testing_set[,2:3])
# Fit the Multiple Linear Regression model on the training set
regression = lm(formula = Profit ~ .,
data = training_set)
# Predict the results on the test set
y_pred = predict(regression, newdata = testing_set)
# Build the optimal model with Backward Elimination (significance level
# 0.05): at each step refit after dropping the predictor whose p-value in
# the previous summary() exceeded SL.
SL = 0.05
regression = lm(formula = Profit ~ R.D.Spend + Administration + Marketing.Spend + State,
data = dataset)
summary(regression)
regression = lm(formula = Profit ~ R.D.Spend + Administration + Marketing.Spend,
data = dataset)
summary(regression)
regression = lm(formula = Profit ~ R.D.Spend + Marketing.Spend,
data = dataset)
summary(regression)
regression = lm(formula = Profit ~ R.D.Spend,
data = dataset)
summary(regression)
| /datasets/Part 2 - Regression/Section 5 - Multiple Linear Regression/multiple_linear_regression.R | permissive | Yerkorvs/machinelearning-az | R | false | false | 1,544 | r | # Regresión Lineal Múltiple
# Multiple linear regression on the 50_Startups data.

# Load the dataset
dataset <- read.csv('50_Startups.csv')

# Encode the categorical State column as a factor
dataset$State <- factor(dataset$State,
                        levels = c("New York", "California", "Florida"),
                        labels = c(1, 2, 3))

# Train/test split (80/20), reproducible via a fixed seed
# install.packages("caTools")
library(caTools)
set.seed(123)
split <- sample.split(dataset$Profit, SplitRatio = 0.8)
training_set <- subset(dataset, split == TRUE)
testing_set <- subset(dataset, split == FALSE)

# Feature scaling is unnecessary for lm(); kept for reference only.
# training_set[,2:3] = scale(training_set[,2:3])
# testing_set[,2:3] = scale(testing_set[,2:3])

# Fit the full multiple linear regression model on the training set
regression <- lm(formula = Profit ~ .,
                 data = training_set)

# Predictions on the held-out test set
y_pred <- predict(regression, newdata = testing_set)

# Manual backward elimination: refit on the full data, dropping the least
# significant predictor after inspecting each summary(). SL is the
# significance threshold applied by eye.
SL <- 0.05
regression <- lm(formula = Profit ~ R.D.Spend + Administration + Marketing.Spend + State,
                 data = dataset)
summary(regression)
regression <- lm(formula = Profit ~ R.D.Spend + Administration + Marketing.Spend,
                 data = dataset)
summary(regression)
regression <- lm(formula = Profit ~ R.D.Spend + Marketing.Spend,
                 data = dataset)
summary(regression)
regression <- lm(formula = Profit ~ R.D.Spend,
                 data = dataset)
summary(regression)
|
#Matrices is a 2-dimensional array
#Includes properties of both vectors and dataframes
# Create a Matrix with a single column (default: one column, n rows)
matrix1 = matrix(data=c(1,2,3,4))
matrix1
# Create a matrix with defined rows and columns (filled column-wise by default)
matrix2 = matrix(data=c(1,2,3,4), nrow=2, ncol=2)
matrix2
# You can also fill by row (You can use T or TRUE)
matrix3 = matrix(data=c(1,2,3,4), nrow=2, ncol=2, byrow=T)
matrix3
# Get a Matrix dimension as c(rows, columns)
dim(matrix3)
# A value at row, column
matrix3[1,2]
# Get a whole row
matrix3[1,]
# Get a whole column
matrix3[,2]
# Combine vectors to make a Matrix (each vector becomes a row)
matrix4 = rbind(1:3, 4:6, 7:9)
matrix4
# Get 2nd and 3rd row
matrix4[2:3,]
# Get 2nd and 3rd row by omitting the 1st (negative index drops)
matrix4[-1,]
# Change the first value
matrix4[1,1] = 0
matrix4
# Change the 1st row
matrix4[1,] = c(10,11,12)
matrix4 | /Matrices.R | no_license | Amanraj2212/DSA-IMT-Ghaziabad | R | false | false | 856 | r | #Matrices is a 2-dimensional array
# Demo: matrices are 2-D arrays that share traits of vectors and data frames.

# A single-column matrix
matrix1 <- matrix(data = c(1, 2, 3, 4))
matrix1

# Explicit row/column counts (filled column-wise by default)
matrix2 <- matrix(data = c(1, 2, 3, 4), nrow = 2, ncol = 2)
matrix2

# Fill row-wise instead
matrix3 <- matrix(data = c(1, 2, 3, 4), nrow = 2, ncol = 2, byrow = TRUE)
matrix3

# Dimensions as c(rows, columns)
dim(matrix3)
# Single cell, whole row, whole column
matrix3[1, 2]
matrix3[1, ]
matrix3[, 2]

# Stack vectors row-wise into a matrix
matrix4 <- rbind(1:3, 4:6, 7:9)
matrix4

# Select rows 2-3; equivalently drop row 1 with a negative index
matrix4[2:3, ]
matrix4[-1, ]

# Element assignment, then whole-row assignment
matrix4[1, 1] <- 0
matrix4
matrix4[1, ] <- c(10, 11, 12)
matrix4
# Run t-SNE on the sample-by-feature matrix (counts are transposed so each
# column/sample becomes a row) and cache the result in the object's
# metadata under "tsne". Returns the modified object.
compute_tsne <- function(treeseobject) {
  embedding <- Rtsne(t(as.matrix(assays(treeseobject)$counts)))
  treeseobject@metadata[["tsne"]] <- embedding
  treeseobject
}
# Return one record per sample with its id and two embedding coordinates.
#
# NOTE(review): despite the name, this returns the cached t-SNE embedding
# produced by compute_tsne() — not a true PCA; the two coordinates are
# merely labelled PC1/PC2 for the consumer.
getPCA <- function(treeseobject) {
  # Lazily compute and cache the t-SNE embedding in the object's metadata.
  if (is.null(metadata(treeseobject)$tsne)) {
    treeseobject <- compute_tsne(treeseobject)
  }
  measurements <- metadata(treeseobject)$tsne

  # One list entry per sample. seq_along() is safe for empty/length-1 column
  # sets, unlike seq() on the names vector.
  data <- vector("list", length(colnames(treeseobject)))
  for (col in seq_along(colnames(treeseobject))) {
    # BUG FIX: the original had `temp` and `<- list(...)` on separate lines,
    # which does not parse in R; joined into one assignment.
    data[[col]] <- list(sample_id = colnames(treeseobject)[col],
                        PC1 = measurements$Y[col, 1],
                        PC2 = measurements$Y[col, 2])
  }
  print(data)  # debugging leftover; kept to preserve existing console output
  return(data)
}
json <- toJSON( data )
| /getPCA.R | no_license | kzintas/SCRNAviz | R | false | false | 1,155 | r | compute_tsne<- function(treeseobject){
tsne <- Rtsne(t(as.matrix(assays(treeseobject)$counts)))
treeseobject@metadata[['tsne']]<- tsne
treeseobject
}
# Return one record per sample with its id and two embedding coordinates.
#
# NOTE(review): despite the name, this returns the cached t-SNE embedding
# produced by compute_tsne() — not a true PCA; the two coordinates are
# merely labelled PC1/PC2 for the consumer.
getPCA <- function(treeseobject) {
  # Lazily compute and cache the t-SNE embedding in the object's metadata.
  if (is.null(metadata(treeseobject)$tsne)) {
    treeseobject <- compute_tsne(treeseobject)
  }
  measurements <- metadata(treeseobject)$tsne

  # One list entry per sample. seq_along() is safe for empty/length-1 column
  # sets, unlike seq() on the names vector.
  data <- vector("list", length(colnames(treeseobject)))
  for (col in seq_along(colnames(treeseobject))) {
    # BUG FIX: the original had `temp` and `<- list(...)` on separate lines,
    # which does not parse in R; joined into one assignment.
    data[[col]] <- list(sample_id = colnames(treeseobject)[col],
                        PC1 = measurements$Y[col, 1],
                        PC2 = measurements$Y[col, 2])
  }
  print(data)  # debugging leftover; kept to preserve existing console output
  return(data)
}
json <- toJSON( data )
|
# NOTE(review): wiping the global environment from inside a script is an
# anti-pattern (it destroys the caller's workspace); consider removing.
rm(list=ls())
# Shadows base::require with an install-if-missing variant.
# NOTE(review): defined but never called below — presumably kept for
# interactive convenience; verify before deleting.
require <- function(x) {if (!base::require(x, character.only = TRUE)) {install.packages(x, dep = TRUE) ; base::require(x, character.only = TRUE)}}#overwrites 'require' function to install missing packages
##### Loads the data-files. Might take a while, be patient! #####
NEI <- readRDS("data/summarySCC_PM25.rds")
SCC <- readRDS("data/Source_Classification_Code.rds")
##### Have total emissions from PM2.5 decreased in the United States from 1999 to 2008? #####
# Using the base plotting system, make a plot showing the total PM2.5 emission
# from all sources for each of the years 1999, 2002, 2005, and 2008.
#calculates the mean of values from each location (fips) for each year
mean.from.each.fip <- with(NEI, tapply(Emissions, list(fips,year), mean, na.rm=TRUE))
#creates a logical vector of which locations have values for each year
completes <- complete.cases(mean.from.each.fip)
#creates a matrix of means from each fip for each year, but only the locations with measures from all years
complete.mean.from.each.fip <- mean.from.each.fip[completes,]
#calculates the total emissions from each year by adding the means from each fip together
total.emissions <- colSums(complete.mean.from.each.fip)
#plots the total emissions from each year as a barchart written to plot1.png
png(filename="plot1.png")
barplot(total.emissions, main = "Total PM2.5 emissions", ylab = "PM2.5 in tonnes")
dev.off()
| /plot1.R | no_license | JonathanYde/CourseProject2 | R | false | false | 1,384 | r | rm(list=ls())
# Overwrite `require` with an install-if-missing variant.
# (Kept for interactive convenience; not actually called below.)
require <- function(x) {
  if (!base::require(x, character.only = TRUE)) {
    install.packages(x, dep = TRUE)
    base::require(x, character.only = TRUE)
  }
}

# Load the data files. Might take a while, be patient!
NEI <- readRDS("data/summarySCC_PM25.rds")
SCC <- readRDS("data/Source_Classification_Code.rds")

# Question: have total PM2.5 emissions in the United States decreased from
# 1999 to 2008? Plot total emissions per measured year with base graphics.

# Mean emissions per location (fips) for each year
fip_year_means <- with(NEI, tapply(Emissions, list(fips, year), mean, na.rm = TRUE))
# Keep only the locations with measurements in every year
has_all_years <- complete.cases(fip_year_means)
fip_year_means_complete <- fip_year_means[has_all_years, ]
# Total emissions per year = sum of the per-location means
total.emissions <- colSums(fip_year_means_complete)

# Render the yearly totals as a bar chart
png(filename = "plot1.png")
barplot(total.emissions, main = "Total PM2.5 emissions", ylab = "PM2.5 in tonnes")
dev.off()
|
##----------------------------------------------------------------------------##
## parse BED files
#' read_bed
#'
#' Read a BED file into a GRanges object via rtracklayer.
#'
#' @param x string path to the peak file
#'
#' @return A GRanges object with one range per BED record.
#'
#' @import rtracklayer
#' @export
#'
read_bed <- function(x) {
  rtracklayer::import(x, format = "BED")
}
#' read_peak_xls
#'
#' Parse the commented "key: value" header of a MACS2 peak .xls file into a
#' two-column data frame.
#'
#' @param x string path to the peak xls, output of MACS2
#' @export
#'
read_peak_xls <- function(x) {
  header <- readLines(x, n = 50)          # header lives in the first lines
  header <- header[grepl("^#", header)]   # keep commented lines only
  header <- header[grepl(":", header)]    # keep "key: value" records
  header <- gsub("^#", "", header)        # strip the leading '#'
  as.data.frame(stringr::str_split(header, ": ", simplify = TRUE))
}
#' read_narrowpeak
#'
#' Read a narrowPeak file (BED6+4) into a GRanges object; the four extra
#' narrowPeak columns are declared explicitly for rtracklayer.
#'
#' @param x string path to the peak file
#'
#' @return A GRanges object with signalValue/pValue/qValue/peak metadata columns.
#'
#' @import rtracklayer
#' @export
#'
read_narrowpeak <- function(x) {
  # extra columns beyond standard BED6, as defined by the narrowPeak format
  ext <- c(signalValue = "numeric", pValue = "numeric",
           qValue = "numeric", peak = "integer")
  rtracklayer::import(x, format = "BED", extraCols = ext)
}
#' intersect_bed2
#'
#' Overlap two narrowPeak files and synthesize pseudo element ids for the
#' exclusive and shared compartments of a 2-set Venn diagram.
#'
#' @param blist list, bed files
#' @import GenomicRanges
#' @export
intersect_bed2 <- function(blist){
  stopifnot(length(blist) >= 2)
  bed1 <- blist[[1]]
  bed2 <- blist[[2]]
  # bed 2 gr
  gr1 <- read_narrowpeak(bed1)
  gr2 <- read_narrowpeak(bed2)
  gr12 <- GenomicRanges::findOverlaps(gr1, gr2)
  # intersect: fabricate ids "a.."/"b.."/"c.." sized by the compartment counts.
  # NOTE(review): length(gr12) counts overlap *pairs*; if one range overlaps
  # several, it can exceed length(gr1)/length(gr2) and make seq_len() fail —
  # countOverlaps() or unique query/subject hits may be intended. TODO confirm.
  n1 <- paste("a", seq_len(length(gr1) - length(gr12)), sep = "")
  n2 <- paste("b", seq_len(length(gr12)), sep = "")
  n3 <- paste("c", seq_len(length(gr2) - length(gr12)), sep = "")
  ## list
  x <- list(rep1 = c(n1, n2), rep2 = c(n2, n3))
  # out
  return(x)
}
#' intersect_bed3
#'
#' Partition three narrowPeak files into the seven compartments of a 3-set
#' Venn diagram and return pseudo element ids per replicate.
#'
#' @param blist list, intersect 3 bed files
#' @import GenomicRanges
#' @export
intersect_bed3 <- function(blist){
  stopifnot(length(blist) >= 3)
  bed1 <- blist[[1]]
  bed2 <- blist[[2]]
  bed3 <- blist[[3]]
  # bed to gr
  gr1 <- read_narrowpeak(bed1)
  gr2 <- read_narrowpeak(bed2)
  gr3 <- read_narrowpeak(bed3)
  # pairwise overlaps
  gr12 <- findOverlaps(gr1, gr2, ignore.strand = TRUE)
  gr13 <- findOverlaps(gr1, gr3, ignore.strand = TRUE)
  gr23 <- findOverlaps(gr2, gr3, ignore.strand = TRUE)
  # triple overlap: ranges of gr1 that hit gr2, re-overlapped with gr3.
  # BUG FIX: was `GenomicRanges::findOverlapsfindOverlaps` (typo), which
  # fails at run time with "could not find function".
  gr123 <- GenomicRanges::findOverlaps(gr1[gr12@from],
                                       gr3,
                                       ignore.strand = TRUE)
  # compartment sizes: subtract the triple overlap to make pairwise counts
  # exclusive, then derive each set's exclusive count
  n12 <- length(gr12)
  n13 <- length(gr13)
  n23 <- length(gr23)
  #
  n123 <- length(gr123)
  n12 <- n12 - n123
  n13 <- n13 - n123
  n23 <- n23 - n123
  n1 <- length(gr1) - n12 - n13 - n123
  n2 <- length(gr2) - n12 - n23 - n123
  n3 <- length(gr3) - n13 - n23 - n123
  # overlap
  out <- c(n1, n2, n3, n12, n13, n23, n123)
  names(out) <- c("n1", "n2", "n3", "n12", "n13", "n23", "n123")
  # fabricate unique ids ("a1", "b1", ...) per compartment
  p <- lapply(seq_len(7), function(i){
    paste(letters[i], seq_len(out[i]), sep = "")
  })
  names(p) <- names(out)
  # each replicate = its exclusive ids plus every compartment it shares
  x <- list(
    rep1 = c(p$n1, p$n12, p$n13, p$n123),
    rep2 = c(p$n2, p$n12, p$n23, p$n123),
    rep3 = c(p$n3, p$n13, p$n23, p$n123))
  return(x)
}
#' intersect_bed4
#'
#' @param blist list, intersect 4 bed files
#' @import GenomicRanges
#' @export
intersect_bed4 <- function(blist){
  # NOTE(review): unimplemented stub — it only unpacks the four paths and
  # (invisibly) returns the last assignment; a 4-way partition analogous to
  # intersect_bed3() is still TODO. bed_venn() will misbehave for 4 files.
  stopifnot(length(blist) >= 4)
  bed1 = blist[[1]]
  bed2 = blist[[2]]
  bed3 = blist[[3]]
  bed4 = blist[[4]]
  # bed to gr
}
#' bed_venn
#'
#' Draw a Venn diagram for 2-4 narrowPeak files by dispatching to the
#' matching intersect_bed*() helper.
#'
#' @param blist list, a list of BED files
#' @param names character, the names for each group
#'
#' @export
bed_venn <- function(blist, names = NULL){
  n_files <- length(blist)
  if (n_files == 2) {
    x <- intersect_bed2(blist)
  } else if (n_files == 3) {
    x <- intersect_bed3(blist)
  } else if (n_files == 4) {
    x <- intersect_bed4(blist)
  } else {
    stop("only accept narrowpeaks: 2-4 files")
  }
  p <- vennplot(x, names)
  return(p)
}
| /R/bed.R | permissive | bakerwm/hiseqr | R | false | false | 3,816 | r |
##----------------------------------------------------------------------------##
## parse BED files
#' read_bed
#'
#' Read a BED file into a GRanges object via rtracklayer.
#'
#' @param x string path to the peak file
#'
#' @return A GRanges object with one range per BED record.
#'
#' @import rtracklayer
#' @export
#'
read_bed <- function(x) {
  rtracklayer::import(x, format = "BED")
}
#' read_peak_xls
#'
#' Parse the commented "key: value" header of a MACS2 peak .xls file into a
#' two-column data frame.
#'
#' @param x string path to the peak xls, output of MACS2
#' @export
#'
read_peak_xls <- function(x) {
  header <- readLines(x, n = 50)          # header lives in the first lines
  header <- header[grepl("^#", header)]   # keep commented lines only
  header <- header[grepl(":", header)]    # keep "key: value" records
  header <- gsub("^#", "", header)        # strip the leading '#'
  as.data.frame(stringr::str_split(header, ": ", simplify = TRUE))
}
#' read_narrowpeak
#'
#' Read a narrowPeak file (BED6+4) into a GRanges object; the four extra
#' narrowPeak columns are declared explicitly for rtracklayer.
#'
#' @param x string path to the peak file
#'
#' @return A GRanges object with signalValue/pValue/qValue/peak metadata columns.
#'
#' @import rtracklayer
#' @export
#'
read_narrowpeak <- function(x) {
  # extra columns beyond standard BED6, as defined by the narrowPeak format
  ext <- c(signalValue = "numeric", pValue = "numeric",
           qValue = "numeric", peak = "integer")
  rtracklayer::import(x, format = "BED", extraCols = ext)
}
#' intersect_bed2
#'
#' Overlap two narrowPeak files and synthesize pseudo element ids for the
#' exclusive and shared compartments of a 2-set Venn diagram.
#'
#' @param blist list, bed files
#' @import GenomicRanges
#' @export
intersect_bed2 <- function(blist){
  stopifnot(length(blist) >= 2)
  bed1 <- blist[[1]]
  bed2 <- blist[[2]]
  # bed 2 gr
  gr1 <- read_narrowpeak(bed1)
  gr2 <- read_narrowpeak(bed2)
  gr12 <- GenomicRanges::findOverlaps(gr1, gr2)
  # intersect: fabricate ids "a.."/"b.."/"c.." sized by the compartment counts.
  # NOTE(review): length(gr12) counts overlap *pairs*; if one range overlaps
  # several, it can exceed length(gr1)/length(gr2) and make seq_len() fail —
  # countOverlaps() or unique query/subject hits may be intended. TODO confirm.
  n1 <- paste("a", seq_len(length(gr1) - length(gr12)), sep = "")
  n2 <- paste("b", seq_len(length(gr12)), sep = "")
  n3 <- paste("c", seq_len(length(gr2) - length(gr12)), sep = "")
  ## list
  x <- list(rep1 = c(n1, n2), rep2 = c(n2, n3))
  # out
  return(x)
}
#' intersect_bed3
#'
#' Partition three narrowPeak files into the seven compartments of a 3-set
#' Venn diagram and return pseudo element ids per replicate.
#'
#' @param blist list, intersect 3 bed files
#' @import GenomicRanges
#' @export
intersect_bed3 <- function(blist){
  stopifnot(length(blist) >= 3)
  bed1 <- blist[[1]]
  bed2 <- blist[[2]]
  bed3 <- blist[[3]]
  # bed to gr
  gr1 <- read_narrowpeak(bed1)
  gr2 <- read_narrowpeak(bed2)
  gr3 <- read_narrowpeak(bed3)
  # pairwise overlaps
  gr12 <- findOverlaps(gr1, gr2, ignore.strand = TRUE)
  gr13 <- findOverlaps(gr1, gr3, ignore.strand = TRUE)
  gr23 <- findOverlaps(gr2, gr3, ignore.strand = TRUE)
  # triple overlap: ranges of gr1 that hit gr2, re-overlapped with gr3.
  # BUG FIX: was `GenomicRanges::findOverlapsfindOverlaps` (typo), which
  # fails at run time with "could not find function".
  gr123 <- GenomicRanges::findOverlaps(gr1[gr12@from],
                                       gr3,
                                       ignore.strand = TRUE)
  # compartment sizes: subtract the triple overlap to make pairwise counts
  # exclusive, then derive each set's exclusive count
  n12 <- length(gr12)
  n13 <- length(gr13)
  n23 <- length(gr23)
  #
  n123 <- length(gr123)
  n12 <- n12 - n123
  n13 <- n13 - n123
  n23 <- n23 - n123
  n1 <- length(gr1) - n12 - n13 - n123
  n2 <- length(gr2) - n12 - n23 - n123
  n3 <- length(gr3) - n13 - n23 - n123
  # overlap
  out <- c(n1, n2, n3, n12, n13, n23, n123)
  names(out) <- c("n1", "n2", "n3", "n12", "n13", "n23", "n123")
  # fabricate unique ids ("a1", "b1", ...) per compartment
  p <- lapply(seq_len(7), function(i){
    paste(letters[i], seq_len(out[i]), sep = "")
  })
  names(p) <- names(out)
  # each replicate = its exclusive ids plus every compartment it shares
  x <- list(
    rep1 = c(p$n1, p$n12, p$n13, p$n123),
    rep2 = c(p$n2, p$n12, p$n23, p$n123),
    rep3 = c(p$n3, p$n13, p$n23, p$n123))
  return(x)
}
#' intersect_bed4
#'
#' @param blist list, intersect 4 bed files
#' @import GenomicRanges
#' @export
intersect_bed4 <- function(blist){
  # NOTE(review): unimplemented stub — it only unpacks the four paths and
  # (invisibly) returns the last assignment; a 4-way partition analogous to
  # intersect_bed3() is still TODO. bed_venn() will misbehave for 4 files.
  stopifnot(length(blist) >= 4)
  bed1 = blist[[1]]
  bed2 = blist[[2]]
  bed3 = blist[[3]]
  bed4 = blist[[4]]
  # bed to gr
}
#' bed_venn
#'
#' Draw a Venn diagram for 2-4 narrowPeak files by dispatching to the
#' matching intersect_bed*() helper.
#'
#' @param blist list, a list of BED files
#' @param names character, the names for each group
#'
#' @export
bed_venn <- function(blist, names = NULL){
  n_files <- length(blist)
  if (n_files == 2) {
    x <- intersect_bed2(blist)
  } else if (n_files == 3) {
    x <- intersect_bed3(blist)
  } else if (n_files == 4) {
    x <- intersect_bed4(blist)
  } else {
    stop("only accept narrowpeaks: 2-4 files")
  }
  p <- vennplot(x, names)
  return(p)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/StyleMappings.R
\name{setEdgeSourceArrowColorMapping}
\alias{setEdgeSourceArrowColorMapping}
\title{Set Edge Source Arrow Color Mapping}
\usage{
setEdgeSourceArrowColorMapping(
table.column,
table.column.values = NULL,
colors = NULL,
mapping.type = "c",
default.color = NULL,
style.name = NULL,
network = NULL,
base.url = .defaultBaseUrl
)
}
\arguments{
\item{table.column}{Name of Cytoscape table column to map values from}
\item{table.column.values}{List of values from Cytoscape table to be used in mapping}
\item{colors}{List of hex colors to map to table.column.values}
\item{mapping.type}{(char) continuous, discrete or passthrough (c,d,p); default is continuous}
\item{default.color}{Hex color to set as default}
\item{style.name}{Name of style; default is "default" style}
\item{network}{(optional) Name or SUID of the network. Default is the "current"
network active in Cytoscape.}
\item{base.url}{(optional) Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://localhost:1234
and the latest version of the CyREST API supported by this version of RCy3.}
}
\value{
None
}
\description{
Map table column values to colors to set the source arrow color.
}
\examples{
\donttest{
setEdgeSourceArrowColorMapping('score', c(0,5), c('#FFFFFF','#FF7755'))
}
}
| /man/setEdgeSourceArrowColorMapping.Rd | permissive | kumonismo/RCy3 | R | false | true | 1,423 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/StyleMappings.R
\name{setEdgeSourceArrowColorMapping}
\alias{setEdgeSourceArrowColorMapping}
\title{Set Edge Source Arrow Color Mapping}
\usage{
setEdgeSourceArrowColorMapping(
table.column,
table.column.values = NULL,
colors = NULL,
mapping.type = "c",
default.color = NULL,
style.name = NULL,
network = NULL,
base.url = .defaultBaseUrl
)
}
\arguments{
\item{table.column}{Name of Cytoscape table column to map values from}
\item{table.column.values}{List of values from Cytoscape table to be used in mapping}
\item{colors}{List of hex colors to map to table.column.values}
\item{mapping.type}{(char) continuous, discrete or passthrough (c,d,p); default is continuous}
\item{default.color}{Hex color to set as default}
\item{style.name}{Name of style; default is "default" style}
\item{network}{(optional) Name or SUID of the network. Default is the "current"
network active in Cytoscape.}
\item{base.url}{(optional) Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://localhost:1234
and the latest version of the CyREST API supported by this version of RCy3.}
}
\value{
None
}
\description{
Map table column values to colors to set the source arrow color.
}
\examples{
\donttest{
setEdgeSourceArrowColorMapping('score', c(0,5), c('#FFFFFF','#FF7755'))
}
}
|
/Ejemplo07.R | no_license | Jgallo-R/Clase2R4DS | R | false | false | 1,468 | r | ||
################################################################################
context("test-split.R")
################################################################################
# Splitting the written iris CSV every 20 lines should yield the original
# file plus eight "_<i>.txt" parts in tempdir().
test_that("'split_every_nlines()' works", {
  tmp <- bigreadr::fwrite2(iris)
  test <- bigreadr:::split_every_nlines(tmp, tmp, 20, TRUE)
  files <- list.files(tempdir(), basename(tmp), full.names = TRUE)
  files2 <- c(tmp, paste0(tmp, "_", 1:8, ".txt"))
  expect_identical(normalizePath(sort(files)), normalizePath(files2))
})
################################################################################
# split_file() on a 24-line file of random-width lines, for several part
# sizes: metadata, produced files, line/byte counts and content must all
# round-trip exactly.
test_that("'split_file()' works", {
  strings <- c("", "", " ", sapply(10^(seq(0, 4, by = 0.2)), function(i) {
    paste(as.matrix(iris)[sample(nrow(iris), i, TRUE), ], collapse = " ")
  }))
  for (every in c(1, 2, 4, 12, 24, 25)) {
    writeLines(sample(strings, replace = TRUE), tmp <- tempfile())
    # Infos are correct
    infos <- split_file(tmp, every, tmp2 <- tempfile())
    expect_identical(infos[["name_in"]], normalizePath(tmp))
    expect_identical(infos[["prefix_out"]], path.expand(tmp2))
    expect_identical(infos[["repeat_header"]], FALSE)
    expect_equal(ceiling(infos[["nlines_all"]] / infos[["nlines_part"]]),
                 infos[["nfiles"]])
    expect_equal(infos[["nlines_all"]], 24)
    # New files all exist
    files <- get_split_files(infos)
    expect_true(all(file.exists(files)))
    # Number of lines and size is summing to whole input file
    expect_identical(sum(sapply(files, nlines)), nlines(tmp))
    expect_identical(sum(file.size(files)), file.size(tmp))
    # Content is the same
    expect_identical(do.call('c', lapply(files, readLines)), readLines(tmp))
  }
})
################################################################################
# With repeat_header = TRUE each part starts with the CSV header, so parts
# can be read back individually and row-bound.
test_that("'split_file()' works with a repeated header", {
  # Reading splitted files is easier
  tf <- fwrite2(cars, tempfile(fileext = ".csv"))
  sf1 <- split_file(tf, 10)
  gsf1 <- get_split_files(sf1)
  expect_equal(sum(sapply(gsf1, nlines)), 51)
  expect_error(Reduce(rbind, lapply(gsf1, fread2)),
               "names do not match previous names")
  sf2 <- split_file(tf, 10, repeat_header = TRUE)
  gsf2 <- get_split_files(sf2)
  expect_equal(sapply(gsf2, readLines, n = 1), rep(readLines(tf, n = 1), 6),
               check.attributes = FALSE)
  loaded_df <- Reduce(rbind, lapply(gsf2, read.csv))
  expect_equal(names(loaded_df), c("speed", "dist"))
  expect_equal(nrow(loaded_df), 50)
  # Content is the same
  first_part <- readLines(gsf2[1])
  other_parts <- unlist(lapply(gsf2[-1], function(f) readLines(f)[-1]))
  expect_identical(c(first_part, other_parts), readLines(tf))
})
################################################################################
# Same repeated-header invariants on random content and several part sizes.
test_that("'split_file()' works with a repeated header (special cases)", {
  strings <- c("", "", " ", sapply(10^(seq(0, 4, by = 0.2)), function(i) {
    paste(as.matrix(iris)[sample(nrow(iris), i, TRUE), ], collapse = " ")
  }))
  for (every in c(1, 2, 4, 12, 24, 25)) {
    writeLines(sample(strings, replace = TRUE), tmp <- tempfile())
    # Infos are correct
    infos <- split_file(tmp, every, tmp2 <- tempfile(), repeat_header = TRUE)
    expect_identical(infos[["name_in"]], normalizePath(tmp))
    expect_identical(infos[["prefix_out"]], path.expand(tmp2))
    expect_identical(infos[["repeat_header"]], TRUE)
    nlines_all_without_header <- infos[["nlines_all"]] - infos[["nfiles"]]
    expect_equal(nlines_all_without_header + 1, 24)
    expect_equal(ceiling((nlines_all_without_header + 1) / infos[["nlines_part"]]),
                 infos[["nfiles"]])
    # New files all exist
    files <- get_split_files(infos)
    expect_true(all(file.exists(files)))
    # Same first line for each file
    expect_equal(sapply(files, readLines, n = 1),
                 rep(readLines(tmp, n = 1), infos[["nfiles"]]),
                 check.attributes = FALSE)
    # Content is the same
    first_part <- readLines(files[1])
    other_parts <- unlist(lapply(files[-1], function(f) readLines(f)[-1]))
    expect_identical(c(first_part, other_parts), readLines(tmp))
  }
})
################################################################################
| /tests/testthat/test-split.R | no_license | cran/bigreadr | R | false | false | 4,330 | r | ################################################################################
context("test-split.R")
################################################################################
# Splitting the written iris CSV every 20 lines should yield the original
# file plus eight "_<i>.txt" parts in tempdir().
test_that("'split_every_nlines()' works", {
  tmp <- bigreadr::fwrite2(iris)
  test <- bigreadr:::split_every_nlines(tmp, tmp, 20, TRUE)
  files <- list.files(tempdir(), basename(tmp), full.names = TRUE)
  files2 <- c(tmp, paste0(tmp, "_", 1:8, ".txt"))
  expect_identical(normalizePath(sort(files)), normalizePath(files2))
})
################################################################################
# split_file() on a 24-line file of random-width lines, for several part
# sizes: metadata, produced files, line/byte counts and content must all
# round-trip exactly.
test_that("'split_file()' works", {
  strings <- c("", "", " ", sapply(10^(seq(0, 4, by = 0.2)), function(i) {
    paste(as.matrix(iris)[sample(nrow(iris), i, TRUE), ], collapse = " ")
  }))
  for (every in c(1, 2, 4, 12, 24, 25)) {
    writeLines(sample(strings, replace = TRUE), tmp <- tempfile())
    # Infos are correct
    infos <- split_file(tmp, every, tmp2 <- tempfile())
    expect_identical(infos[["name_in"]], normalizePath(tmp))
    expect_identical(infos[["prefix_out"]], path.expand(tmp2))
    expect_identical(infos[["repeat_header"]], FALSE)
    expect_equal(ceiling(infos[["nlines_all"]] / infos[["nlines_part"]]),
                 infos[["nfiles"]])
    expect_equal(infos[["nlines_all"]], 24)
    # New files all exist
    files <- get_split_files(infos)
    expect_true(all(file.exists(files)))
    # Number of lines and size is summing to whole input file
    expect_identical(sum(sapply(files, nlines)), nlines(tmp))
    expect_identical(sum(file.size(files)), file.size(tmp))
    # Content is the same
    expect_identical(do.call('c', lapply(files, readLines)), readLines(tmp))
  }
})
################################################################################
# With repeat_header = TRUE each part starts with the CSV header, so parts
# can be read back individually and row-bound.
test_that("'split_file()' works with a repeated header", {
  # Reading splitted files is easier
  tf <- fwrite2(cars, tempfile(fileext = ".csv"))
  sf1 <- split_file(tf, 10)
  gsf1 <- get_split_files(sf1)
  expect_equal(sum(sapply(gsf1, nlines)), 51)
  expect_error(Reduce(rbind, lapply(gsf1, fread2)),
               "names do not match previous names")
  sf2 <- split_file(tf, 10, repeat_header = TRUE)
  gsf2 <- get_split_files(sf2)
  expect_equal(sapply(gsf2, readLines, n = 1), rep(readLines(tf, n = 1), 6),
               check.attributes = FALSE)
  loaded_df <- Reduce(rbind, lapply(gsf2, read.csv))
  expect_equal(names(loaded_df), c("speed", "dist"))
  expect_equal(nrow(loaded_df), 50)
  # Content is the same
  first_part <- readLines(gsf2[1])
  other_parts <- unlist(lapply(gsf2[-1], function(f) readLines(f)[-1]))
  expect_identical(c(first_part, other_parts), readLines(tf))
})
################################################################################
# Same repeated-header invariants on random content and several part sizes.
test_that("'split_file()' works with a repeated header (special cases)", {
  strings <- c("", "", " ", sapply(10^(seq(0, 4, by = 0.2)), function(i) {
    paste(as.matrix(iris)[sample(nrow(iris), i, TRUE), ], collapse = " ")
  }))
  for (every in c(1, 2, 4, 12, 24, 25)) {
    writeLines(sample(strings, replace = TRUE), tmp <- tempfile())
    # Infos are correct
    infos <- split_file(tmp, every, tmp2 <- tempfile(), repeat_header = TRUE)
    expect_identical(infos[["name_in"]], normalizePath(tmp))
    expect_identical(infos[["prefix_out"]], path.expand(tmp2))
    expect_identical(infos[["repeat_header"]], TRUE)
    nlines_all_without_header <- infos[["nlines_all"]] - infos[["nfiles"]]
    expect_equal(nlines_all_without_header + 1, 24)
    expect_equal(ceiling((nlines_all_without_header + 1) / infos[["nlines_part"]]),
                 infos[["nfiles"]])
    # New files all exist
    files <- get_split_files(infos)
    expect_true(all(file.exists(files)))
    # Same first line for each file
    expect_equal(sapply(files, readLines, n = 1),
                 rep(readLines(tmp, n = 1), infos[["nfiles"]]),
                 check.attributes = FALSE)
    # Content is the same
    first_part <- readLines(files[1])
    other_parts <- unlist(lapply(files[-1], function(f) readLines(f)[-1]))
    expect_identical(c(first_part, other_parts), readLines(tmp))
  }
})
################################################################################
## test tidy and glance methods from mcmc_tidiers
stopifnot(require("testthat"), require("broom.mixed"))
context("stan tidiers")
# Only run when rstan is installed; the fixture is a pre-built rds in extdata.
if (suppressPackageStartupMessages(require(rstan, quietly = TRUE))) {
  test_that("tidy returns indexes if requested on rstanarm fits", {
    # Make sure that (inst/)extdata/run_examples.R was run to generate rds
    rstan_example <- readRDS(system.file("extdata", "rstan_example.rds", package = "broom.mixed"))
    # check_tidy from helper-checkers
    td <- tidy(rstan_example)
    check_tidy(td, 18, 3, c("term", "estimate", "std.error"))
    td <- tidy(rstan_example, index = TRUE)
    check_tidy(td, 18, 4, c("term", "index", "estimate", "std.error"))
    # with drop.pars = NULL the log-posterior row is kept as row 19
    td <- tidy(rstan_example, drop.pars = NULL)
    expect_equal(td[19, ][["term"]], "lp__")
    td <- tidy(rstan_example, conf.int = TRUE)
    check_tidy(td, 18, 5, c("term", "estimate", "std.error", "conf.low", "conf.high"))
    td <- tidy(rstan_example, rhat = TRUE)
    check_tidy(td, 18, 4, c("term", "estimate", "std.error", "rhat"))
    td <- tidy(rstan_example, ess = TRUE)
    check_tidy(td, 18, 4, c("term", "estimate", "std.error", "ess"))
  })
}
context("brms tidiers")
# brms fixtures (brms_zip, brms_multi, brms_multi_RE, brms_RE) come from
# extdata/brms_example.rda; everything below is gated on brms being installed.
if (suppressPackageStartupMessages(require("brms", quietly = TRUE))) {
  load(system.file("extdata","brms_example.rda",
                   package="broom.mixed",
                   mustWork=TRUE))
  ## n.b. different S3 methods found depending on environment
  zz <- tidy(brms_zip,effects="ran_vals")
  zz2 <- tidy(brms_zip)
  zz3 <- tidy(brms_multi)
  expect_warning(tidy(brms_multi_RE),"currently incorrect")
  suppressWarnings(zz4 <- tidy(brms_multi_RE))
  zz5 <- tidy(brms_RE, effects = "ran_vals")
  test_that("correct levels for models with zi/ranef",{
    expect_equal(zz[["level"]],
                 rep(c(paste("R",1:12,sep="-"),paste("VF",1:11,sep="-")),2))
  })
  test_that("component returned for brms models",
  {
    # conditional and zero-inflation components, in blocks of 1/13/1 terms
    expect_equal(zz2[["component"]],
                 unlist(lapply(list(c(1,1),c(13,1),c(1,1)),
                               rep,x=c("cond","zi"))))
  })
  test_that("component tags stripped from brms models",
  {
    expect_equal(c(table(zz2[["term"]])),
                 c(`(Intercept)` = 2L, minedno = 2L, `sd__(Intercept)` = 2L,
                   sppDESML = 1L,
                   `sppDESML:minedno` = 1L, sppDF = 1L, `sppDF:minedno` = 1L,
                   sppDM = 1L, `sppDM:minedno` = 1L, sppECMA = 1L,
                   `sppECMA:minedno` = 1L, sppECML = 1L, `sppECML:minedno` = 1L,
                   sppPR = 1L, `sppPR:minedno` = 1L))
  })
  test_that("multi-component brms models",
  {
    check_tidy(zz3, 8, 9,
               c("response", "effect", "component", "group",
                 "term", "estimate", "std.error",
                 "conf.low", "conf.high"))
  })
  # expected sleepstudy subject ids, repeated once per random-effect term
  sleepstudy.levels <- rep(c("308", "309", "310", "330", "331", "332", "333", "334", "335", "337",
                             "349", "350", "351", "352", "369", "370", "371", "372"), 2)
  test_that("ran_vals returns correct output", {
    expect_equal(nrow(zz5), 36)
    expect_equal(nrow(zz5 %>% filter(group == "Subject")), 36)
    expect_equal(nrow(zz5 %>% filter(term == "(Intercept)")), 18)
    expect_equal(nrow(zz5 %>% filter(term == "Days_extra")), 18)
    expect_equal(zz5$level, sleepstudy.levels)
  })
} ## if require("brms")
context("mcmc tidiers")
if (suppressPackageStartupMessages(require(coda, quietly = TRUE))) {
  data(line)
  x1 <- line[[1]]
  test_that("mcmc with ess",
  {
    # rhat is unsupported for plain mcmc objects, hence the expected warning
    expect_warning(td <- tidy(
      x = x1,
      conf.int = TRUE,
      robust = TRUE,
      rhat = TRUE,
      index = TRUE,
      ess = TRUE),
      "ignoring 'rhat'")
    check_tidy(td, 3, 7,
               c("term","index","estimate","std.error",
                 "conf.low","conf.high","ess"))
  })
}
| /tests/testthat/test-mcmc.R | no_license | bbolker/broom.mixed | R | false | false | 4,080 | r | ## test tidy and glance methods from mcmc_tidiers
stopifnot(require("testthat"), require("broom.mixed"))
context("stan tidiers")
if (suppressPackageStartupMessages(require(rstan, quietly = TRUE))) {
test_that("tidy returns indexes if requested on rstanarm fits", {
# Make sure that (inst/)extdata/run_examples.R was run to generate rds
rstan_example <- readRDS(system.file("extdata", "rstan_example.rds", package = "broom.mixed"))
# check_tidy from helper-checkers
td <- tidy(rstan_example)
check_tidy(td, 18, 3, c("term", "estimate", "std.error"))
td <- tidy(rstan_example, index = TRUE)
check_tidy(td, 18, 4, c("term", "index", "estimate", "std.error"))
td <- tidy(rstan_example, drop.pars = NULL)
expect_equal(td[19, ][["term"]], "lp__")
td <- tidy(rstan_example, conf.int = TRUE)
check_tidy(td, 18, 5, c("term", "estimate", "std.error", "conf.low", "conf.high"))
td <- tidy(rstan_example, rhat = TRUE)
check_tidy(td, 18, 4, c("term", "estimate", "std.error", "rhat"))
td <- tidy(rstan_example, ess = TRUE)
check_tidy(td, 18, 4, c("term", "estimate", "std.error", "ess"))
})
}
context("brms tidiers")
if (suppressPackageStartupMessages(require("brms", quietly = TRUE))) {
load(system.file("extdata","brms_example.rda",
package="broom.mixed",
mustWork=TRUE))
## n.b. different S3 methods found depending on environment
zz <- tidy(brms_zip,effects="ran_vals")
zz2 <- tidy(brms_zip)
zz3 <- tidy(brms_multi)
expect_warning(tidy(brms_multi_RE),"currently incorrect")
suppressWarnings(zz4 <- tidy(brms_multi_RE))
zz5 <- tidy(brms_RE, effects = "ran_vals")
test_that("correct levels for models with zi/ranef",{
expect_equal(zz[["level"]],
rep(c(paste("R",1:12,sep="-"),paste("VF",1:11,sep="-")),2))
})
test_that("component returned for brms models",
{
expect_equal(zz2[["component"]],
unlist(lapply(list(c(1,1),c(13,1),c(1,1)),
rep,x=c("cond","zi"))))
})
test_that("component tags stripped from brms models",
{
expect_equal(c(table(zz2[["term"]])),
c(`(Intercept)` = 2L, minedno = 2L, `sd__(Intercept)` = 2L,
sppDESML = 1L,
`sppDESML:minedno` = 1L, sppDF = 1L, `sppDF:minedno` = 1L,
sppDM = 1L, `sppDM:minedno` = 1L, sppECMA = 1L,
`sppECMA:minedno` = 1L, sppECML = 1L, `sppECML:minedno` = 1L,
sppPR = 1L, `sppPR:minedno` = 1L))
})
test_that("multi-component brms models",
{
check_tidy(zz3, 8, 9,
c("response", "effect", "component", "group",
"term", "estimate", "std.error",
"conf.low", "conf.high"))
})
sleepstudy.levels <- rep(c("308", "309", "310", "330", "331", "332", "333", "334", "335", "337",
"349", "350", "351", "352", "369", "370", "371", "372"), 2)
test_that("ran_vals returns correct output", {
expect_equal(nrow(zz5), 36)
expect_equal(nrow(zz5 %>% filter(group == "Subject")), 36)
expect_equal(nrow(zz5 %>% filter(term == "(Intercept)")), 18)
expect_equal(nrow(zz5 %>% filter(term == "Days_extra")), 18)
expect_equal(zz5$level, sleepstudy.levels)
})
} ## if require("brms")
context("mcmc tidiers")
if (suppressPackageStartupMessages(require(coda, quietly = TRUE))) {
data(line)
x1 <- line[[1]]
test_that("mcmc with ess",
{
expect_warning(td <- tidy(
x = x1,
conf.int = TRUE,
robust = TRUE,
rhat = TRUE,
index = TRUE,
ess = TRUE),
"ignoring 'rhat'")
check_tidy(td, 3, 7,
c("term","index","estimate","std.error",
"conf.low","conf.high","ess"))
})
}
|
# Entry point for R CMD check: run the hypoRF package's testthat suite.
library(testthat)
library(hypoRF)
test_check("hypoRF")
| /hypoRF/.Rproj.user/3E841124/sources/per/t/DDAE7624-contents | no_license | hedigers/RandomForestTest | R | false | false | 56 | library(testthat)
library(hypoRF)
test_check("hypoRF")
| |
#read example file
# NOTE(review): absolute Windows paths make this script machine-specific.
template <- read.csv("E:/Users/spectR/Desktop/%Rutgers/RESEARCH/brainpaint/template/DK_template.csv")
brainpaint <- read.csv("E:/Users/spectR/Desktop/%Rutgers/RESEARCH/brainpaint/template/LoadExample4BrainPainter.csv")
# t() coerces the data frame to a matrix.  As used below, row 1 holds the
# measure names and row 2 the measurement values (stored as strings, hence
# the as.numeric() calls throughout) -- TODO confirm against the CSV layout.
brainpaint <- t(brainpaint)
#remove subcortical measurements
#brainpaint[2, grep('_vol',(brainpaint[1,]))] <- 0
#zero out ~0 values
# Measurements inside the open interval (lowerZero, upperZero) are treated
# as noise and clamped to 0.  Vectorised over the same 220 columns the
# original per-column loop visited; which() gives explicit positions so the
# logical mask is never recycled, and numZeroed counts the clamped columns.
lowerZero <- -0.03
upperZero <- 0.03
vals <- as.numeric(brainpaint[2, 1:220])
zeroed.cols <- which(vals > lowerZero & vals < upperZero)
brainpaint[2, zeroed.cols] <- 0
numZeroed <- length(zeroed.cols)
#negtopos
#for(i in 1:220){
# if(as.numeric(brainpaint[[2,i]]) < 0){
# brainpaint[2,i] <- as.numeric(brainpaint[[2,i]]) * -1
# }
#}
#0to4
#for(i in 1:220){
# if(as.numeric(brainpaint[[2,i]]) == 0){
# brainpaint[2,i] <- 4
# }
#}
#min/max
# Extremes of the measurement row, used to normalise the data.
# (min.val/max.val replace locals named `min`/`max`, which masked the base
# functions; nothing after this section reads them, only `bound`.)
min.val <- as.numeric(brainpaint[[2, which.min(brainpaint[2, ])]])
max.val <- as.numeric(brainpaint[[2, which.max(brainpaint[2, ])]])
scale <- 2
# bound = the extreme with the largest magnitude; the colour-bar code at
# the end of this script reads it, so it must stay defined here.
bound <- min.val
if (abs(min.val) < abs(max.val)) {
  bound <- max.val
}
#scale values into [-scale, scale] (the original comment said -3..3, but
#scale is 2)
if (min.val < 0) {
  # Data spans negative and positive values: scale symmetrically about 0.
  # (The original if/else inside this loop had two byte-identical
  # branches; collapsed to the single shared statement.)
  for (i in 1:220) {
    brainpaint[2, i] <- (as.numeric(brainpaint[[2, i]]) * scale/abs(bound))
  }
} else {
  #scale non-negative values into [0, scale]
  for (i in 1:220) {
    brainpaint[2, i] <- (as.numeric(brainpaint[[2, i]]) - min.val)/(max.val - min.val) * scale
  }
}
#changing 0 values to white color
# scale+1 is a sentinel value outside the data range; presumably rendered
# as white by BrainPainter -- TODO confirm.
for (i in 1:220) {
  if (as.numeric(brainpaint[[2, i]]) == 0) {
    brainpaint[2, i] <- scale + 1
  }
}
#split data into 3 regions + subcortical for each brain hemisphere
# Helper: select the columns of `brainpaint` whose header (row 1) matches
# `pattern`, then strip the 5-char "fs_X_" prefix and `suffix.len` trailing
# characters from the header so names match the BrainPainter template.
# Same greps/substring arithmetic as the original repeated statements.
extract_region <- function(pattern, suffix.len) {
  region <- brainpaint[, grep(pattern, (brainpaint[1, ]))]
  region[1, ] <- substring(region[1, ], 6, (nchar(region[1, ])) - suffix.len)
  region
}
r_area_brainpaint <- extract_region('fs_r_.*_area', 5)
r_grayvol_brainpaint <- extract_region('fs_r_.*_grayvol', 8)
r_thck_brainpaint <- extract_region('fs_r_.*_thck', 5)
# NOTE(review): the '.*_vol' pattern also matches '_grayvol' columns, as in
# the original -- preserved; confirm this is intended.
r_subcortical_brainpaint <- extract_region('fs_r_.*_vol', 4)
l_area_brainpaint <- extract_region('fs_l_.*_area', 5)
l_grayvol_brainpaint <- extract_region('fs_l_.*_grayvol', 8)
l_thck_brainpaint <- extract_region('fs_l_.*_thck', 5)
l_subcortical_brainpaint <- extract_region('fs_l_.*_vol', 4)
#function to format to match the template
# Append the matching hemisphere's subcortical columns, prepend the
# image-name column BrainPainter expects, promote the header row to column
# names, and drop the header row.
# NOTE(review): this masks base::format for the rest of the script; the
# name is kept because later calls in this file use it.
# Args:
#   df      - measure matrix whose row 1 holds region names
#   imgName - value for the Image-name-unique column
#   left    - 1 appends left-hemisphere subcortical data, else right
# Returns: the reformatted table (now with an explicit return value).
format <- function(df, imgName, left) {
  if (left == 1) {
    df <- cbind(df, l_subcortical_brainpaint)
  } else {
    df <- cbind(df, r_subcortical_brainpaint)
  }
  df <- cbind(image.name.unique = imgName, df)
  df[1, 1] <- "Image-name-unique"
  colnames(df) <- df[1, ]
  df <- df[-1, ]
  df
}
#left boolean for which subcortical regions to append
left <- 1
#names of the generated brainpaints
# Reformat each left-hemisphere measure table (format() defined above
# appends the matching subcortical columns and the image-name column).
l_area_brainpaint <- format(l_area_brainpaint,"left_area",left)
l_grayvol_brainpaint <- format(l_grayvol_brainpaint,"left_grayvol",left)
l_thck_brainpaint <- format(l_thck_brainpaint,"left_thck",left)
left <- 0
r_area_brainpaint <- format(r_area_brainpaint,"right_area",left)
r_grayvol_brainpaint <- format(r_grayvol_brainpaint,"right_grayvol",left)
r_thck_brainpaint <- format(r_thck_brainpaint,"right_thck",left)
#build final template, stacking
# One row per generated image, stacked into BrainPainter's input layout.
exampleBrainPaint <- rbind(l_area_brainpaint,l_grayvol_brainpaint,l_thck_brainpaint,r_area_brainpaint,r_grayvol_brainpaint,r_thck_brainpaint)
exampleBrainPaint <- as.data.frame(exampleBrainPaint)
#export
write.csv(exampleBrainPaint,"E:/Users/spectR/Desktop/%Rutgers/RESEARCH/brainpaint/template/formattedExampleBrainPaintTest.csv",row.names=FALSE)
# Colour anchors for the warm (positive) and cold (negative) colour bars.
yellow <- rgb(1, 1, 0, maxColorValue = 1)
orange <- rgb(1, 0.4, 0, maxColorValue = 1)
red <- rgb(1, 0, 0, maxColorValue = 1)
cyan <- rgb(0, 1, 1, maxColorValue = 1)
azure <- rgb(0, 0.5, 1, maxColorValue = 1)
blue <- rgb(0, 0, 1, maxColorValue = 1)
colfunc_warm <- colorRampPalette(c(yellow, orange, red))
colfunc_cold <- colorRampPalette(c(blue, azure, cyan))
# One-row matrix of 1..100: image() paints it as 100 stacked colour bands.
z <- matrix(1:100, nrow = 1)
x <- 1
# Warm bar spans [0, |bound|] on the y axis.
y <- seq(0, abs(bound), len = 100)
image(x, y, z, col = colfunc_warm(100), axes = FALSE, xlab = "", ylab = "")
axis(2)
# Cold bar spans [-|bound|, 0].  Fix: keep x = 1 so dim(z) still matches
# length(x) x length(y); the original reassigned x to a length-100 vector,
# which makes this image() call fail with a dimension error.
y <- seq(abs(bound) * -1, 0, len = 100)
image(x, y, z, col = colfunc_cold(100), axes = FALSE, xlab = "", ylab = "")
axis(2)
| /Brainpainter/brainpaint.R | no_license | jutchn/Research | R | false | false | 5,092 | r |
# NOTE(review): duplicate copy of the brainpaint.R script that already
# appears above in this file; kept byte-identical, comments only.
#read example file
template <- read.csv("E:/Users/spectR/Desktop/%Rutgers/RESEARCH/brainpaint/template/DK_template.csv")
brainpaint <- read.csv("E:/Users/spectR/Desktop/%Rutgers/RESEARCH/brainpaint/template/LoadExample4BrainPainter.csv")
# t() coerces the data frame to a matrix; row 1 = measure names,
# row 2 = values (as used below).
brainpaint <- t(brainpaint)
#remove subcortical measurements
#brainpaint[2, grep('_vol',(brainpaint[1,]))] <- 0
numZeroed <- 0
#zero out ~0 values
# Values in (-0.03, 0.03) are treated as noise and set to 0.
lowerZero <- -0.03
upperZero <- 0.03
for(i in 1:220){
if(as.numeric(brainpaint[[2,i]]) > lowerZero && as.numeric(brainpaint[2,i]) < upperZero){
brainpaint[2,i] <- as.numeric(brainpaint[[2,i]]) * 0
numZeroed <- numZeroed + 1
}
}
#negtopos
#for(i in 1:220){
# if(as.numeric(brainpaint[[2,i]]) < 0){
# brainpaint[2,i] <- as.numeric(brainpaint[[2,i]]) * -1
# }
#}
#0to4
#for(i in 1:220){
# if(as.numeric(brainpaint[[2,i]]) == 0){
# brainpaint[2,i] <- 4
# }
#}
#min/max
min <- as.numeric(brainpaint[[2,which.min(brainpaint[2,])]])
max <- as.numeric(brainpaint[[2,which.max(brainpaint[2,])]])
scale <- 2
# bound = extreme of largest magnitude, reused by the colour-bar code.
bound <- min
if(abs(min) < abs(max)){
bound <- max
}
#scale values between -3 and 3
# NOTE(review): actually scales into [-scale, scale] with scale = 2, and
# the two branches of the inner if/else below are identical.
if(min<0){
for(i in 1:220){
if(as.numeric(brainpaint[[2,i]]) < 0){
brainpaint[2,i] <- (as.numeric(brainpaint[[2,i]]) * scale/abs(bound))
} else {
brainpaint[2,i] <- (as.numeric(brainpaint[[2,i]]) * scale/abs(bound))
}
}
} else {
#scale values between 0 and 3
for(i in 1:220){
brainpaint[2,i] <- (as.numeric(brainpaint[[2,i]]) - min)/(max-min) * scale
}
}
#changing 0 values to white color
# scale+1 is a sentinel outside the data range (rendered as white).
for(i in 1:220){
if(as.numeric(brainpaint[[2,i]]) == 0){
brainpaint[2,i] <- scale+1
}
}
#split data into 3 regions + subcortical for each brain hemisphere
# Select columns by header pattern, then strip the "fs_X_" prefix and the
# measure suffix from the header so names match the template regions.
r_area_brainpaint <- brainpaint[, grep('fs_r_.*_area',(brainpaint[1,]))]
r_grayvol_brainpaint <- brainpaint[, grep('fs_r_.*_grayvol',(brainpaint[1,]))]
r_thck_brainpaint <- brainpaint[, grep('fs_r_.*_thck',(brainpaint[1,]))]
r_area_brainpaint[1,] <- substring(r_area_brainpaint[1,],6,(nchar(r_area_brainpaint[1,]))-5)
r_grayvol_brainpaint[1,] <- substring(r_grayvol_brainpaint[1,],6,(nchar(r_grayvol_brainpaint[1,]))-8)
r_thck_brainpaint[1,] <- substring(r_thck_brainpaint[1,],6,(nchar(r_thck_brainpaint[1,]))-5)
# NOTE(review): '.*_vol' also matches '_grayvol' columns -- confirm intended.
r_subcortical_brainpaint <- brainpaint[, grep('fs_r_.*_vol',(brainpaint[1,]))]
r_subcortical_brainpaint[1, ] <- substring(r_subcortical_brainpaint[1,],6,(nchar(r_subcortical_brainpaint[1,]))-4)
l_area_brainpaint <- brainpaint[, grep('fs_l_.*_area',(brainpaint[1,]))]
l_grayvol_brainpaint <- brainpaint[, grep('fs_l_.*_grayvol',(brainpaint[1,]))]
l_thck_brainpaint <- brainpaint[, grep('fs_l_.*_thck',(brainpaint[1,]))]
l_area_brainpaint[1,] <- substring(l_area_brainpaint[1,],6,(nchar(l_area_brainpaint[1,]))-5)
l_grayvol_brainpaint[1,] <- substring(l_grayvol_brainpaint[1,],6,(nchar(l_grayvol_brainpaint[1,]))-8)
l_thck_brainpaint[1,] <- substring(l_thck_brainpaint[1,],6,(nchar(l_thck_brainpaint[1,]))-5)
l_subcortical_brainpaint <- brainpaint[, grep('fs_l_.*_vol',(brainpaint[1,]))]
l_subcortical_brainpaint[1, ] <- substring(l_subcortical_brainpaint[1,],6,(nchar(l_subcortical_brainpaint[1,]))-4)
#function to format to match the template
# Appends the hemisphere's subcortical columns, prepends the image-name
# column, promotes the header row to column names and drops it.
# NOTE(review): masks base::format; returns df[-1,] invisibly via the last
# assignment (callers always assign the result).
format<- function(df,imgName,left){
if(left == 1){
df <- cbind(df,l_subcortical_brainpaint)
} else {
df <- cbind(df,r_subcortical_brainpaint)
}
df <- cbind(image.name.unique = imgName,df)
df[1,1]<-"Image-name-unique"
colnames(df) <- df[1,]
df<- df[-1,]
}
#left boolean for which subcortical regions to append
left <- 1
#names of the generated brainpaints
l_area_brainpaint <- format(l_area_brainpaint,"left_area",left)
l_grayvol_brainpaint <- format(l_grayvol_brainpaint,"left_grayvol",left)
l_thck_brainpaint <- format(l_thck_brainpaint,"left_thck",left)
left <- 0
r_area_brainpaint <- format(r_area_brainpaint,"right_area",left)
r_grayvol_brainpaint <- format(r_grayvol_brainpaint,"right_grayvol",left)
r_thck_brainpaint <- format(r_thck_brainpaint,"right_thck",left)
#build final template, stacking
exampleBrainPaint <- rbind(l_area_brainpaint,l_grayvol_brainpaint,l_thck_brainpaint,r_area_brainpaint,r_grayvol_brainpaint,r_thck_brainpaint)
exampleBrainPaint <- as.data.frame(exampleBrainPaint)
#export
write.csv(exampleBrainPaint,"E:/Users/spectR/Desktop/%Rutgers/RESEARCH/brainpaint/template/formattedExampleBrainPaintTest.csv",row.names=FALSE)
# Colour anchors for the warm (positive) and cold (negative) colour bars.
yellow <- rgb(1, 1, 0, maxColorValue = 1)
orange <- rgb(1, 0.4, 0, maxColorValue = 1)
red <- rgb(1, 0, 0, maxColorValue = 1)
cyan <- rgb(0, 1, 1, maxColorValue = 1)
azure <- rgb(0, 0.5, 1, maxColorValue = 1)
blue <- rgb(0, 0, 1, maxColorValue = 1)
colfunc_warm <- colorRampPalette(c(yellow, orange, red))
colfunc_cold <- colorRampPalette(c(blue, azure, cyan))
z=matrix(1:100,nrow=1)
x=1
y=seq(0,abs(bound),len=100)
image(x,y,z,col=colfunc_warm(100),axes=FALSE,xlab="",ylab="")
axis(2)
# NOTE(review): x is reassigned to a length-100 vector here while z stays a
# 1-row matrix, so this image() call errors (dim(z) must equal
# length(x) x length(y)); x should remain 1 with y holding the range.
x=seq(abs(bound)*-1,0,len=100)
image(x,y,z,col=colfunc_cold(100),axes=FALSE,xlab="",ylab="")
axis(2)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print.summary.bpr.R
\name{print.summary.bpr}
\alias{print.summary.bpr}
\title{Print method for class "summary.bpr"}
\usage{
\method{print}{summary.bpr}(x, ...)
}
\arguments{
\item{x}{object of class summary.bpr}
\item{...}{ignored}
}
\value{
call to summary.bpr
}
\description{
Print method for class "summary.bpr"
}
| /man/print.summary.bpr.Rd | no_license | lvhoskovec/mmpack | R | false | true | 398 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print.summary.bpr.R
\name{print.summary.bpr}
\alias{print.summary.bpr}
\title{Print method for class "summary.bpr"}
\usage{
\method{print}{summary.bpr}(x, ...)
}
\arguments{
\item{x}{object of class summary.bpr}
\item{...}{ignored}
}
\value{
call to summary.bpr
}
\description{
Print method for class "summary.bpr"
}
|
## File: plot4.R
## Date: 01-10-2016
## Author: bartvdt
## Version: 1
##
## Draws a 2x2 panel of household power-consumption plots for the two
## target days and writes it to plot4.png.
library(dplyr)
# Remember the starting directory; the script restores it afterwards.
PreviousWorkingDirectory <- getwd()
WorkingDirectory <- getwd()
WorkingDirectory <- paste(WorkingDirectory, "/Exploratory Data Analysis", sep = "")
setwd(WorkingDirectory)
# '?' marks missing values in the source data.
GrossData <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
# Keep only the two days of interest, then drop the big raw table.
WorkingData <- filter(GrossData, Date == "1/2/2007" | Date == "2/2/2007")
rm(GrossData)
## Begin plot 4
png(file = "plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2), mar = c(4.1, 4.1, 2, 2), oma = c(1, 1, 0, 0))
WorkingData <- mutate(WorkingData, DateTime = (paste(Date, Time, sep = " ")))
# Parse the timestamp ONCE instead of re-parsing it for every panel.
# (Also removed two dead `ylim = c(...)` assignments from the original,
# which created unused local vectors and never reached any plot call.)
datetime <- strptime(WorkingData$DateTime, "%d/%m/%Y %H:%M:%S")
# Panel 1: global active power over time.
plot(datetime, WorkingData$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power")
# Panel 2: voltage over time.
plot(datetime, WorkingData$Voltage, type = "l",
     xlab = "datetime", ylab = "Voltage")
# Panel 3: the three sub-metering series plus a legend.
plot(datetime, WorkingData$Sub_metering_1, type = "l",
     xlab = "", ylab = "Energy sub metering ")
lines(datetime, WorkingData$Sub_metering_2, type = "l", col = "red")
lines(datetime, WorkingData$Sub_metering_3, type = "l", col = "blue")
legend("topright", pch = "-", bty = "n",
       col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Panel 4: global reactive power over time.
plot(datetime, WorkingData$Global_reactive_power, type = "l",
     xlab = "datetime", ylab = "Global_reactive_power")
dev.off()
## End plot 4
setwd(PreviousWorkingDirectory) | /plot4.R | no_license | bartvdt/Exploratory-Data-Analysis | R | false | false | 1,912 | r | ## File: plot4.R
## Date: 01-10-2016
## Author: bartvdt
## Version: 1
##
# NOTE(review): duplicate copy of plot4.R (header line fused into the
# metadata row above); kept byte-identical, comments only.
library(dplyr)
PreviousWorkingDirectory <- getwd()
WorkingDirectory <- getwd()
WorkingDirectory <- paste(WorkingDirectory,"/Exploratory Data Analysis",sep="")
setwd(WorkingDirectory)
# '?' marks missing values in the source data.
GrossData <- read.table("household_power_consumption.txt",header=TRUE,sep=";",na.strings="?")
WorkingData <- filter(GrossData,Date=="1/2/2007" | Date=="2/2/2007" )
rm(GrossData)
## Begin plot 4
png(file="plot4.png",width=480,height=480)
par(mfrow = c(2,2),mar=c(4.1,4.1,2,2),oma=c(1,1,0,0))
WorkingData <- mutate(WorkingData,DateTime = (paste(Date,Time,sep=" ")))
with(WorkingData,
{
plot(strptime(WorkingData$DateTime,"%d/%m/%Y %H:%M:%S")
,WorkingData$Global_active_power,type="l"
,xlab="",ylab = "Global Active Power")
plot(strptime(WorkingData$DateTime,"%d/%m/%Y %H:%M:%S")
,WorkingData$Voltage,type="l"
,xlab="datetime",ylab = "Voltage")
# NOTE(review): dead assignment -- creates a local vector, never used.
ylim=c(234,238,242,246)
plot(strptime(WorkingData$DateTime,"%d/%m/%Y %H:%M:%S")
,WorkingData$Sub_metering_1,type="l"
,xlab="",ylab = "Energy sub metering "
)
lines(strptime(WorkingData$DateTime,"%d/%m/%Y %H:%M:%S"),WorkingData$Sub_metering_2,type="l",col="red")
lines(strptime(WorkingData$DateTime,"%d/%m/%Y %H:%M:%S"),WorkingData$Sub_metering_3,type="l",col="blue")
legend("topright", pch="-", bty="n"
,col=c("black","red","blue")
,legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3")
)
plot(strptime(WorkingData$DateTime,"%d/%m/%Y %H:%M:%S")
,WorkingData$Global_reactive_power,type="l"
,xlab="datetime",ylab="Global_reactive_power"
)
# NOTE(review): dead assignment, as above.
ylim=c(0.0,0.1,0.2,0.3,0.4,0.5)
})
dev.off()
## End plot 4
setwd(PreviousWorkingDirectory) |
##' QGIS Algorithm provided by QGIS (native c++) Assign projection (native:assignprojection)
##'
##' @title QGIS algorithm Assign projection
##'
##' @param INPUT `source` - Input layer. Path to a vector layer.
##' @param CRS `crs` - Assigned CRS. CRS as an auth ID (e.g. 'EPSG:3111'). CRS as a PROJ4 string (e.g. 'PROJ4:…'). CRS as a WKT string (e.g. 'WKT:…'). Path to a layer. The CRS of the layer is used..
##' @param OUTPUT `sink` - Assigned CRS. Path for new vector layer.
##' @param ... further parameters passed to `qgisprocess::qgis_run_algorithm()`
##' @param .complete_output logical specifing if complete out of `qgisprocess::qgis_run_algorithm()` should be used (`TRUE`) or first output (most likely the main) should read (`FALSE`). Default value is `TRUE`.
##'
##' @details
##' ## Outputs description
##' * OUTPUT - outputVector - Assigned CRS
##'
##'
##' @export
##' @md
##' @importFrom qgisprocess qgis_run_algorithm qgis_default_value
qgis_assignprojection <- function(INPUT = qgisprocess::qgis_default_value(), CRS = qgisprocess::qgis_default_value(), OUTPUT = qgisprocess::qgis_default_value(),..., .complete_output = TRUE) {
# Fail fast if the QGIS algorithm / qgisprocess setup is unavailable.
check_algorithm_necessities("native:assignprojection")
output <- qgisprocess::qgis_run_algorithm("native:assignprojection", `INPUT` = INPUT, `CRS` = CRS, `OUTPUT` = OUTPUT,...)
# Either return the complete qgis_run_algorithm() result, or extract
# just the main OUTPUT element.
if (.complete_output) {
return(output)
}
else{
qgisprocess::qgis_output(output, "OUTPUT")
}
} | /R/qgis_assignprojection.R | permissive | VB6Hobbyst7/r_package_qgis | R | false | false | 1,441 | r | ##' QGIS Algorithm provided by QGIS (native c++) Assign projection (native:assignprojection)
##'
##' @title QGIS algorithm Assign projection
##'
##' @param INPUT `source` - Input layer. Path to a vector layer.
##' @param CRS `crs` - Assigned CRS. CRS as an auth ID (e.g. 'EPSG:3111'). CRS as a PROJ4 string (e.g. 'PROJ4:…'). CRS as a WKT string (e.g. 'WKT:…'). Path to a layer. The CRS of the layer is used..
##' @param OUTPUT `sink` - Assigned CRS. Path for new vector layer.
##' @param ... further parameters passed to `qgisprocess::qgis_run_algorithm()`
##' @param .complete_output logical specifing if complete out of `qgisprocess::qgis_run_algorithm()` should be used (`TRUE`) or first output (most likely the main) should read (`FALSE`). Default value is `TRUE`.
##'
##' @details
##' ## Outputs description
##' * OUTPUT - outputVector - Assigned CRS
##'
##'
##' @export
##' @md
##' @importFrom qgisprocess qgis_run_algorithm qgis_default_value
# NOTE(review): duplicate of the same generated wrapper earlier in this
# file; its closing brace is fused into the metadata row below.
qgis_assignprojection <- function(INPUT = qgisprocess::qgis_default_value(), CRS = qgisprocess::qgis_default_value(), OUTPUT = qgisprocess::qgis_default_value(),..., .complete_output = TRUE) {
check_algorithm_necessities("native:assignprojection")
output <- qgisprocess::qgis_run_algorithm("native:assignprojection", `INPUT` = INPUT, `CRS` = CRS, `OUTPUT` = OUTPUT,...)
# Full result object, or just the main OUTPUT element.
if (.complete_output) {
return(output)
}
else{
qgisprocess::qgis_output(output, "OUTPUT")
}
} |
get_reference_names <- function(ribo.object){
  # Names of the reference transcripts stored under the "reference"
  # group of the .ribo HDF5 file.
  check_ribo(ribo.object)
  ref.group <- ribo.object$handle & "reference"
  h5read(ref.group, name = "reference_names")
}
get_reference_lengths <- function(ribo.object){
  # Two-column table (transcript name, transcript length) read from the
  # "reference" group of the .ribo HDF5 file.
  check_ribo(ribo.object)
  ref.group <- ribo.object$handle & "reference"
  data.table(transcript = h5read(ref.group, name = "reference_names"),
             length = h5read(ref.group, name = "reference_lengths"))
}
get_content_info <- function(ribo.handle) {
  # Summarise each experiment in the .ribo file: total read count and
  # whether coverage, RNA-seq and metadata entries are present.
  #
  # Args:
  #   ribo.handle: open HDF5 handle of a .ribo file.
  # Returns:
  #   data.table with columns experiment, total.reads, coverage,
  #   rna.seq, metadata (zero rows if the file holds no experiments).
  experiments <- h5ls(ribo.handle & 'experiments', recursive = FALSE)$name
  num.experiments <- length(experiments)
  # Preallocate the columns of the summary table.
  # (Renamed the original local `length`, which shadowed base::length.)
  reads.list <- vector(mode = "integer", length = num.experiments)
  coverage.list <- vector(mode = "logical", length = num.experiments)
  rna.seq.list <- vector(mode = "logical", length = num.experiments)
  metadata.list <- vector(mode = "logical", length = num.experiments)
  # h5ls() describes the contents of every experiment group at once.
  ls <- h5ls(ribo.handle)
  # seq_len() (not 1:n) is safe when there are zero experiments; the
  # original `1:length` iterated over c(1, 0) and crashed in that case.
  for (i in seq_len(num.experiments)) {
    experiment <- experiments[i]
    name <- paste("/experiments/", experiment, sep = "")
    # total_reads and (optionally) metadata live in the group attributes.
    attribute <- h5readAttributes(ribo.handle, name)
    reads.list[i] <- attribute[["total_reads"]]
    metadata.list[i] <- ("metadata" %in% names(attribute))
    # coverage / rnaseq are sub-entries of the experiment group.
    group.contents <- ls[ls$group == name, ]
    group.names <- group.contents$name
    coverage.list[i] <- ("coverage" %in% group.names)
    rna.seq.list[i] <- ("rnaseq" %in% group.names)
  }
  experiments.info <- data.table(experiment = experiments,
                                 total.reads = reads.list,
                                 coverage = coverage.list,
                                 rna.seq = rna.seq.list,
                                 metadata = metadata.list)
  return(experiments.info)
}
get_attributes <- function(ribo.object) {
  # Root-level HDF5 attributes of the .ribo file, with the volatile
  # "time" attribute removed.
  #
  # Returns: named list of root attributes, minus "time".
  handle <- ribo.object$handle
  attribute <- h5readAttributes(handle, "/")
  # Subset by name rather than -which(): if "time" were ever absent,
  # attribute[-which(...)] would be attribute[integer(0)] and silently
  # drop EVERY attribute.
  return(attribute[names(attribute) != "time"])
}
get_read_lengths <- function(ribo.object) {
  # c(minimum, maximum) read length recorded in the .ribo file's
  # root attributes.
  attributes <- get_attributes(ribo.object)
  c(attributes$length_min, attributes$length_max)
}
fill_matrix <- function(info) {
# helper method of the get_region_counts function that fills in the matrix
# Returns:
# result - an updated version of the matrix that has been filled in
#
# info is a list carrying: conditions (length/transcript condensing
# flags), ref.length (number of reference transcripts), data (one
# read-length slab, rows = transcripts), index (1-based experiment
# index), result (the preallocated accumulator), range.info (read
# length bounds) and current.length (the read length of `data`).
# NOTE: `length` here is a logical flag, not base::length.
length <- info[["conditions"]][["length"]]
transcript <- info[["conditions"]][["transcript"]]
ref.length <- info[["ref.length"]]
data <- info[["data"]]
index <- info[["index"]]
result <- info[["result"]]
range.lower <- info[["range.info"]][["range.lower"]]
range.upper <- info[["range.info"]][["range.upper"]]
current.length <- info[["current.length"]]
#sums across each length
if (length & !transcript) {
# One ref.length-tall band per experiment; accumulate (+=) each
# read-length slab into the band, so result must start at zero.
row.start <- (index - 1) * ref.length + 1
row.stop <- index * ref.length
#add the current matrix to the result
result[row.start:row.stop, ] <- result[row.start:row.stop, ] + data
} else if (transcript) {
# Collapse across transcripts first.
temp <- colSums(data)
if (length) {
#add to current result
# One row per experiment, accumulated across read lengths.
result[index, ] <- result[index, ] + temp
} else {
#compute the row offset
# One row per (experiment, read length); write once by assignment.
row.start <- (index - 1) * (range.upper - range.lower + 1)
row.start <- row.start + 1
row.start <- row.start + (current.length - range.lower)
#fill in matrix
result[row.start, ] <- temp
}
} else {
#compute the row offset
# One ref.length-tall band per (experiment, read length) pair;
# each band is written exactly once by assignment.
row.start <- (index - 1) * ref.length * (range.upper - range.lower + 1)
add <- (current.length - range.lower) * ref.length
row.start <- row.start + 1
row.start <- row.start + add
row.stop <- row.start + ref.length - 1
#fill in matrix
result[row.start:row.stop, ] <- data
}
return(result)
}
determine_matrix_size <- function(conditions,
                                  ref.length,
                                  total.indices,
                                  total.experiments,
                                  read.length.range) {
  # Build the empty accumulator matrix for region counts.
  #
  # conditions[["length"]]     -> condense across read lengths
  # conditions[["transcript"]] -> condense across transcripts
  #
  # When condensing across read lengths the matrix is accumulated with
  # `+`, so it must start at integer zero; otherwise every cell is
  # written exactly once by assignment and the default NA fill suffices.
  condense.length <- conditions[["length"]]
  condense.transcript <- conditions[["transcript"]]
  n.row <- if (condense.length && condense.transcript) {
    total.experiments
  } else if (condense.length) {
    ref.length * total.experiments
  } else if (condense.transcript) {
    read.length.range * total.experiments
  } else {
    ref.length * read.length.range * total.experiments
  }
  if (condense.length) {
    matrix(0L, nrow = n.row, ncol = total.indices)
  } else {
    matrix(nrow = n.row, ncol = total.indices)
  }
}
make_datatable <- function(ref.names,
transcript,
length,
matched.list,
range.lower,
range.upper,
matrix) {
# helper function that creates a polished data table out of the matrix
#
# Returns:
# Data table containing the metagene data based on the specifications of the
# experiments to include, read length ranges, sum.transcript, aggregate,
# and the matrix structure
#
# NOTE: `length` is a logical flag parameter; the calls length(...) below
# still resolve to base::length because R skips non-function bindings
# when looking up a function name.
total.list <- length(matched.list)
ref.length <- length(ref.names)
num.reads <- range.upper - range.lower + 1
#determine columns for each separate case
# Row ordering of each case mirrors fill_matrix's row layout, so the
# label columns built with rep() must line up with the matrix rows.
if (transcript & length) {
# NOTE(review): experiment.list is assigned but never used.
experiment.list <- matched.list
return (data.table(experiment = matched.list,
matrix))
} else if (transcript) { #sum.transcripts only
# Rows grouped by experiment, then by read length within experiment.
experiment.column <- rep(matched.list, each = num.reads)
read.column <- rep(c(range.lower:range.upper), total.list)
return (data.table(experiment = experiment.column,
length = read.column,
matrix))
} else if (length) { #aggregate only
# Rows grouped by experiment, then by transcript within experiment.
experiment.column <- rep(matched.list, each = ref.length)
transcript.column <- rep(ref.names, total.list)
return (data.table(experiment = experiment.column,
transcript = transcript.column,
matrix))
}
#!sum.transcripts and !aggregate
# Rows grouped by experiment, then read length, then transcript.
experiment.column <- rep(matched.list, each = num.reads * ref.length)
transcripts <- rep(ref.names, total.list * num.reads)
ref.read <- rep(c(range.lower:range.upper), each = ref.length)
read.length <- rep(ref.read, total.list)
return (data.table(experiment = experiment.column,
transcript = transcripts,
read.length = read.length,
matrix))
}
| /ribor/R/helper_functions.R | permissive | ijhoskins/riboR_alpha | R | false | false | 7,781 | r | get_reference_names <- function(ribo.object){
# Retrieves the reference transcript names
# NOTE(review): tail of a duplicated get_reference_names definition whose
# opening line is fused into the metadata row above.
check_ribo(ribo.object)
return(h5read(ribo.object$handle&'reference', name = "reference_names"))
}
# NOTE(review): duplicate copies of helper functions that already appear
# earlier in this file; kept byte-identical, comments only.
get_reference_lengths <- function(ribo.object){
# Retrieves the reference transcript lengths
check_ribo(ribo.object)
row.names <- h5read(ribo.object$handle&'reference',
name = "reference_names")
lengths <- h5read(ribo.object$handle&'reference',
name = "reference_lengths")
return(data.table(transcript = row.names, length = lengths))
}
# Summarises each experiment group: total reads plus presence of
# coverage, RNA-seq and metadata entries.
get_content_info <- function(ribo.handle) {
experiments <- h5ls(ribo.handle&'experiments', recursive = FALSE)$name
# NOTE(review): local `length` shadows base::length, and the 1:length
# loop below iterates over c(1, 0) (and crashes) when there are zero
# experiments; seq_len() would be safe.
length <- length(experiments)
#creates the separate lists for reads, coverage, rna.seq, and metadata
#to eventually put in a data frame
reads.list <- vector(mode = "integer", length = length)
coverage.list <- vector(mode = "logical", length = length)
rna.seq.list <- vector(mode = "logical", length = length)
metadata.list <- vector(mode = "logical", length = length)
#ls function provides information about the contents of each experiment
ls <- h5ls(ribo.handle)
#loop over all of the experiments
for (i in 1:length) {
experiment <- experiments[i]
#gathers information on the number of reads for each experiment by looking at
#the attributes
name <- paste("/experiments/", experiment, sep = "")
attribute <- h5readAttributes(ribo.handle, name)
reads.list[i] <- attribute[["total_reads"]]
#creates separate logical lists to denote the presence of
#reads, coverage, RNA-seq, metadata
metadata.list[i] <- ("metadata" %in% names(attribute))
group.contents <- ls[ls$group == name,]
group.names <- group.contents$name
coverage.list[i] <- ("coverage" %in% group.names)
rna.seq.list[i] <- ("rnaseq" %in% group.names)
}
experiments.info <- data.table(experiment = experiments,
total.reads = reads.list,
coverage = coverage.list,
rna.seq = rna.seq.list,
metadata = metadata.list)
return(experiments.info)
}
get_attributes <- function(ribo.object) {
# Retrieves the attributes of the ribo.object
# NOTE(review): -which() yields an empty result if "time" is absent,
# which would drop every attribute; name-based subsetting is safer.
handle <- ribo.object$handle
attribute <- h5readAttributes(handle, "/")
return(attribute[-which(names(attribute) == "time")])
}
get_read_lengths <- function(ribo.object) {
# Retrieves the minimum and maximum read lengths
#
# get_read_lengths finds the minimum and maximum read lengths of the .ribo file
attributes <- get_attributes(ribo.object)
result <- c(attributes$length_min, attributes$length_max)
return(result)
}
# NOTE(review): duplicate of fill_matrix earlier in this file; kept
# byte-identical, comments only.
fill_matrix <- function(info) {
# helper method of the get_region_counts function that fills in the matrix
# Returns:
# result - an updated version of the matrix that has been filled in
# `length` below is a logical condensing flag, not base::length.
length <- info[["conditions"]][["length"]]
transcript <- info[["conditions"]][["transcript"]]
ref.length <- info[["ref.length"]]
data <- info[["data"]]
index <- info[["index"]]
result <- info[["result"]]
range.lower <- info[["range.info"]][["range.lower"]]
range.upper <- info[["range.info"]][["range.upper"]]
current.length <- info[["current.length"]]
#sums across each length
if (length & !transcript) {
# Accumulate each read-length slab into the experiment's band.
row.start <- (index - 1) * ref.length + 1
row.stop <- index * ref.length
#add the current matrix to the result
result[row.start:row.stop, ] <- result[row.start:row.stop, ] + data
} else if (transcript) {
temp <- colSums(data)
if (length) {
#add to current result
result[index, ] <- result[index, ] + temp
} else {
#compute the row offset
# One row per (experiment, read length); written exactly once.
row.start <- (index - 1) * (range.upper - range.lower + 1)
row.start <- row.start + 1
row.start <- row.start + (current.length - range.lower)
#fill in matrix
result[row.start, ] <- temp
}
} else {
#compute the row offset
# One ref.length-tall band per (experiment, read length) pair.
row.start <- (index - 1) * ref.length * (range.upper - range.lower + 1)
add <- (current.length - range.lower) * ref.length
row.start <- row.start + 1
row.start <- row.start + add
row.stop <- row.start + ref.length - 1
#fill in matrix
result[row.start:row.stop, ] <- data
}
return(result)
}
# NOTE(review): duplicate of determine_matrix_size earlier in this file;
# kept byte-identical, comments only.  The 0L vs default-NA fill is
# intentional: length-condensed matrices are accumulated with `+` and must
# start at zero, while the others are written once by assignment.
determine_matrix_size <- function(conditions,
ref.length,
total.indices,
total.experiments,
read.length.range) {
# helper function that determines the size of the matrix
# Returns:
# Matrix of the appropriate size based on the read lengths,
# number of experiments, number of reference transcripts, sum.transcripts
# and aggregate
length <- conditions[["length"]]
transcript <- conditions[["transcript"]]
if (length & transcript) {
return (matrix(0L,
nrow = total.experiments,
ncol = total.indices))
} else if (length) { #condense across lengths only
return (matrix(0L,
nrow = ref.length * total.experiments,
ncol = total.indices))
} else if (transcript) { #condense across transcript only
return (matrix(nrow = read.length.range * total.experiments,
ncol = total.indices))
} #!transcript & !length
return (matrix(nrow = ref.length * read.length.range * total.experiments,
ncol = total.indices))
}
# NOTE(review): duplicate of make_datatable earlier in this file; kept
# byte-identical, comments only.  `length` is a logical flag parameter;
# the length(...) calls still resolve to base::length.
make_datatable <- function(ref.names,
transcript,
length,
matched.list,
range.lower,
range.upper,
matrix) {
# helper function that creates a polished data table out of the matrix
#
# Returns:
# Data table containing the metagene data based on the specifications of the
# experiments to include, read length ranges, sum.transcript, aggregate,
# and the matrix structure
total.list <- length(matched.list)
ref.length <- length(ref.names)
num.reads <- range.upper - range.lower + 1
#determine columns for each separate case
if (transcript & length) {
# NOTE(review): experiment.list is assigned but never used.
experiment.list <- matched.list
return (data.table(experiment = matched.list,
matrix))
} else if (transcript) { #sum.transcripts only
experiment.column <- rep(matched.list, each = num.reads)
read.column <- rep(c(range.lower:range.upper), total.list)
return (data.table(experiment = experiment.column,
length = read.column,
matrix))
} else if (length) { #aggregate only
experiment.column <- rep(matched.list, each = ref.length)
transcript.column <- rep(ref.names, total.list)
return (data.table(experiment = experiment.column,
transcript = transcript.column,
matrix))
}
#!sum.transcripts and !aggregate
experiment.column <- rep(matched.list, each = num.reads * ref.length)
transcripts <- rep(ref.names, total.list * num.reads)
ref.read <- rep(c(range.lower:range.upper), each = ref.length)
read.length <- rep(ref.read, total.list)
return (data.table(experiment = experiment.column,
transcript = transcripts,
read.length = read.length,
matrix))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readParamsCSV.R
\name{readParamsCSV}
\alias{readParamsCSV}
\title{Read a TCSAM02 model parameters csv file and return a dataframe}
\usage{
readParamsCSV(csvFile = NULL)
}
\arguments{
\item{csvFile}{\itemize{
\item Parameters csv file from a TCSAM02 model run. Can be NULL.
}}
}
\value{
a dataframe (or NULL).
}
\description{
Function to read a TCSAM02 parameters csv file and return a dataframe.
}
\details{
If csvFile is NULL, the user will be prompted to identify a
TCSAM02 model parameters csv file to read. Uses functions
\itemize{
\item \code{wtsUtilities::selectFile(...)}
}
}
| /man/readParamsCSV.Rd | permissive | wStockhausen/rTCSAM02 | R | false | true | 661 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readParamsCSV.R
\name{readParamsCSV}
\alias{readParamsCSV}
\title{Read a TCSAM02 model parameters csv file and return a dataframe}
\usage{
readParamsCSV(csvFile = NULL)
}
\arguments{
\item{csvFile}{\itemize{
\item Parameters csv file from a TCSAM02 model run. Can be NULL.
}}
}
\value{
a dataframe (or NULL).
}
\description{
Function to read a TCSAM02 parameters csv file and return a dataframe.
}
\details{
If csvFile is NULL, the user will be prompted to identify a
TCSAM02 model parameters csv file to read. Uses functions
\itemize{
\item \code{wtsUtilities::selectFile(...)}
}
}
|
library(poker)
### Name: tiebreaker
### Title: tiebreaker
### Aliases: tiebreaker
### ** Examples
cards <- c(2,1,4,2,5,3,6,4,7,1,13,2,14,3,2,3,3,4,5,1,6,2,7,3,13,4,14,1)
cards <- matrix(cards,2,14,byrow=TRUE); cards
score <- showdown(cards); score
nPlayers <- nrow(cards); nPlayers
tiebreaker(nPlayers,cards,score)
cards <- c(2,1,3,2,5,3,6,4,7,1,13,2,14,3,2,3,3,4,5,1,6,2,7,3,13,4,14,1)
cards <- matrix(cards,2,14,byrow=TRUE); cards
score <- showdown(cards); score
nPlayers <- nrow(cards); nPlayers
tiebreaker(nPlayers,cards,score)
cards <- c(2,3,4,5,1,1,1,1,2,3,6,7,2,2,2,2,4,4,4,4,3,3,3,3,11,11,11,11,3,3,3,3)
cards <- c(cards,13,13,13,13,3,3,3,3,14,14,14,14,3,3,3,3,9,9,9,9,4,4,4,4)
cards <- matrix(cards,nrow=4,ncol=14); cards
score <- showdown(cards); score
nPlayers <- nrow(cards); nPlayers
tiebreaker(nPlayers,cards,score)
cards <- c(2,3,4,5,1,1,1,1,2,3,6,7,2,2,2,2,4,4,4,4,3,3,3,3,11,11,11,11,3,3,3,3)
cards <- c(cards,13,13,13,13,3,3,3,3,14,14,14,14,3,3,3,3,14,14,14,14,4,4,4,4)
cards <- matrix(cards,nrow=4,ncol=14); cards
score <- showdown(cards); score
nPlayers <- nrow(cards); nPlayers
tiebreaker(nPlayers,cards,score)
cards <- c(14,14,4,5,1,2,1,1,10,9,6,7,2,2,2,2,4,4,4,4,3,3,3,3,8,8,8,8,3,3,3,3)
cards <-c(cards,13,13,13,13,3,3,3,3,14,14,14,14,3,3,3,3,14,14,14,14,4,4,4,4)
cards <- matrix(cards,nrow=4,ncol=14); cards
score <- showdown(cards); score
nPlayers <- nrow(cards); nPlayers
tiebreaker(nPlayers,cards,score)
cards <- c(14,14,4,5,1,2,1,1,2,3,6,7,2,2,2,2,4,4,4,4,3,3,3,3,11,11,11,11,3,3,3,3)
cards <-c(cards,13,13,13,13,3,3,3,3,14,14,14,14,3,3,3,3,14,14,14,14,4,4,4,4)
cards <- matrix(cards,nrow=4,ncol=14); cards
score <- showdown(cards); score
nPlayers <- nrow(cards); nPlayers
tiebreaker(nPlayers,cards,score)
cards <- c(7,1,4,2,4,1,4,3,10,1,11,2,2,2,2,3,3,3,3,3,3,1,1,1,5,5,5,4,4,4,6,6,6)
cards <-c(cards,2,2,2,14,14,14,2,2,2)
cards <- matrix(cards,nrow=3,ncol=14); cards
score <- showdown(cards); score
nPlayers <- nrow(cards); nPlayers
tiebreaker(nPlayers,cards,score)
cards <- c(2,1,4,2,4,1,4,3,10,1,11,2,2,2,2,3,3,3,3,3,3,1,1,1,5,5,5,4,4,4,6,6,6)
cards <-c(cards,2,2,2,14,14,14,2,2,2)
cards <- matrix(cards,nrow=3,ncol=14); cards
score <- showdown(cards); score
nPlayers <- nrow(cards); nPlayers
tiebreaker(nPlayers,cards,score)
cards <- c(2,1,3,3,5,2,6,3,7,3,13,3,14,3,2,3,3,4,5,1,6,3,7,3,13,3,14,3)
cards <- matrix(cards,2,14,byrow=TRUE); cards
score <- showdown(cards); score
nPlayers <- nrow(cards); nPlayers
tiebreaker(nPlayers,cards,score)
cards <- c(2,1,3,3,5,3,6,3,7,3,13,3,14,3,2,3,3,4,5,3,6,3,7,3,13,3,14,3)
cards <- matrix(cards,2,14,byrow=TRUE); cards
score <- showdown(cards); score
nPlayers <- nrow(cards); nPlayers
tiebreaker(nPlayers,cards,score)
cards <- c(5,10,4,8,1,2,1,1,10,9,6,7,3,2,2,2,5,5,5,5,3,3,3,3,8,8,8,8,3,3,3,3)
cards <-c(cards,14,14,14,14,2,2,2,2,14,14,14,14,3,3,3,3,14,14,14,14,4,4,4,4)
cards <- matrix(cards,nrow=4,ncol=14); cards
score <- showdown(cards); score
nPlayers <- nrow(cards); nPlayers
tiebreaker(nPlayers,cards,score)
cards <- c(5,10,4,8,1,2,1,1,10,9,6,7,3,2,2,2,12,12,12,12,1,1,1,1,12,12,12,12)
cards <-c(cards,3,3,3,3,14,14,14,14,2,2,2,2,14,14,14,14,3,3,3,3,14,14,14,14,4,4,4,4)
cards <- matrix(cards,nrow=4,ncol=14); cards
score <- showdown(cards); score
nPlayers <- nrow(cards); nPlayers
tiebreaker(nPlayers,cards,score)
cards <- c(14,10,5,1,2,1,14,9,7,2,2,2,4,4,4,3,3,3,8,8,8,3,3,3,13,13,13)
cards <-c(cards,3,3,3,14,14,14,3,3,3,14,14,14,4,4,4)
cards <- matrix(cards,nrow=3,ncol=14); cards
score <- showdown(cards); score
nPlayers <- nrow(cards); nPlayers
tiebreaker(nPlayers,cards,score)
cards <- c(3,4,5,1,1,1,8,9,10,1,1,1,14,14,14,1,1,1,14,14,14,2,2,2,11,11,11)
cards <-c(cards,3,3,3,14,14,14,3,3,3,14,14,14,4,4,4)
cards <- matrix(cards,nrow=3,ncol=14); cards
score <- showdown(cards); score
nPlayers <- nrow(cards); nPlayers
tiebreaker(nPlayers,cards,score)
cards <- c(8,13,5,1,1,4,6,2,2,2,3,4,14,14,14,2,2,2,9,9,9,1,1,1,10,10,10)
cards <-c(cards,1,1,1,11,11,11,1,1,1,12,12,12,1,1,1)
cards <- matrix(cards,nrow=3,ncol=14); cards
score <- showdown(cards); score
nPlayers <- nrow(cards); nPlayers
tiebreaker(nPlayers,cards,score)
cards <- c(1,1,3,4,2,2,3,4,8,8,1,1,9,9,1,1,10,10,1,1,11,11,1,1,12,12,1,1)
cards <- matrix(cards,nrow=2,ncol=14); cards
score <- showdown(cards); score
nPlayers <- nrow(cards); nPlayers
tiebreaker(nPlayers,cards,score)
| /data/genthat_extracted_code/poker/examples/tiebreaker.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 4,351 | r | library(poker)
### Name: tiebreaker
### Title: tiebreaker
### Aliases: tiebreaker
### ** Examples
cards <- c(2,1,4,2,5,3,6,4,7,1,13,2,14,3,2,3,3,4,5,1,6,2,7,3,13,4,14,1)
cards <- matrix(cards,2,14,byrow=TRUE); cards
score <- showdown(cards); score
nPlayers <- nrow(cards); nPlayers
tiebreaker(nPlayers,cards,score)
cards <- c(2,1,3,2,5,3,6,4,7,1,13,2,14,3,2,3,3,4,5,1,6,2,7,3,13,4,14,1)
cards <- matrix(cards,2,14,byrow=TRUE); cards
score <- showdown(cards); score
nPlayers <- nrow(cards); nPlayers
tiebreaker(nPlayers,cards,score)
cards <- c(2,3,4,5,1,1,1,1,2,3,6,7,2,2,2,2,4,4,4,4,3,3,3,3,11,11,11,11,3,3,3,3)
cards <- c(cards,13,13,13,13,3,3,3,3,14,14,14,14,3,3,3,3,9,9,9,9,4,4,4,4)
cards <- matrix(cards,nrow=4,ncol=14); cards
score <- showdown(cards); score
nPlayers <- nrow(cards); nPlayers
tiebreaker(nPlayers,cards,score)
cards <- c(2,3,4,5,1,1,1,1,2,3,6,7,2,2,2,2,4,4,4,4,3,3,3,3,11,11,11,11,3,3,3,3)
cards <- c(cards,13,13,13,13,3,3,3,3,14,14,14,14,3,3,3,3,14,14,14,14,4,4,4,4)
cards <- matrix(cards,nrow=4,ncol=14); cards
score <- showdown(cards); score
nPlayers <- nrow(cards); nPlayers
tiebreaker(nPlayers,cards,score)
cards <- c(14,14,4,5,1,2,1,1,10,9,6,7,2,2,2,2,4,4,4,4,3,3,3,3,8,8,8,8,3,3,3,3)
cards <-c(cards,13,13,13,13,3,3,3,3,14,14,14,14,3,3,3,3,14,14,14,14,4,4,4,4)
cards <- matrix(cards,nrow=4,ncol=14); cards
score <- showdown(cards); score
nPlayers <- nrow(cards); nPlayers
tiebreaker(nPlayers,cards,score)
cards <- c(14,14,4,5,1,2,1,1,2,3,6,7,2,2,2,2,4,4,4,4,3,3,3,3,11,11,11,11,3,3,3,3)
cards <-c(cards,13,13,13,13,3,3,3,3,14,14,14,14,3,3,3,3,14,14,14,14,4,4,4,4)
cards <- matrix(cards,nrow=4,ncol=14); cards
score <- showdown(cards); score
nPlayers <- nrow(cards); nPlayers
tiebreaker(nPlayers,cards,score)
cards <- c(7,1,4,2,4,1,4,3,10,1,11,2,2,2,2,3,3,3,3,3,3,1,1,1,5,5,5,4,4,4,6,6,6)
cards <-c(cards,2,2,2,14,14,14,2,2,2)
cards <- matrix(cards,nrow=3,ncol=14); cards
score <- showdown(cards); score
nPlayers <- nrow(cards); nPlayers
tiebreaker(nPlayers,cards,score)
cards <- c(2,1,4,2,4,1,4,3,10,1,11,2,2,2,2,3,3,3,3,3,3,1,1,1,5,5,5,4,4,4,6,6,6)
cards <-c(cards,2,2,2,14,14,14,2,2,2)
cards <- matrix(cards,nrow=3,ncol=14); cards
score <- showdown(cards); score
nPlayers <- nrow(cards); nPlayers
tiebreaker(nPlayers,cards,score)
cards <- c(2,1,3,3,5,2,6,3,7,3,13,3,14,3,2,3,3,4,5,1,6,3,7,3,13,3,14,3)
cards <- matrix(cards,2,14,byrow=TRUE); cards
score <- showdown(cards); score
nPlayers <- nrow(cards); nPlayers
tiebreaker(nPlayers,cards,score)
cards <- c(2,1,3,3,5,3,6,3,7,3,13,3,14,3,2,3,3,4,5,3,6,3,7,3,13,3,14,3)
cards <- matrix(cards,2,14,byrow=TRUE); cards
score <- showdown(cards); score
nPlayers <- nrow(cards); nPlayers
tiebreaker(nPlayers,cards,score)
cards <- c(5,10,4,8,1,2,1,1,10,9,6,7,3,2,2,2,5,5,5,5,3,3,3,3,8,8,8,8,3,3,3,3)
cards <-c(cards,14,14,14,14,2,2,2,2,14,14,14,14,3,3,3,3,14,14,14,14,4,4,4,4)
cards <- matrix(cards,nrow=4,ncol=14); cards
score <- showdown(cards); score
nPlayers <- nrow(cards); nPlayers
tiebreaker(nPlayers,cards,score)
cards <- c(5,10,4,8,1,2,1,1,10,9,6,7,3,2,2,2,12,12,12,12,1,1,1,1,12,12,12,12)
cards <-c(cards,3,3,3,3,14,14,14,14,2,2,2,2,14,14,14,14,3,3,3,3,14,14,14,14,4,4,4,4)
cards <- matrix(cards,nrow=4,ncol=14); cards
score <- showdown(cards); score
nPlayers <- nrow(cards); nPlayers
tiebreaker(nPlayers,cards,score)
cards <- c(14,10,5,1,2,1,14,9,7,2,2,2,4,4,4,3,3,3,8,8,8,3,3,3,13,13,13)
cards <-c(cards,3,3,3,14,14,14,3,3,3,14,14,14,4,4,4)
cards <- matrix(cards,nrow=3,ncol=14); cards
score <- showdown(cards); score
nPlayers <- nrow(cards); nPlayers
tiebreaker(nPlayers,cards,score)
cards <- c(3,4,5,1,1,1,8,9,10,1,1,1,14,14,14,1,1,1,14,14,14,2,2,2,11,11,11)
cards <-c(cards,3,3,3,14,14,14,3,3,3,14,14,14,4,4,4)
cards <- matrix(cards,nrow=3,ncol=14); cards
score <- showdown(cards); score
nPlayers <- nrow(cards); nPlayers
tiebreaker(nPlayers,cards,score)
cards <- c(8,13,5,1,1,4,6,2,2,2,3,4,14,14,14,2,2,2,9,9,9,1,1,1,10,10,10)
cards <-c(cards,1,1,1,11,11,11,1,1,1,12,12,12,1,1,1)
cards <- matrix(cards,nrow=3,ncol=14); cards
score <- showdown(cards); score
nPlayers <- nrow(cards); nPlayers
tiebreaker(nPlayers,cards,score)
cards <- c(1,1,3,4,2,2,3,4,8,8,1,1,9,9,1,1,10,10,1,1,11,11,1,1,12,12,1,1)
cards <- matrix(cards,nrow=2,ncol=14); cards
score <- showdown(cards); score
nPlayers <- nrow(cards); nPlayers
tiebreaker(nPlayers,cards,score)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/examples.r
\name{example_attron}
\alias{example_attron}
\title{attron example}
\usage{
example_attron(sleep = 5)
}
\arguments{
\item{sleep}{sleep time before exiting}
}
\description{
attron example
}
\seealso{
Other examples: \code{\link{example_attributes}},
\code{\link{example_box}},
\code{\link{example_change_color}},
\code{\link{example_color_set}},
\code{\link{example_color}}, \code{\link{example_getch}},
\code{\link{example_getnstr}},
\code{\link{example_hello}},
\code{\link{example_mouse_debug}},
\code{\link{example_mouse}},
\code{\link{example_mvprintw}},
\code{\link{example_window_border}},
\code{\link{example_window}}
}
| /Rcurses/man/example_attron.Rd | no_license | kforner/rcurses | R | false | true | 739 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/examples.r
\name{example_attron}
\alias{example_attron}
\title{attron example}
\usage{
example_attron(sleep = 5)
}
\arguments{
\item{sleep}{sleep time before exiting}
}
\description{
attron example
}
\seealso{
Other examples: \code{\link{example_attributes}},
\code{\link{example_box}},
\code{\link{example_change_color}},
\code{\link{example_color_set}},
\code{\link{example_color}}, \code{\link{example_getch}},
\code{\link{example_getnstr}},
\code{\link{example_hello}},
\code{\link{example_mouse_debug}},
\code{\link{example_mouse}},
\code{\link{example_mvprintw}},
\code{\link{example_window_border}},
\code{\link{example_window}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/colConvert.r
\name{colConvert}
\alias{colConvert}
\title{Convert Columns}
\usage{
colConvert(data, columns = "Height|Size|Data.Point",
ignore.case = TRUE, fixed = FALSE, debug = FALSE)
}
\arguments{
\item{data}{data.frame.}
\item{columns}{character string containing a regular expression
(or character string for fixed = TRUE) to be matched in the given
character vector (separate multiple column names by | in reg.exp).}
\item{ignore.case}{logical TRUE to ignore case in matching.}
\item{fixed}{logical TRUE if columns is a string to be matched as is.}
\item{debug}{logical indicating printing debug information.}
}
\value{
data.frame.
}
\description{
Internal helper function.
}
\details{
Takes a data frame as input and returns it after converting known numeric
columns to numeric.
}
| /man/colConvert.Rd | no_license | sctyner/strvalidator | R | false | true | 870 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/colConvert.r
\name{colConvert}
\alias{colConvert}
\title{Convert Columns}
\usage{
colConvert(data, columns = "Height|Size|Data.Point",
ignore.case = TRUE, fixed = FALSE, debug = FALSE)
}
\arguments{
\item{data}{data.frame.}
\item{columns}{character string containing a regular expression
(or character string for fixed = TRUE) to be matched in the given
character vector (separate multiple column names by | in reg.exp).}
\item{ignore.case}{logical TRUE to ignore case in matching.}
\item{fixed}{logical TRUE if columns is a string to be matched as is.}
\item{debug}{logical indicating printing debug information.}
}
\value{
data.frame.
}
\description{
Internal helper function.
}
\details{
Takes a data frame as input and return it after converting known numeric
columns to numeric.
}
|
render_g2r <- function(g2){
  # Assemble the final htmlwidget payload for a g2 chart: resolve global
  # options, convert every layer into a G2 "view" specification, translate
  # facets into a JavaScript eachView callback, serialise the data, and strip
  # internal bookkeeping fields before handing off to the JS side.
  #
  # Args:
  #   g2: the widget object; its `x` slot carries mapping, scales, layers,
  #       guides, facet and data accumulated by the g2r API.
  # Returns:
  #   The same widget with `x` rewritten into the renderer's expected shape.
  main_mapping <- g2$x$mapping
  scales <- g2$x$scales
  combined_mapping <- combine_aes(main_mapping, g2$x$layers)
  # global options fall back to package-level options() when unset
  if(is.null(g2$x$font)) g2$x$font <- getOption("g2_font")
  if(is.null(g2$x$renderer)) g2$x$renderer <- getOption("g2_renderer")
  if(is.null(g2$x$theme)) g2$x$theme <- getOption("g2_theme")
  views <- list()
  # NOTE(review): 1:length(...) misbehaves when there are no layers (yields
  # c(1, 0)) — confirm a chart always carries at least one layer.
  for(i in 1:length(g2$x$layers)){
    layer <- g2$x$layers[[i]]
    # merge the chart-level aesthetics with this layer's own
    aes <- mutate_aes(main_mapping, layer$mapping, layer$inherit_aes)
    # build the geom spec: type plus one entry per applicable method
    geom <- list(type = layer$type)
    for(method in method_and_aes$method){
      meth <- add_geom_method(method, aes, scales)
      meth <- meth[lapply(meth, length) > 0] # drop empty/unused methods
      geom <- append(geom, meth)
    }
    # guides that target this layer by name or position
    guides <- get_guides(g2$x$guides, layer$name, index = i)
    # animation (consumed here; removed from the layer afterwards)
    if(length(layer$animate))
      geom$animate <- layer$animate
    layer$animate <- NULL
    if(length(layer$adjust))
      geom$adjust <- layer$adjust
    if(length(layer$style))
      geom$style <- layer$style
    # layer-specific data, if any, is turned into a row-wise list
    if(!is.null(layer$data))
      layer$data <- layer$data %>%
        process_data(aes)
    # skeleton of the G2 view for this layer
    view <- list(
      layer = list(
        options = list(
          geoms = list(geom)
        )
      )
    )
    if(length(layer$data))
      view$data <- layer$data
    if(length(layer$opts))
      view$layer$options <- append(view$layer$options, layer$opts)
    if(length(guides))
      view$layer$options$guides <- guides
    views <- append(views, list(view))
  }
  g2$x$layers <- views # replace layers with the assembled views
  # faceting: the views are re-expressed as a JS eachView callback; the
  # layer list itself is dropped because the callback recreates each view.
  if(!is.null(g2$x$facet)){
    final_func <- ""
    for(v in 1:length(views)){
      # e.g. "view.interval()" followed by the chained method calls
      type_func <- paste0("view.", views[[v]]$layer$options$geoms[[1]]$type, "()")
      method_func <- paste_facet(views[[v]]$layer$options$geoms[[1]])
      view_func <- paste0(type_func, method_func, ";\n")
      final_func <- paste0(final_func, view_func)
    }
    each_view_func <- paste0("function eachView(view){", final_func, "}")
    each_view_func <- htmlwidgets::JS(each_view_func)
    g2$x$layers <- NULL
    g2$x$facet$opts$eachView <- each_view_func
    g2$x$facet$facet <- NULL
  }
  # main data as a row-wise list
  g2$x$data <- g2$x$data %>%
    process_data(combined_mapping)
  if(debug_mode())
    print(jsonlite::toJSON(g2$x, auto_unbox = TRUE, pretty = TRUE, force = TRUE))
  # prune empty entries from the per-column data options
  if(length(g2$x$dataOpts))
    g2$x$dataOpts <- map(g2$x$dataOpts, function(x){
      keep(x, function(y){
        length(y) >= 1
      })
    })
  # remove R-side bookkeeping the JS renderer must not see
  g2$x$scales <- NULL
  g2$x$mapping <- NULL
  g2$x$guides <- NULL
  g2
}
paste_facet <- function(methods) {
  # Render a geom's method list as chained JavaScript calls, e.g.
  # ".position(\"x*y\").color(\"grp\")", for use inside a facet eachView.
  calls <- map2(methods, names(methods), function(args, method_name) {
    body <- paste0(convert_to_json(args), collapse = ",")
    paste0(method_name, "(", body, ")")
  })
  # The first entry is the chart type; the caller renders it separately
  # as "view.<type>()", so drop it here.
  calls[[1]] <- NULL
  chained <- paste0(calls, collapse = ".")
  paste0(".", chained)
}
convert_to_json <- function(x) {
  # Serialise `x` to JSON, flattening multi-element inputs to a plain vector
  # so they render as a JSON array rather than a nested object.
  #
  # BUG FIX: the original computed toJSON(unlist(x)) for length(x) > 1 but
  # discarded the result (missing return), so the branch had no effect.
  if (length(x) > 1) {
    return(jsonlite::toJSON(unlist(x), auto_unbox = TRUE))
  }
  jsonlite::toJSON(x, auto_unbox = TRUE)
}
# get guides
get_guides <- function(guides, name, index = 1){
  # Select the guides that apply to the layer identified by `name` and/or its
  # position `index`.
  #
  # A guide with no `figures` restriction is attached to the first layer only
  # (so it is emitted exactly once); otherwise it is included when either the
  # layer's positional index or its name appears in `figures`.
  #
  # Fixes: seq_along() so an empty `guides` list returns list() instead of
  # erroring on guides[[1]] (1:length(guides) would iterate c(1, 0));
  # any(...) replaces the equivalent but opaque sum(...) >= 1.
  guides_included <- list()
  for (i in seq_along(guides)) {
    if (is.null(guides[[i]]$figures) && index == 1) {
      guides_included <- append(guides_included, list(guides[[i]]$guide))
    } else if (any(index %in% guides[[i]]$figures) || any(name %in% guides[[i]]$figures)) {
      guides_included <- append(guides_included, list(guides[[i]]$guide))
    }
  }
  # drop empty/NULL guides
  guides_included[lapply(guides_included, length) > 0]
}
# build basic geom method
build_geom_method <- function(aes, vars) {
  # Build the argument vector for a geom method from the aesthetics that the
  # method understands; NULL when none of them were mapped.
  relevant <- aes[names(aes) %in% vars]
  if (!length(relevant)) {
    return(NULL)
  }
  # Quosures become their expression text; plain values pass through as-is.
  fields <- unlist(map(relevant, function(m) {
    if (rlang::is_quosure(m)) {
      rlang::quo_name(m)
    } else {
      m
    }
  }))
  # Order by aesthetic name so, e.g., x precedes y for the position method.
  fields <- fields[order(names(fields))]
  list(unname(fields))
}
# add geom
add_geom_method <- function(name, aes, scales){
  # Build the spec for one geom method (e.g. position, color) by matching the
  # mapped aesthetics against the method/aesthetic plan, then attaching any
  # scale options that target the same name.
  #
  # Args:
  #   name: the method name being built (a value from method_and_aes$method).
  #   aes: the layer's merged aesthetics (named list, possibly quosures).
  #   scales: named list of scale option lists, keyed by aesthetic name.
  # Returns:
  #   A one-element named list (name -> arguments), or NULL when no mapped
  #   aesthetic applies to this method.
  #
  # look up which aesthetics this method accepts in the file-level plan
  # (`method_and_aes` is a module-level table; `pull(aes)` is data-masked and
  # reads the table's `aes` column, not this function's `aes` argument)
  vars <- method_and_aes %>% filter(method == name) %>% pull(aes) %>% unlist()
  method <- build_geom_method(aes, vars)
  # splice in the matching scale's options, renaming the bare field entry
  is_relevant_scale <- name %in% names(scales)
  if(is_relevant_scale){
    is_relevant_to_aes <- sum(name %in% names(aes))
    if(is_relevant_to_aes > 0){
      # NOTE(review): `scales[is_relevant_to_aes]` indexes by the match COUNT
      # (always 1 here), i.e. always the first scale, not scales[[name]] —
      # looks suspicious; confirm intent against callers before changing.
      scl <- scales[is_relevant_to_aes] %>% unname() %>% .[[1]]
      names(method) <- "field"
      method <- append(method, scl) %>%
        list()
    }
  }
  if(!is.null(method))
    names(method) <- name
  method
} | /R/render.R | permissive | JohnCoene/g2r | R | false | false | 4,677 | r | render_g2r <- function(g2){
main_mapping <- g2$x$mapping
scales <- g2$x$scales
combined_mapping <- combine_aes(main_mapping, g2$x$layers)
# globals
if(is.null(g2$x$font)) g2$x$font <- getOption("g2_font")
if(is.null(g2$x$renderer)) g2$x$renderer <- getOption("g2_renderer")
if(is.null(g2$x$theme)) g2$x$theme <- getOption("g2_theme")
views <- list()
for(i in 1:length(g2$x$layers)){
layer <- g2$x$layers[[i]]
aes <- mutate_aes(main_mapping, layer$mapping, layer$inherit_aes)
# geoms
geom <- list(type = layer$type)
for(method in method_and_aes$method){
meth <- add_geom_method(method, aes, scales)
meth <- meth[lapply(meth, length) > 0] # remove empty/unused method
geom <- append(geom, meth)
}
guides <- get_guides(g2$x$guides, layer$name, index = i)
# aniamtion
if(length(layer$animate))
geom$animate <- layer$animate
layer$animate <- NULL
if(length(layer$adjust))
geom$adjust <- layer$adjust
if(length(layer$style))
geom$style <- layer$style
# if data passed, turn to row list
if(!is.null(layer$data))
layer$data <- layer$data %>%
process_data(aes)
view <- list(
layer = list(
options = list(
geoms = list(geom)
)
)
)
if(length(layer$data))
view$data <- layer$data
if(length(layer$opts))
view$layer$options <- append(view$layer$options, layer$opts)
if(length(guides))
view$layer$options$guides <- guides
views <- append(views, list(view))
}
g2$x$layers <- views # replace layers
if(!is.null(g2$x$facet)){
final_func <- ""
for(v in 1:length(views)){
type_func <- paste0("view.", views[[v]]$layer$options$geoms[[1]]$type, "()")
method_func <- paste_facet(views[[v]]$layer$options$geoms[[1]])
view_func <- paste0(type_func, method_func, ";\n")
final_func <- paste0(final_func, view_func)
}
each_view_func <- paste0("function eachView(view){", final_func, "}")
each_view_func <- htmlwidgets::JS(each_view_func)
g2$x$layers <- NULL
g2$x$facet$opts$eachView <- each_view_func
g2$x$facet$facet <- NULL
}
# data as list
g2$x$data <- g2$x$data %>%
process_data(combined_mapping)
if(debug_mode())
print(jsonlite::toJSON(g2$x, auto_unbox = TRUE, pretty = TRUE, force = TRUE))
if(length(g2$x$dataOpts))
g2$x$dataOpts <- map(g2$x$dataOpts, function(x){
keep(x, function(y){
length(y) >= 1
})
})
# remove unwanted
g2$x$scales <- NULL
g2$x$mapping <- NULL
g2$x$guides <- NULL
g2
}
paste_facet <- function(methods){
methods <- methods %>%
map2(names(methods), function(x, y){
paste0(
y, "(",
paste0(
convert_to_json(x),
collapse = ","
),
")"
)
})
# this is the chart type and is done differently
methods[[1]] <- NULL
methods %>%
paste0(collapse = ".") %>%
paste0(".", .)
}
convert_to_json <- function(x) {
  # Serialise `x` to JSON, flattening multi-element inputs to a plain vector
  # so they render as a JSON array rather than a nested object.
  #
  # BUG FIX: the original computed toJSON(unlist(x)) for length(x) > 1 but
  # discarded the result (missing return), so the branch had no effect.
  if (length(x) > 1) {
    return(jsonlite::toJSON(unlist(x), auto_unbox = TRUE))
  }
  jsonlite::toJSON(x, auto_unbox = TRUE)
}
# get guides
get_guides <- function(guides, name, index = 1){
  # Select the guides that apply to the layer identified by `name` and/or its
  # position `index`.
  #
  # A guide with no `figures` restriction is attached to the first layer only
  # (so it is emitted exactly once); otherwise it is included when either the
  # layer's positional index or its name appears in `figures`.
  #
  # Fixes: seq_along() so an empty `guides` list returns list() instead of
  # erroring on guides[[1]] (1:length(guides) would iterate c(1, 0));
  # any(...) replaces the equivalent but opaque sum(...) >= 1.
  guides_included <- list()
  for (i in seq_along(guides)) {
    if (is.null(guides[[i]]$figures) && index == 1) {
      guides_included <- append(guides_included, list(guides[[i]]$guide))
    } else if (any(index %in% guides[[i]]$figures) || any(name %in% guides[[i]]$figures)) {
      guides_included <- append(guides_included, list(guides[[i]]$guide))
    }
  }
  # drop empty/NULL guides
  guides_included[lapply(guides_included, length) > 0]
}
# build basic geom method
build_geom_method <- function(aes, vars){
is_present <- names(aes) %in% vars
aes <- aes[is_present]
if(!length(aes)) return(NULL)
map(aes, function(m){
if(rlang::is_quosure(m))
rlang::quo_name(m)
else
m
}) %>%
unlist() %>%
.[order(names(.))] %>% # for position method: x comes before y
unname() %>%
list()
}
# add geom
add_geom_method <- function(name, aes, scales){
# build arguments based on plan
vars <- method_and_aes %>% filter(method == name) %>% pull(aes) %>% unlist()
method <- build_geom_method(aes, vars)
# add relevant arguments (from scales) to method
is_relevant_scale <- name %in% names(scales)
if(is_relevant_scale){
is_relevant_to_aes <- sum(name %in% names(aes))
if(is_relevant_to_aes > 0){
scl <- scales[is_relevant_to_aes] %>% unname() %>% .[[1]]
names(method) <- "field"
method <- append(method, scl) %>%
list()
}
}
if(!is.null(method))
names(method) <- name
method
} |
prepare_kfold <- function(K, model_defs, base_data, seed = NULL) {
  # Compile the Stan models and build the per-fold data/control lists needed
  # to run K-fold cross-validation with sampling_multi().
  #
  # Args:
  #   K: number of folds.
  #   model_defs: data frame with a `model_name` column and optionally an
  #     `adapt_delta` column (per-model sampler control).
  #   base_data: Stan data list; must contain `N` (number of observations).
  #   seed: optional RNG seed so the fold assignment is reproducible.
  #
  # Returns:
  #   A list bundling the base data, the per-fold data and control lists, the
  #   model definitions (plain and crossed with folds), and the compiled
  #   models (per fitted item and deduplicated).
  model_names <- unique(model_defs$model_name)
  models =
    foreach(model_name = model_names) %do% {
      library(rstan) # On Windows, %dopar%-ed code does not share the main session
      library(here)
      rstan_options(auto_write = TRUE)
      stan_model(here("stan", sprintf("%s.stan", model_name)))
    } %>%
    setNames(model_names)
  model_defs <- model_defs %>% mutate(id = 1:n())
  model_defs_kfold = model_defs %>% crossing(fold = 1:K)
  if (!is.null(seed)) {
    set.seed(seed)
  }
  # assign every observation to one of K folds
  folds = kfold_split_random(K, base_data$N)
  data_list = list()
  for (i in seq_len(nrow(model_defs_kfold))) {
    data_list[[i]] = base_data
    data_list[[i]]$holdout = (folds == model_defs_kfold$fold[i])
  }
  if (!is.null(model_defs$adapt_delta)) {
    control_list = map(model_defs_kfold$adapt_delta, function(x) { list(adapt_delta = x) })
  } else {
    control_list = NULL
  }
  list(
    base_data = base_data,
    data_list = data_list,
    # BUG FIX: the original assigned the base function `list` here instead of
    # the per-fold sampler controls computed above, so run_kfold never
    # received any adapt_delta settings.
    control_list = control_list,
    model_defs = model_defs,
    model_defs_kfold = model_defs_kfold,
    models_list = models[as.character(model_defs_kfold$model_name)],
    models_unique = models
  )
}
run_kfold <- function(kfold_def, name_for_cache) {
  # Fit all model/fold combinations prepared by prepare_kfold() and compute
  # the K-fold elpd estimate for every model.
  #
  # Args:
  #   kfold_def: the list returned by prepare_kfold().
  #   name_for_cache: subdirectory name under local_temp_data/ where compiled
  #     fits are cached between runs.
  # Returns:
  #   A list with `fits` (one stanfit per model x fold), `log_lik` (one merged
  #   held-out log-lik matrix per model) and `kfold` (one loo-style elpd
  #   result per model, named by the concatenated model definition).
  cache_dir = here("local_temp_data",name_for_cache)
  if(!dir.exists(cache_dir)) {
    dir.create(cache_dir)
  }
  # fit every (model, fold) item in parallel, reusing cached fits when present
  fits = sampling_multi(kfold_def$models_list,
                        kfold_def$data_list,
                        control_per_item = kfold_def$control_list,
                        cache_dir = cache_dir,
                        cores = getOption("mc.cores"))
  # for each model id, gather its K fold fits and merge their held-out log-liks
  kfold_log_lik <- kfold_def$model_defs$id %>% map(function(id) {
    indices = kfold_def$model_defs_kfold$id == id
    fits_for_model <- fits[indices]
    holdout_for_model <- lapply(kfold_def$data_list[indices], '[[', "holdout")
    extract_log_lik_K(fits_for_model, holdout_for_model, "log_lik")
  })
  # human-readable result names: all definition columns glued together
  res_names <- kfold_def$model_defs %>% unite(full_name, -id) %>% pull(full_name)
  kfold_res <- kfold_log_lik %>%
    map(kfold) %>%
    set_names(res_names)
  list(
    fits = fits,
    log_lik = kfold_log_lik,
    kfold = kfold_res
  )
}
extract_holdout_ranks <- function(list_of_stanfits, list_of_holdout, base_data) {
  # For every held-out observation, compute the rank of the observed count
  # within the posterior predictive draws gathered across all K folds
  # (the basis for an SBC-style calibration histogram).
  #
  # Args:
  #   list_of_stanfits: one stanfit per fold, each exposing the generated
  #     quantity "counts_gen_geneWise" (draws x N x G).
  #   list_of_holdout: one logical holdout vector (length N) per fold; the
  #     folds must partition all observations.
  #   base_data: full data list; `counts` is the observed N x G matrix.
  # Returns:
  #   An N x G matrix of ranks in [0, N_samples], ties broken uniformly at
  #   random, with dimnames copied from base_data$counts.
  K <- length(list_of_stanfits)
  N_samples <- 100
  all_genes_holdout_gen <- array(NA_real_, c(N_samples, base_data$N, base_data$G))
  for(i in seq_len(K)) {
    counts_gen <- rstan::extract(list_of_stanfits[[i]], pars = "counts_gen_geneWise")$counts_gen_geneWise
    holdout <- list_of_holdout[[i]]
    # I should better get samples at fixed steps, but ignoring for now
    samples_to_use <- sample(1:(dim(counts_gen)[1]), N_samples)
    all_genes_holdout_gen[, holdout, ] <- counts_gen[samples_to_use, holdout, ]
  }
  # BUG FIX: the sanity check previously inspected `counts_gen` (the last
  # fold's raw draws, which never contain NA) instead of the assembled array,
  # so an incomplete fold partition went undetected.
  if(any(is.na(all_genes_holdout_gen))) {
    stop("Inconsistent holdout data")
  }
  # rank = number of draws strictly below the observation ...
  holdout_ranks_less <- sweep(all_genes_holdout_gen, MARGIN = c(2,3), STATS = base_data$counts, FUN = "<") %>%
    apply(MARGIN = c(2,3), FUN = sum)
  # ... plus a random share of the draws equal to it (tie breaking)
  holdout_ranks_equal <- sweep(all_genes_holdout_gen, MARGIN = c(2,3), STATS = base_data$counts, FUN = "==") %>%
    apply(MARGIN = c(2,3), FUN = sum)
  # Trick: rounding (ties + 1) * U(0,1) - 0.5 picks uniformly among the tied
  # positions in a fully vectorised way.
  holdout_ranks = holdout_ranks_less +
    round((holdout_ranks_equal + 1) * array(runif(length(holdout_ranks_equal)), dim(holdout_ranks_equal)) - 0.5)
  dimnames(holdout_ranks) <- dimnames(base_data$counts)
  holdout_ranks
}
extract_holdout_ranks_all <- function(kfold_def, kfold_res) {
  # Gather posterior-predictive ranks for every model in `kfold_def` into one
  # long data frame with columns sample / gene / rank / model, suitable for
  # plot_holdout_ranks().
  kfold_def$model_defs$id %>% map(function(id) {
    indices = kfold_def$model_defs_kfold$id == id
    fits_for_model <- kfold_res$fits[indices]
    holdout_for_model <- lapply(kfold_def$data_list[indices], '[[', "holdout")
    extract_holdout_ranks(fits_for_model, holdout_for_model, kfold_def$base_data) %>%
      as_tibble() %>% # as.tibble() is deprecated since tibble 2.0; same behavior
      rownames_to_column("sample") %>%
      gather("gene","rank", - sample) %>%
      mutate(model = kfold_def$model_defs$model_name[kfold_def$model_defs$id == id])
  }) %>% do.call(rbind, args = .)
}
plot_holdout_ranks <- function(ranks, binwidth = 1, facet = ~ model) {
  # Plot SBC-style histograms of posterior ranks of held-out observations,
  # one facet per model, with a 99% binomial envelope for a uniform
  # distribution of ranks (the expectation under a well-calibrated model).
  #
  # Args:
  #   ranks: long data frame from extract_holdout_ranks_all() with at least a
  #     `rank` column (values in 0..100) and the facetting column(s).
  #   binwidth: histogram bin width; must divide 100 so bins tile the range.
  #   facet: one-sided formula passed to facet_wrap().
  # Returns:
  #   A ggplot object.
  if(100 %% binwidth != 0) {
    stop("binwidth has to divide 100")
  }
  # count observations per facet group; the envelope below assumes the same
  # count in every group
  n_ranks <- aggregate(update.formula(facet, rank ~ .) , ranks, length)
  if(length(unique(n_ranks$rank)) != 1) {
    stop("Unequal number of observations per group")
  }
  n_ranks <- n_ranks$rank[1]
  # 99% binomial interval for the expected per-bin count under uniform ranks
  CI = qbinom(c(0.005,0.5,0.995), size=n_ranks,prob = binwidth / 100)
  lower = CI[1]
  mean = CI[2]
  upper = CI[3]
  ranks %>% ggplot(aes(x = rank)) +
    geom_segment(aes(x=0,y=mean,xend=100,yend=mean),colour="grey25") +
    # shaded polygon marking the binomial envelope across the rank range
    geom_polygon(data=data.frame(x=c(-10,0,-10,110,100,110,-10),y=c(lower,mean,upper,upper,mean,lower,lower)),aes(x=x,y=y),fill="grey45",color="grey25",alpha=0.5) +
    geom_histogram(breaks = seq(1, 101, by = binwidth), closed = "left" ,fill="#A25050",colour="black") +
    facet_wrap(facet, scales = "free_y") +
    ggtitle("Posterior ranks of heldout observations")
}
#Functions extract_log_lik and kfold taken from
#https://github.com/stan-dev/stancon_talks/blob/master/2017/Contributed-Talks/07_nicenboim/kfold.Rmd
extract_log_lik_K <- function(list_of_stanfits, list_of_holdout, ...){
  # Merge the per-fold log-likelihood matrices into a single
  # (samples x N_obs) matrix holding, for every observation, the log-lik
  # evaluated by the fold in which that observation was held out.
  #
  # Args:
  #   list_of_stanfits: one stanfit per fold (at least one).
  #   list_of_holdout: one logical holdout vector per fold; together they
  #     should partition the observations.
  #   ...: forwarded to loo::extract_log_lik (e.g. the parameter name).
  # Returns:
  #   A (samples x N_obs) matrix; a cell stays NA if no fold held it out.
  K <- length(list_of_stanfits)
  # lapply replaces plyr::llply, which with default arguments is equivalent
  # here; seq_len() is the idiomatic, safe index sequence.
  list_of_log_liks <- lapply(seq_len(K), function(k){
    extract_log_lik(list_of_stanfits[[k]],...)
  })
  # `log_lik_heldout` holds the log-lik of all held-out data of all folds,
  # as a (samples x N_obs) matrix (same shape as each per-fold log_lik).
  log_lik_heldout <- list_of_log_liks[[1]] * NA
  for(k in seq_len(K)){
    log_lik <- list_of_log_liks[[k]]
    samples <- dim(log_lik)[1]
    N_obs <- dim(log_lik)[2]
    # Same shape as log_lik; 1 where the observation was held out in fold k
    heldout <- matrix(rep(list_of_holdout[[k]], each = samples), nrow = samples)
    # Sanity check that a previously written log_lik is not being overwritten
    # (folds are supposed to be disjoint):
    if(any(!is.na(log_lik_heldout[heldout==1]))){
      warning("Heldout log_lik has been overwritten!!!!")
    }
    # Save the log_lik of fold k into the merged matrix:
    log_lik_heldout[heldout==1] <- log_lik[heldout==1]
  }
  return(log_lik_heldout)
}
kfold <- function(log_lik_heldout) {
  # Compute the K-fold elpd (expected log pointwise predictive density) and
  # its standard error from a (samples x N_obs) matrix of held-out
  # log-likelihoods, returning a loo-classed object.
  #
  # Change: the original called library(matrixStats) inside the function,
  # mutating the search path as a side effect; the stable column-wise
  # log-sum-exp is implemented directly in base R instead.
  logColMeansExp <- function(x) {
    # log(colMeans(exp(x))) computed stably via the log-sum-exp trick
    S <- nrow(x)
    apply(x, 2, function(col) {
      m <- max(col)
      # all -Inf (or NA) column: avoid NaN from exp(-Inf - -Inf)
      if (!is.finite(m)) return(m)
      m + log(sum(exp(col - m)))
    }) - log(S)
  }
  # See equation (20) of @VehtariEtAl2016
  pointwise <- matrix(logColMeansExp(log_lik_heldout), ncol = 1)
  colnames(pointwise) <- "elpd"
  # See equation (21) of @VehtariEtAl2016
  elpd_kfold <- sum(pointwise)
  se_elpd_kfold <- sqrt(ncol(log_lik_heldout) * var(pointwise))
  estimates <- matrix(NA_real_, nrow = 1, ncol = 2)
  rownames(estimates) <- "elpd_loo"
  colnames(estimates) <- c("Estimate", "SE")
  estimates["elpd_loo", "Estimate"] <- elpd_kfold
  estimates["elpd_loo", "SE"] <- se_elpd_kfold
  out <- list(
    pointwise = pointwise,
    estimates = estimates
  )
  structure(out, class = "loo")
}
| /R/kfold.R | no_license | stemangiola/RNAseq-noise-model | R | false | false | 7,337 | r | prepare_kfold <- function(K, model_defs, base_data, seed = NULL) {
model_names <- unique(model_defs$model_name)
models =
foreach(model_name = model_names) %do% {
library(rstan) #On Windows, %dopar%-ed code does not share the main session
library(here)
rstan_options(auto_write = TRUE)
stan_model(here("stan",sprintf("%s.stan", model_name)))
} %>%
setNames(model_names)
model_defs <- model_defs %>% mutate(id = 1:n())
model_defs_kfold = model_defs %>% crossing(fold = 1:K)
if(!is.null(seed)) {
set.seed(seed)
}
folds = kfold_split_random(K, base_data$N)
data_list = list()
for(i in 1:nrow(model_defs_kfold)) {
data_list[[i]] = base_data
data_list[[i]]$holdout = (folds == model_defs_kfold$fold[i])
}
if(!is.null(model_defs$adapt_delta)) {
control_list = map(model_defs_kfold$adapt_delta, function(x) { list(adapt_delta =x)})
} else {
control_list = NULL
}
list(
base_data = base_data,
data_list = data_list,
control_list = list,
model_defs = model_defs,
model_defs_kfold = model_defs_kfold,
models_list = models[as.character(model_defs_kfold$model_name)],
models_unique = models
)
}
run_kfold <- function(kfold_def, name_for_cache) {
cache_dir = here("local_temp_data",name_for_cache)
if(!dir.exists(cache_dir)) {
dir.create(cache_dir)
}
fits = sampling_multi(kfold_def$models_list,
kfold_def$data_list,
control_per_item = kfold_def$control_list,
cache_dir = cache_dir,
cores = getOption("mc.cores"))
kfold_log_lik <- kfold_def$model_defs$id %>% map(function(id) {
indices = kfold_def$model_defs_kfold$id == id
fits_for_model <- fits[indices]
holdout_for_model <- lapply(kfold_def$data_list[indices], '[[', "holdout")
extract_log_lik_K(fits_for_model, holdout_for_model, "log_lik")
})
res_names <- kfold_def$model_defs %>% unite(full_name, -id) %>% pull(full_name)
kfold_res <- kfold_log_lik %>%
map(kfold) %>%
set_names(res_names)
list(
fits = fits,
log_lik = kfold_log_lik,
kfold = kfold_res
)
}
# Compute posterior ranks of the held-out counts across all K folds
# (a simulation-based-calibration style check).
#
# list_of_stanfits: one stanfit per fold; each must expose the generated
#                   quantity "counts_gen_geneWise" with dims (draws, N, G).
# list_of_holdout:  per-fold indicator of held-out samples, indexing the
#                   second dimension of the generated counts.
# base_data:        base data list providing $N, $G and observed $counts.
#
# Returns an N x G matrix of ranks in 0..N_samples, carrying the dimnames of
# base_data$counts.
extract_holdout_ranks <- function(list_of_stanfits, list_of_holdout, base_data) {
  K <- length(list_of_stanfits)
  N_samples <- 100
  all_genes_holdout_gen <- array(NA_real_, c(N_samples, base_data$N, base_data$G))
  for (i in seq_len(K)) {
    counts_gen <- rstan::extract(list_of_stanfits[[i]], pars = "counts_gen_geneWise")$counts_gen_geneWise
    holdout <- list_of_holdout[[i]]
    # I should better get samples at fixed steps, but ignoring for now
    samples_to_use <- sample(1:(dim(counts_gen)[1]), N_samples)
    all_genes_holdout_gen[, holdout, ] <- counts_gen[samples_to_use, holdout, ]
  }
  # Bug fix: this check previously inspected `counts_gen` (only the last
  # fold's draws, which can never contain NA), so folds failing to cover
  # every observation went undetected. Check the assembled array instead.
  if (any(is.na(all_genes_holdout_gen))) {
    stop("Inconsistent holdout data")
  }
  # Count, per (sample, gene), how many posterior draws fall below / tie with
  # the observed count.
  holdout_ranks_less <- sweep(all_genes_holdout_gen, MARGIN = c(2, 3), STATS = base_data$counts, FUN = "<") %>%
    apply(MARGIN = c(2, 3), FUN = sum)
  holdout_ranks_equal <- sweep(all_genes_holdout_gen, MARGIN = c(2, 3), STATS = base_data$counts, FUN = "==") %>%
    apply(MARGIN = c(2, 3), FUN = sum)
  # If there are ties, sample the rank uniformly over the tied values,
  # using a trick with rounded uniform random numbers to stay vectorized.
  holdout_ranks <- holdout_ranks_less +
    round((holdout_ranks_equal + 1) * array(runif(length(holdout_ranks_equal)), dim(holdout_ranks_equal)) - 0.5)
  dimnames(holdout_ranks) <- dimnames(base_data$counts)
  holdout_ranks
}
# Compute posterior holdout ranks for every model in a k-fold run and stack
# them into one long data frame with columns sample, gene, rank and model.
#
# kfold_def: output of the k-fold definition builder (model_defs,
#            model_defs_kfold, data_list, base_data).
# kfold_res: output of run_kfold; only $fits is used here.
extract_holdout_ranks_all <- function(kfold_def, kfold_res) {
  kfold_def$model_defs$id %>% map(function(id) {
    indices = kfold_def$model_defs_kfold$id == id
    fits_for_model <- kfold_res$fits[indices]
    holdout_for_model <- lapply(kfold_def$data_list[indices], '[[', "holdout")
    extract_holdout_ranks(fits_for_model, holdout_for_model, kfold_def$base_data) %>%
      # as.tibble() is a deprecated alias of as_tibble(); behaviour unchanged.
      # NOTE(review): tibbles do not preserve matrix rownames, so "sample" is
      # presumably just the row index -- confirm this is intended.
      as_tibble() %>%
      rownames_to_column("sample") %>%
      gather("gene", "rank", -sample) %>%
      mutate(model = kfold_def$model_defs$model_name[kfold_def$model_defs$id == id])
  }) %>% do.call(rbind, args = .)
}
# Plot histograms of posterior holdout ranks, one facet per group (model by
# default), with a 99% binomial band for a uniform rank distribution:
# well-calibrated ranks should stay mostly inside the band.
#
# ranks:    long data frame with a `rank` column and the facetting columns
#           (as produced by extract_holdout_ranks_all).
# binwidth: histogram bin width; must divide 100 so bins tile the rank range.
# facet:    one-sided formula passed to facet_wrap (default ~ model).
plot_holdout_ranks <- function(ranks, binwidth = 1, facet = ~ model) {
if(100 %% binwidth != 0) {
stop("binwidth has to divide 100")
}
# Count observations per facet group; the binomial band below is only valid
# when every group has the same number of ranks.
n_ranks <- aggregate(update.formula(facet, rank ~ .) , ranks, length)
if(length(unique(n_ranks$rank)) != 1) {
stop("Unequal number of observations per group")
}
n_ranks <- n_ranks$rank[1]
# 0.5%/50%/99.5% binomial quantiles of the expected per-bin count under a
# uniform rank distribution.
CI = qbinom(c(0.005,0.5,0.995), size=n_ranks,prob = binwidth / 100)
lower = CI[1]
# NB: `mean` here shadows base::mean inside this function body; the aes()
# calls below pick up this scalar, not the function.
mean = CI[2]
upper = CI[3]
ranks %>% ggplot(aes(x = rank)) +
# horizontal reference line at the expected per-bin count
geom_segment(aes(x=0,y=mean,xend=100,yend=mean),colour="grey25") +
# shaded confidence band, drawn slightly past the x range on both sides
geom_polygon(data=data.frame(x=c(-10,0,-10,110,100,110,-10),y=c(lower,mean,upper,upper,mean,lower,lower)),aes(x=x,y=y),fill="grey45",color="grey25",alpha=0.5) +
geom_histogram(breaks = seq(1, 101, by = binwidth), closed = "left" ,fill="#A25050",colour="black") +
facet_wrap(facet, scales = "free_y") +
ggtitle("Posterior ranks of heldout observations")
}
#Functions extract_log_lik and kfold taken from
#https://github.com/stan-dev/stancon_talks/blob/master/2017/Contributed-Talks/07_nicenboim/kfold.Rmd
# Assemble the full heldout log-likelihood matrix from K per-fold stanfits.
#
# list_of_stanfits: one fitted model per fold.
# list_of_holdout:  per-fold 0/1 indicator (length N_obs) of which
#                   observations were held out in that fold.
# ...:              forwarded to extract_log_lik (e.g. the parameter name).
#
# Returns a (samples x N_obs) matrix where each column holds the log-lik of
# that observation from the single fold in which it was held out.
extract_log_lik_K <- function(list_of_stanfits, list_of_holdout, ...){
  K <- length(list_of_stanfits)
  # lapply replaces the former plyr::llply call; plyr is retired and the base
  # equivalent behaves identically here.
  list_of_log_liks <- lapply(seq_len(K), function(k){
    extract_log_lik(list_of_stanfits[[k]], ...)
  })
  # `log_lik_heldout` will include the loglike of all the held out data of all the folds.
  # We define `log_lik_heldout` as a (samples x N_obs) matrix
  # (similar to each log_lik matrix)
  log_lik_heldout <- list_of_log_liks[[1]] * NA
  for(k in seq_len(K)){
    log_lik <- list_of_log_liks[[k]]
    samples <- dim(log_lik)[1]
    N_obs <- dim(log_lik)[2]
    # This is a matrix with the same size as log_lik_heldout
    # with 1 if the data was held out in the fold k
    heldout <- matrix(rep(list_of_holdout[[k]], each = samples), nrow = samples)
    # Sanity check that the previous log_lik is not being overwritten:
    if(any(!is.na(log_lik_heldout[heldout==1]))){
      warning("Heldout log_lik has been overwritten!!!!")
    }
    # We save here the log_lik of the fold k in the matrix:
    log_lik_heldout[heldout==1] <- log_lik[heldout==1]
  }
  return(log_lik_heldout)
}
# Compute k-fold ELPD estimates from a heldout log-likelihood matrix.
#
# log_lik_heldout: (samples x N_obs) matrix, e.g. from extract_log_lik_K.
#
# Returns a loo-classed list with $pointwise elpd contributions and a 1x2
# $estimates matrix (Estimate, SE). The row is labelled "elpd_loo" so loo's
# print/compare machinery accepts it, although it is a k-fold estimate.
kfold <- function(log_lik_heldout) {
  # Numerically stable log(colMeans(exp(x))): shift by each column's max
  # before exponentiating. This replaces the former
  # matrixStats::colLogSumExps dependency (and removes the library() call
  # that attached matrixStats as a side effect of calling this function).
  logColMeansExp <- function(x) {
    S <- nrow(x)
    apply(x, 2, function(col) {
      m <- max(col)
      m + log(sum(exp(col - m)))
    }) - log(S)
  }
  # See equation (20) of @VehtariEtAl2016
  pointwise <- matrix(logColMeansExp(log_lik_heldout), ncol = 1)
  colnames(pointwise) <- "elpd"
  # See equation (21) of @VehtariEtAl2016
  elpd_kfold <- sum(pointwise)
  se_elpd_kfold <- sqrt(ncol(log_lik_heldout) * var(pointwise))
  estimates <- matrix(NA_real_, nrow = 1, ncol = 2)
  rownames(estimates) <- "elpd_loo"
  colnames(estimates) <- c("Estimate", "SE")
  estimates["elpd_loo", "Estimate"] <- elpd_kfold
  estimates["elpd_loo", "SE"] <- se_elpd_kfold
  out <- list(
    pointwise = pointwise,
    estimates = estimates
  )
  structure(out, class = "loo")
}
|
# Download the prebuilt rwinlib harfbuzz bundle for Windows builds if it is
# not already unpacked under ../windows (run from the package's tools/ dir).
VERSION <- "2.7.4"
# png.h is used as the sentinel for a complete unpack; the rwinlib harfbuzz
# archive presumably bundles libpng headers -- TODO confirm against the
# archive contents.
if(!file.exists(sprintf("../windows/harfbuzz-%s/include/png.h", VERSION))){
# Older R on Windows needs setInternet2() to download over https.
if(getRversion() < "3.3.0") setInternet2()
download.file(sprintf("https://github.com/rwinlib/harfbuzz/archive/v%s.zip", VERSION), "lib.zip", quiet = TRUE)
dir.create("../windows", showWarnings = FALSE)
unzip("lib.zip", exdir = "../windows")
unlink("lib.zip")
}
| /tools/winlibs.R | permissive | isabella232/vdiffr | R | false | false | 366 | r | VERSION <- "2.7.4"
if(!file.exists(sprintf("../windows/harfbuzz-%s/include/png.h", VERSION))){
if(getRversion() < "3.3.0") setInternet2()
download.file(sprintf("https://github.com/rwinlib/harfbuzz/archive/v%s.zip", VERSION), "lib.zip", quiet = TRUE)
dir.create("../windows", showWarnings = FALSE)
unzip("lib.zip", exdir = "../windows")
unlink("lib.zip")
}
|
# One-off data-prep script: merge Google Ads creative text lines and write a
# cleaned CSV; also loads a COCC export as a sanity check.
library(tidyverse)
# set wd
# NOTE(review): hard-coded setwd() ties the script to one machine; prefer
# project-relative paths.
setwd('C:/Users/bbutler/Documents/DigitalAnalytics')
ads <- readr::read_csv('GAdsWeek1Rev2.csv')
# NOTE(review): `y` is read but never used below.
y <- readr::read_csv('CheckingAdsClean.csv')
head(ads)
# Combine the two creative text lines into a single AdText column.
ads$AdText <- paste(ads$`Line 1`, ads$`Line 2`, sep = " ")
# Keep Ad ID as character so long IDs are not mangled as numerics.
ads$`Ad ID` <- as.character(ads$`Ad ID`)
# write locally too
write.csv(ads, file ='CheckingAdsClean.csv', row.names = FALSE, fileEncoding = "UTF-8")
# test the COCC file
cocc <- readr::read_csv('eOpen Summary_20200601-20201015.csv')
library(tidyverse)
# set wd
setwd('C:/Users/bbutler/Documents/DigitalAnalytics')
ads <- readr::read_csv('GAdsWeek1Rev2.csv')
y <- readr::read_csv('CheckingAdsClean.csv')
head(ads)
ads$AdText <- paste(ads$`Line 1`, ads$`Line 2`, sep = " ")
ads$`Ad ID` <- as.character(ads$`Ad ID`)
# write locally too
write.csv(ads, file ='CheckingAdsClean.csv', row.names = FALSE, fileEncoding = "UTF-8")
# test the COCC file
cocc <- readr::read_csv('eOpen Summary_20200601-20201015.csv')
|
# --- Setup: download MovieLens 10M and split into edx (train) / validation ---
# NOTE(review): require() + install.packages() at load time is fragile;
# acceptable for a course script.
if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
if(!require(data.table)) install.packages("data.table", repos = "http://cran.us.r-project.org")
# MovieLens 10M dataset:
# https://grouplens.org/datasets/movielens/10m/
# http://files.grouplens.org/datasets/movielens/ml-10m.zip
dl <- tempfile()
download.file("http://files.grouplens.org/datasets/movielens/ml-10m.zip", dl)
# ratings.dat is ::-delimited; convert to tabs so fread can parse it.
ratings <- fread(text = gsub("::", "\t", readLines(unzip(dl, "ml-10M100K/ratings.dat"))),
col.names = c("userId", "movieId", "rating", "timestamp"))
movies <- str_split_fixed(readLines(unzip(dl, "ml-10M100K/movies.dat")), "\\::", 3)
colnames(movies) <- c("movieId", "title", "genres")
# NOTE(review): levels(movieId) assumes the columns are factors, i.e. the
# pre-R-4.0 stringsAsFactors default -- confirm under the R version in use.
movies <- as.data.frame(movies) %>% mutate(movieId = as.numeric(levels(movieId))[movieId],
title = as.character(title),
genres = as.character(genres))
movielens <- left_join(ratings, movies, by = "movieId")
# Validation set will be 10% of MovieLens data
set.seed(1, sample.kind="Rounding")
# if using R 3.5 or earlier, use `set.seed(1)` instead
test_index <- createDataPartition(y = movielens$rating, times = 1, p = 0.1, list = FALSE)
edx <- movielens[-test_index,]
temp <- movielens[test_index,]
# Make sure userId and movieId in validation set are also in edx set
validation <- temp %>%
semi_join(edx, by = "movieId") %>%
semi_join(edx, by = "userId")
# Add rows removed from validation set back into edx set
removed <- anti_join(temp, validation)
edx <- rbind(edx, removed)
# Tidy up
rm(dl, ratings, movies, test_index, temp, movielens, removed)
# --- Exploratory plots: sparsity image and rating-count distributions ---
# Visualise a 100 x 100 sample of users and movies
users <- sample(unique(edx$userId), 100)
# Build a user x movie indicator matrix for the sampled users and image() it;
# dark cells mark (user, movie) pairs with a rating.
edx %>% filter(userId %in% users) %>%
select(userId, movieId, rating) %>%
mutate(rating = 1) %>%
spread(movieId, rating) %>% select(sample(ncol(.), 100)) %>%
as.matrix() %>% t(.) %>%
image(1:100, 1:100,. , xlab="Movies", ylab="Users")
abline(h=0:100+0.5, v=0:100+0.5, col = "grey")
# Plot of distribution of ratings by user
edx %>%
dplyr::count(userId) %>%
ggplot(aes(n)) +
geom_histogram(bins = 30, color = "black") +
scale_x_log10() +
ggtitle("Users")
# Plot of distribution of ratings by movie
edx %>%
dplyr::count(movieId) %>%
ggplot(aes(n)) +
geom_histogram(bins = 30, color = "black") +
scale_x_log10() +
ggtitle("Movies")
# Root-mean-square error between observed and predicted ratings.
# Both arguments are numeric vectors of equal length; returns a scalar
# (NA if either vector contains NA, since no na.rm is applied).
RMSE <- function(true_ratings, predicted_ratings){
  residuals <- true_ratings - predicted_ratings
  sqrt(mean(residuals^2))
}
# --- Models: global mean, movie effects, user+movie effects, regularised ---
# Base algorithm
# Predict every rating with the overall mean.
average_rating <- mean(edx$rating)
average_rating
base_rmse <- RMSE(validation$rating, average_rating)
base_rmse
# Movie effects algorithm
# calculate movie average difference to overall
movie_avgs <- edx %>%
group_by(movieId) %>%
summarize(b_i = mean(rating - average_rating))
#plot movie average difference to overall
# NOTE(review): qplot() is deprecated in current ggplot2.
movie_avgs %>% qplot(b_i, geom ="histogram", bins = 10, data = ., color = I("black"))
#use validation to make predictions and calculate RMSE
movie_effects <- average_rating + validation %>%
left_join(movie_avgs, by='movieId') %>%
.$b_i
movie_effects_rmse <- RMSE(movie_effects, validation$rating)
movie_effects_rmse
# User and movie effects algorithm
#plot user averages
edx %>%
group_by(userId) %>%
summarize(b_u = mean(rating)) %>%
filter(n()>=100) %>%
ggplot(aes(b_u)) +
geom_histogram(bins = 30, color = "black")
# Calculate user average difference to overall and movie average
user_avgs <- edx %>%
left_join(movie_avgs, by='movieId') %>%
group_by(userId) %>%
summarize(b_u = mean(rating - average_rating - b_i))
#use validation to make predictions and calculate RMSE
user_movie_effects <- validation %>%
left_join(movie_avgs, by='movieId') %>%
left_join(user_avgs, by='userId') %>%
mutate(pred = average_rating + b_i + b_u) %>%
.$pred
user_movie_effects_rmse <- RMSE(user_movie_effects, validation$rating)
user_movie_effects_rmse
#Regularized algorithm
#Splitting the edx dataset into train and test
# The penalty lambda is tuned on an edx-internal split so the validation set
# is only touched once, at the end.
set.seed(1, sample.kind = "Rounding")
# if using R 3.5 or earlier, use `set.seed(1)` instead
test_index <- createDataPartition(y = edx$rating, times = 1, p = 0.2, list = FALSE)
train <- edx[-test_index,]
temp <- edx[test_index,]
# Make sure userId and movieId in test set are also in train set
test <- temp %>%
semi_join(train, by = "movieId") %>%
semi_join(train, by = "userId")
# Add rows removed from test set back into train set
removed <- anti_join(temp, test)
train <- rbind(train, removed)
# Tidy up
rm(test_index, temp, removed)
#Choose lambda
lambdas <- seq(2, 6, 0.5)
# Grid search: for each lambda, fit penalised movie/user effects on train and
# score on test. (sapply returns a numeric vector here since each call
# returns a scalar.)
train_rmses <- sapply(lambdas, function(l){
mu <- mean(train$rating)
b_i <- train %>%
group_by(movieId) %>%
summarize(b_i = sum(rating - mu)/(n()+l))
b_u <- train %>%
left_join(b_i, by="movieId") %>%
group_by(userId) %>%
summarize(b_u = sum(rating - b_i - mu)/(n()+l))
predicted_ratings <-
test %>%
left_join(b_i, by = "movieId") %>%
left_join(b_u, by = "userId") %>%
mutate(pred = mu + b_i + b_u) %>%
pull(pred)
return(RMSE(predicted_ratings, test$rating))
})
qplot(lambdas, train_rmses)
lambda <- lambdas[which.min(train_rmses)]
# Use chosen lambda to create regularised user and movie effects model
# (refit on all of edx with the tuned penalty).
b_i <- edx %>%
group_by(movieId) %>%
summarize(b_i = sum(rating - average_rating)/(n()+lambda))
b_u <- edx %>%
left_join(b_i, by="movieId") %>%
group_by(userId) %>%
summarize(b_u = sum(rating - b_i - average_rating)/(n()+lambda))
#use validation to make predictions and calculate RMSE
regularised <-
validation %>%
left_join(b_i, by = "movieId") %>%
left_join(b_u, by = "userId") %>%
mutate(pred = average_rating + b_i + b_u) %>%
.$pred
regularised_rmse <- RMSE(regularised, validation$rating)
regularised_rmse
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
if(!require(data.table)) install.packages("data.table", repos = "http://cran.us.r-project.org")
# MovieLens 10M dataset:
# https://grouplens.org/datasets/movielens/10m/
# http://files.grouplens.org/datasets/movielens/ml-10m.zip
dl <- tempfile()
download.file("http://files.grouplens.org/datasets/movielens/ml-10m.zip", dl)
ratings <- fread(text = gsub("::", "\t", readLines(unzip(dl, "ml-10M100K/ratings.dat"))),
col.names = c("userId", "movieId", "rating", "timestamp"))
movies <- str_split_fixed(readLines(unzip(dl, "ml-10M100K/movies.dat")), "\\::", 3)
colnames(movies) <- c("movieId", "title", "genres")
movies <- as.data.frame(movies) %>% mutate(movieId = as.numeric(levels(movieId))[movieId],
title = as.character(title),
genres = as.character(genres))
movielens <- left_join(ratings, movies, by = "movieId")
# Validation set will be 10% of MovieLens data
set.seed(1, sample.kind="Rounding")
# if using R 3.5 or earlier, use `set.seed(1)` instead
test_index <- createDataPartition(y = movielens$rating, times = 1, p = 0.1, list = FALSE)
edx <- movielens[-test_index,]
temp <- movielens[test_index,]
# Make sure userId and movieId in validation set are also in edx set
validation <- temp %>%
semi_join(edx, by = "movieId") %>%
semi_join(edx, by = "userId")
# Add rows removed from validation set back into edx set
removed <- anti_join(temp, validation)
edx <- rbind(edx, removed)
# Tidy up
rm(dl, ratings, movies, test_index, temp, movielens, removed)
# Visualise a 100 x 100 sample of users and movies
users <- sample(unique(edx$userId), 100)
edx %>% filter(userId %in% users) %>%
select(userId, movieId, rating) %>%
mutate(rating = 1) %>%
spread(movieId, rating) %>% select(sample(ncol(.), 100)) %>%
as.matrix() %>% t(.) %>%
image(1:100, 1:100,. , xlab="Movies", ylab="Users")
abline(h=0:100+0.5, v=0:100+0.5, col = "grey")
# Plot of distribution of ratings by user
edx %>%
dplyr::count(userId) %>%
ggplot(aes(n)) +
geom_histogram(bins = 30, color = "black") +
scale_x_log10() +
ggtitle("Users")
# Plot of distribution of ratings by movie
edx %>%
dplyr::count(movieId) %>%
ggplot(aes(n)) +
geom_histogram(bins = 30, color = "black") +
scale_x_log10() +
ggtitle("Movies")
# Function to calculate RMSE
RMSE <- function(true_ratings, predicted_ratings){
sqrt(mean((true_ratings - predicted_ratings)^2))
}
# Base algorithm
average_rating <- mean(edx$rating)
average_rating
base_rmse <- RMSE(validation$rating, average_rating)
base_rmse
# Movie effects algorithm
# calculate movie average difference to overall
movie_avgs <- edx %>%
group_by(movieId) %>%
summarize(b_i = mean(rating - average_rating))
#plot movie average difference to overall
movie_avgs %>% qplot(b_i, geom ="histogram", bins = 10, data = ., color = I("black"))
#use validation to make predictions and calculate RMSE
movie_effects <- average_rating + validation %>%
left_join(movie_avgs, by='movieId') %>%
.$b_i
movie_effects_rmse <- RMSE(movie_effects, validation$rating)
movie_effects_rmse
# User and movie effects algorithm
#plot user averages
edx %>%
group_by(userId) %>%
summarize(b_u = mean(rating)) %>%
filter(n()>=100) %>%
ggplot(aes(b_u)) +
geom_histogram(bins = 30, color = "black")
# Calculate user average difference to overall and movie average
user_avgs <- edx %>%
left_join(movie_avgs, by='movieId') %>%
group_by(userId) %>%
summarize(b_u = mean(rating - average_rating - b_i))
#use validation to make predictions and calculate RMSE
user_movie_effects <- validation %>%
left_join(movie_avgs, by='movieId') %>%
left_join(user_avgs, by='userId') %>%
mutate(pred = average_rating + b_i + b_u) %>%
.$pred
user_movie_effects_rmse <- RMSE(user_movie_effects, validation$rating)
user_movie_effects_rmse
#Regularized algorithm
#Splitting the edx dataset into train and test
set.seed(1, sample.kind = "Rounding")
# if using R 3.5 or earlier, use `set.seed(1)` instead
test_index <- createDataPartition(y = edx$rating, times = 1, p = 0.2, list = FALSE)
train <- edx[-test_index,]
temp <- edx[test_index,]
# Make sure userId and movieId in test set are also in train set
test <- temp %>%
semi_join(train, by = "movieId") %>%
semi_join(train, by = "userId")
# Add rows removed from test set back into train set
removed <- anti_join(temp, test)
train <- rbind(train, removed)
# Tidy up
rm(test_index, temp, removed)
#Choose lambda
lambdas <- seq(2, 6, 0.5)
train_rmses <- sapply(lambdas, function(l){
mu <- mean(train$rating)
b_i <- train %>%
group_by(movieId) %>%
summarize(b_i = sum(rating - mu)/(n()+l))
b_u <- train %>%
left_join(b_i, by="movieId") %>%
group_by(userId) %>%
summarize(b_u = sum(rating - b_i - mu)/(n()+l))
predicted_ratings <-
test %>%
left_join(b_i, by = "movieId") %>%
left_join(b_u, by = "userId") %>%
mutate(pred = mu + b_i + b_u) %>%
pull(pred)
return(RMSE(predicted_ratings, test$rating))
})
qplot(lambdas, train_rmses)
lambda <- lambdas[which.min(train_rmses)]
# Use chosen lambda to create regularised user and movie effects model
b_i <- edx %>%
group_by(movieId) %>%
summarize(b_i = sum(rating - average_rating)/(n()+lambda))
b_u <- edx %>%
left_join(b_i, by="movieId") %>%
group_by(userId) %>%
summarize(b_u = sum(rating - b_i - average_rating)/(n()+lambda))
#use validation to make predictions and calculate RMSE
regularised <-
validation %>%
left_join(b_i, by = "movieId") %>%
left_join(b_u, by = "userId") %>%
mutate(pred = average_rating + b_i + b_u) %>%
.$pred
regularised_rmse <- RMSE(regularised, validation$rating)
regularised_rmse |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gui_vms_db_select.R
\name{gui_vms_db_sel}
\alias{gui_vms_db_sel}
\title{VMS DataBase Select GUI}
\usage{
gui_vms_db_sel(vms_db_name = "")
}
\arguments{
\item{vms_db_name}{The path of a VMS DataBase}
}
\value{
This function does not return a value.
}
\description{
The \code{gui_vms_db_sel} function implements the graphical user interface for the
VMS DataBase Select routine.
}
\details{
This function, with a VMS DataBase (see \code{\link{gui_vms_db_stat}}),
enables the user to perform queries on, and extract data from, the submitted VMS DataBase.
}
\seealso{
\code{\link{gui_vms_db_stat}}
}
| /man/gui_vms_db_sel.Rd | no_license | mariasotoruiz/vmsbase | R | false | true | 674 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gui_vms_db_select.R
\name{gui_vms_db_sel}
\alias{gui_vms_db_sel}
\title{VMS DataBase Select GUI}
\usage{
gui_vms_db_sel(vms_db_name = "")
}
\arguments{
\item{vms_db_name}{The path of a VMS DataBase}
}
\value{
This function does not return a value.
}
\description{
The \code{gui_vms_db_sel} function implement the graphical user interface for the
VMS DataBase Select routine.
}
\details{
This function, with a VMS DataBase (see \code{\link{gui_vms_db_stat}}),
enables the user to perform queries on, and extract data from, the submitted VMS DataBase.
}
\seealso{
\code{\link{gui_vms_db_stat}}
}
|
# Exploration: wrap an LLVM-IR vectorised dnorm kernel (v_dnorm) in a
# .Call()-able proxy that coerces/duplicates its R arguments, then verify the
# results against a pure-R implementation.
library(Rllvm)
m = parseIR("dnormLoop.ir")
if(FALSE) {
# Run but it doesn't copy x.
x0 = x = seq(-1.5, 1.5, by = .1)
z = .llvm(m$v_dnorm, x, length(x), 0, 1)
# will still write directly into x0 and x which are the same.
z = .llvm(m$v_dnorm, x, length(x), 0, 1, .duplicate = c(TRUE, FALSE, FALSE, FALSE))
}
# Build a proxy function that we can invoke via .Call().
# It can replace the coercion that .llvm() needs to do
#
p = getParameters(m$v_dnorm)
rr = simpleFunction( "R_v_dnorm", SEXPType, .types = replicate(length(p), SEXPType), module = m)
pr = getParameters(rr$fun)
ir = rr$ir
# Declare R API routines
asInteger = Function("Rf_asInteger", Int32Type, list(SEXPType), module = m)
asReal = Function("Rf_asReal", DoubleType, list(SEXPType), module = m)
coerceVector = Function("Rf_coerceVector", SEXPType, list(SEXPType, Int32Type), module = m)
REAL = Function("REAL", pointerType(DoubleType), list(SEXPType), module = m)
duplicate = Function("Rf_duplicate", SEXPType, list(SEXPType), module = m)
printValue = Function("Rf_PrintValue", VoidType, list(SEXPType), module = m)
protect = Function("Rf_protect", SEXPType, list(SEXPType), module = m)
unprotect = Function("Rf_unprotect", VoidType, list(Int32Type), module = m)
typeof = Function("TYPEOF", Int32Type, list(SEXPType), module = m)
#allocVector = Function("Rf_allocVector", SEXPType, list(Int32Type, Int32Type), module = m)
#allocVector3 = Function("Rf_allocVector3", SEXPType, list(Int32Type, Int32Type, pointerType(VoidType)), module = m)
#ScalarInteger = Function("Rf_ScalarInteger", SEXPType, list(Int32Type), module = m)
if(TRUE) {
# Emit IR: if TYPEOF(x) == 14 (14L = REALSXP) duplicate x, otherwise coerce
# it to a REAL vector; then call v_dnorm on the copy and return it.
type = ir$createCall(typeof, pr[[1]])
typeCond = ir$createICmp(ICMP_EQ, type, ir$createConstant(14L))
# Does the Select evaluate both sides of the ternary regardless? Appears to.
#
select = FALSE
if(select)
phi = xc = ir$createSelect(typeCond, ir$createCall(duplicate, pr[[1]]), ir$createCall(coerceVector, pr[[1]], 14L))
else {
# Explicit branches instead of a Select, merged with a PHI node.
b2 = Block(rr$fun, "dup")
b3 = Block(rr$fun, "coerce")
b4 = Block(rr$fun, "do")
#print(getBlocks(rr$fun))
ir$createCondBranch(typeCond, b2, b3)
ir$setInsertBlock(b2)
dup = ir$createCall(duplicate, pr[[1]])
ir$createBranch(b4)
ir$setInsertBlock(b3)
#XXX ir$createCall(printValue, pr[[1]])
coerce = ir$createCall(coerceVector, pr[[1]], 14L)
ir$createBranch(b4)
ir$setInsertBlock(b4)
# Not needed in this case.
# ir$createCall(protect, xc)
phi = ir$createPHI( SEXPType, 2L )
addIncoming(phi, dup, b2)
addIncoming(phi, coerce, b3)
}
x2 = ir$createCall(REAL, phi)
len = ir$createCall(asInteger, pr[[2]])
mu = ir$createCall(asReal, pr[[3]])
sd = ir$createCall(asReal, pr[[4]])
ir$createCall(m$v_dnorm, x2, len, mu, sd)
# ir$createCall(unprotect, 1L)
ir$createReturn(phi)
#print(getBlocks(rr$fun))
} else {
# ir$createCall(printValue, pr[[1]])
# ir$createCall(printValue, pr[[2]])
# ir$createCall(printValue, pr[[3]])
# ir$createCall(printValue, pr[[4]])
# ir$createReturn(ir$createCall(ScalarInteger, 11))
ir$createReturn(pr[[1]])
}
#XXX Do we actually need these.
# register the symbols for the R API routines we used.
# More than not needing them, they cause problems.
# DON'T DO THIS
if(FALSE) {
rtns = c("REAL", "Rf_duplicate", "Rf_asReal", "Rf_asInteger", "Rf_coerceVector", "Rf_allocVector", "Rf_allocVector3",
"Rf_unprotect", "Rf_protect",
"Rf_PrintValue")
rptrs = lapply(rtns, function(sym) .Call("R_llvm_dlsym", sym, NULL))
names(rptrs) = rtns
llvmAddSymbol(.syms = rptrs)
}
# JIT-compile the module and call the proxy through a hand-built
# NativeSymbol so .Call() can dispatch to the generated code.
ee = ExecutionEngine(m)
x = seq(-1.5, 1.5, by = .1)
#print(getBlocks(rr$fun)) # R_v_dnorm
if(FALSE) {
# Use the Rffi invocation
cif = Rffi::CIF(SEXPType, replicate(4, SEXPType))
y = .llvm( rr$fun, x, length(x), 0, 1, .ffi = cif)
}
fptr = getPointerToFunction(rr$fun, ee)@ref
sym = list(name = "R_v_dnorm", address = structure(fptr, class = "NativeSymbol"), dll = NULL)
y = .Call(sym, x, length(x), 0.1, 1.2)
# An integer vector exercises the coerce branch of the proxy.
z = c(-1L, 0L, 1L)
z2 = .Call(sym, z, length(z), 0, 1)
# Reference implementation; 2.506628 is sqrt(2*pi) truncated, matching the
# constant baked into the IR kernel.
fun = function(x, mu, sd) { tmp = (x-mu)/sd; 1/(sd*2.506628)*exp(-.5*tmp*tmp)}
stopifnot(identical(y, fun(x, .1, 1.2)))
stopifnot(identical(z2, fun(z, 0, 1)))
truth = c(0.136675077204378, 0.152207587833673, 0.16833225640452, 0.184876815843572,
0.201642292524936, 0.218406151494248, 0.234926588580926, 0.250947887623335,
0.266206700434743, 0.280439049679568, 0.293387804447713, 0.304810338740607,
0.31448605753319, 0.322223465972462, 0.327866466006629, 0.331299591513103,
0.332451936758599, 0.331299591513103, 0.327866466006629, 0.322223465972462,
0.31448605753319, 0.304810338740607, 0.293387804447713, 0.280439049679568,
0.266206700434743, 0.250947887623335, 0.234926588580926, 0.218406151494248,
0.201642292524936, 0.184876815843572, 0.16833225640452)
# stopifnot(identical(y, truth))
| /explorations/dnormLoop.R | no_license | duncantl/Rllvm | R | false | false | 5,064 | r | library(Rllvm)
m = parseIR("dnormLoop.ir")
if(FALSE) {
# Run but it doesn't copy x.
x0 = x = seq(-1.5, 1.5, by = .1)
z = .llvm(m$v_dnorm, x, length(x), 0, 1)
# will still write directly into x0 and x which are the same.
z = .llvm(m$v_dnorm, x, length(x), 0, 1, .duplicate = c(TRUE, FALSE, FALSE, FALSE))
}
# Build a proxy function that we can invoke via .Call().
# It can replace the coercion that .llvm() needs to do
#
p = getParameters(m$v_dnorm)
rr = simpleFunction( "R_v_dnorm", SEXPType, .types = replicate(length(p), SEXPType), module = m)
pr = getParameters(rr$fun)
ir = rr$ir
# Declare R API routines
asInteger = Function("Rf_asInteger", Int32Type, list(SEXPType), module = m)
asReal = Function("Rf_asReal", DoubleType, list(SEXPType), module = m)
coerceVector = Function("Rf_coerceVector", SEXPType, list(SEXPType, Int32Type), module = m)
REAL = Function("REAL", pointerType(DoubleType), list(SEXPType), module = m)
duplicate = Function("Rf_duplicate", SEXPType, list(SEXPType), module = m)
printValue = Function("Rf_PrintValue", VoidType, list(SEXPType), module = m)
protect = Function("Rf_protect", SEXPType, list(SEXPType), module = m)
unprotect = Function("Rf_unprotect", VoidType, list(Int32Type), module = m)
typeof = Function("TYPEOF", Int32Type, list(SEXPType), module = m)
#allocVector = Function("Rf_allocVector", SEXPType, list(Int32Type, Int32Type), module = m)
#allocVector3 = Function("Rf_allocVector3", SEXPType, list(Int32Type, Int32Type, pointerType(VoidType)), module = m)
#ScalarInteger = Function("Rf_ScalarInteger", SEXPType, list(Int32Type), module = m)
if(TRUE) {
type = ir$createCall(typeof, pr[[1]])
typeCond = ir$createICmp(ICMP_EQ, type, ir$createConstant(14L))
# Does the Select evaluate both sides of the ternary regardless? Appears to.
#
select = FALSE
if(select)
phi = xc = ir$createSelect(typeCond, ir$createCall(duplicate, pr[[1]]), ir$createCall(coerceVector, pr[[1]], 14L))
else {
b2 = Block(rr$fun, "dup")
b3 = Block(rr$fun, "coerce")
b4 = Block(rr$fun, "do")
#print(getBlocks(rr$fun))
ir$createCondBranch(typeCond, b2, b3)
ir$setInsertBlock(b2)
dup = ir$createCall(duplicate, pr[[1]])
ir$createBranch(b4)
ir$setInsertBlock(b3)
#XXX ir$createCall(printValue, pr[[1]])
coerce = ir$createCall(coerceVector, pr[[1]], 14L)
ir$createBranch(b4)
ir$setInsertBlock(b4)
# Not needed in this case.
# ir$createCall(protect, xc)
phi = ir$createPHI( SEXPType, 2L )
addIncoming(phi, dup, b2)
addIncoming(phi, coerce, b3)
}
x2 = ir$createCall(REAL, phi)
len = ir$createCall(asInteger, pr[[2]])
mu = ir$createCall(asReal, pr[[3]])
sd = ir$createCall(asReal, pr[[4]])
ir$createCall(m$v_dnorm, x2, len, mu, sd)
# ir$createCall(unprotect, 1L)
ir$createReturn(phi)
#print(getBlocks(rr$fun))
} else {
# ir$createCall(printValue, pr[[1]])
# ir$createCall(printValue, pr[[2]])
# ir$createCall(printValue, pr[[3]])
# ir$createCall(printValue, pr[[4]])
# ir$createReturn(ir$createCall(ScalarInteger, 11))
ir$createReturn(pr[[1]])
}
#XXX Do we actually need these.
# register the symbols for the R API routines we used.
# More than not needing them, they cause problems.
# DON'T DO THIS
if(FALSE) {
rtns = c("REAL", "Rf_duplicate", "Rf_asReal", "Rf_asInteger", "Rf_coerceVector", "Rf_allocVector", "Rf_allocVector3",
"Rf_unprotect", "Rf_protect",
"Rf_PrintValue")
rptrs = lapply(rtns, function(sym) .Call("R_llvm_dlsym", sym, NULL))
names(rptrs) = rtns
llvmAddSymbol(.syms = rptrs)
}
ee = ExecutionEngine(m)
x = seq(-1.5, 1.5, by = .1)
#print(getBlocks(rr$fun)) # R_v_dnorm
if(FALSE) {
# Use the Rffi invocation
cif = Rffi::CIF(SEXPType, replicate(4, SEXPType))
y = .llvm( rr$fun, x, length(x), 0, 1, .ffi = cif)
}
fptr = getPointerToFunction(rr$fun, ee)@ref
sym = list(name = "R_v_dnorm", address = structure(fptr, class = "NativeSymbol"), dll = NULL)
y = .Call(sym, x, length(x), 0.1, 1.2)
z = c(-1L, 0L, 1L)
z2 = .Call(sym, z, length(z), 0, 1)
fun = function(x, mu, sd) { tmp = (x-mu)/sd; 1/(sd*2.506628)*exp(-.5*tmp*tmp)}
stopifnot(identical(y, fun(x, .1, 1.2)))
stopifnot(identical(z2, fun(z, 0, 1)))
truth = c(0.136675077204378, 0.152207587833673, 0.16833225640452, 0.184876815843572,
0.201642292524936, 0.218406151494248, 0.234926588580926, 0.250947887623335,
0.266206700434743, 0.280439049679568, 0.293387804447713, 0.304810338740607,
0.31448605753319, 0.322223465972462, 0.327866466006629, 0.331299591513103,
0.332451936758599, 0.331299591513103, 0.327866466006629, 0.322223465972462,
0.31448605753319, 0.304810338740607, 0.293387804447713, 0.280439049679568,
0.266206700434743, 0.250947887623335, 0.234926588580926, 0.218406151494248,
0.201642292524936, 0.184876815843572, 0.16833225640452)
# stopifnot(identical(y, truth))
|
# Plot 1: histogram of household global active power, written to plot1.png.
# NOTE(review): assumes a data frame `elec` with a numeric
# Global_active_power column is already in the workspace -- confirm against
# the data-loading script that precedes this one.
png("plot1.png")
hist(
x = elec$Global_active_power,
breaks = 12,
col = "red",
main = "Global Active Power",
xlab = "Global Active Power (kilowatts)",
ylab ="Frequency"
)
dev.off()
| /plot1.R | no_license | dpjmullins/Coursera_Exploratory_Data_Analysis1 | R | false | false | 222 | r | png("plot1.png")
hist(
x = elec$Global_active_power,
breaks = 12,
col = "red",
main = "Global Active Power",
xlab = "Global Active Power (kilowatts)",
ylab ="Frequency"
)
dev.off()
|
# NRAIA-badCl.R
# Evaluate a nonlinear-model residual/Jacobian function on the Chloride data
# at the documented start values and at a deliberately extreme rate value.
# NOTE(review): rm(list=ls()) wipes the user's entire workspace; avoid in
# scripts meant to be sourced.
rm(list=ls())
library(NRAIA)
library(nlsr)
# Offset asymptotic-regression model:
# conc = Asym * (1 - exp(-exp(lrc) * (Time - c0)))
Nform <- conc ~ Asym *( 1 - exp(-exp(lrc) * (Time - c0)))
Nstart = c(Asym = 1, c0=1, lrc = log(0.25))
# Build the residual/Jacobian evaluator for the model on the Chloride data.
badCl <- model2rjfun(modelformula=Nform, pvec=Nstart, data=Chloride)
badCl(Nstart)
# Re-evaluate with an extreme rate parameter (lrc = 5) to probe bad scaling.
N2 <- c(Asym = 1, c0=1, lrc = 5)
badCl(N2)
| /archived/NRAIA-badCl.R | no_license | ArkaB-DS/GSOC21-improveNLS | R | false | false | 287 | r | # NRAIA-badCl.R
rm(list=ls())
library(NRAIA)
library(nlsr)
Nform <- conc ~ Asym *( 1 - exp(-exp(lrc) * (Time - c0)))
Nstart = c(Asym = 1, c0=1, lrc = log(0.25))
badCl <- model2rjfun(modelformula=Nform, pvec=Nstart, data=Chloride)
badCl(Nstart)
N2 <- c(Asym = 1, c0=1, lrc = 5)
badCl(N2)
|
# Register the private output_handler() method on the evaluator class.
# It builds an evaluate::new_output_handler that, instead of emitting output,
# stashes the most recent plot / warning / error into private fields so they
# can be inspected after evaluation.
evaluator$set(
"private", "output_handler",
function() {
# Reset all captured state before a new evaluation round.
ggplot2::set_last_plot(NULL)
private$last_plot <- NULL
private$last_warning <- NULL
private$last_error <- NULL
evaluate::new_output_handler(
graphics = function(x) {
# Prefer the ggplot object over the recorded grid graphics `x`; fall
# back to the raw recorded plot when no ggplot was produced.
last_plot <- ggplot2::last_plot()
if (is.null(last_plot)) {
private$last_plot <- x
} else {
private$last_plot <- last_plot
# Clear ggplot2's last-plot slot so it is not re-captured later.
ggplot2::set_last_plot(NULL)
}
return(NULL)
},
warning = function(x) {
private$last_warning <- x
},
error = function(x) {
private$last_error <- x
}
)
}
)
| /R/evaluator_output_hanlder.R | permissive | GregorDeCillia/kasper | R | false | false | 652 | r | evaluator$set(
"private", "output_handler",
function() {
ggplot2::set_last_plot(NULL)
private$last_plot <- NULL
private$last_warning <- NULL
private$last_error <- NULL
evaluate::new_output_handler(
graphics = function(x) {
last_plot <- ggplot2::last_plot()
if (is.null(last_plot)) {
private$last_plot <- x
} else {
private$last_plot <- last_plot
ggplot2::set_last_plot(NULL)
}
return(NULL)
},
warning = function(x) {
private$last_warning <- x
},
error = function(x) {
private$last_error <- x
}
)
}
)
|
\name{ex01.13}
\alias{ex01.13}
\docType{data}
\title{R Data set: ex01.13}
\description{
The \code{ex01.13} data frame has 153 rows and 1 column.
}
\usage{data(ex01.13)}
\format{
A data frame with 153 observations on the following variable.
\describe{
\item{\code{strength}}{a numeric vector}
}
}
\details{
Consult the web site \url{http://www.thomsonedu.com/statistics/devore} for additional online resources that are available for this book.
}
\source{
Devore, J. L. (2008) \emph{Probability and Statistics for Engineering and the Sciences (7th Edition)}, ISBN-10: 0495382175 ISBN-13: 9780495382171
}
\examples{
data(ex01.13)
str(ex01.13)
}
\keyword{datasets}
| /man/ex01.13.Rd | no_license | cran/Devore7 | R | false | false | 674 | rd | \name{ex01.13}
\alias{ex01.13}
\docType{data}
\title{R Data set: ex01.13}
\description{
The \code{ex01.13} data frame has 153 rows and 1 column.
}
\usage{data(ex01.13)}
\format{
A data frame with 153 observations on the following variable.
\describe{
\item{\code{strength}}{a numeric vector}
}
}
\details{
Consult the web site \url{http://www.thomsonedu.com/statistics/devore} for additional online resources that are available for this book.
}
\source{
Devore, J. L. (2008) \emph{Probability and Statistics for Engineering and the Sciences (7th Edition)}, ISBN-10: 0495382175 ISBN-13: 9780495382171
}
\examples{
data(ex01.13)
str(ex01.13)
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_daily_stats.R
\name{plot_daily_stats}
\alias{plot_daily_stats}
\title{Plot daily summary statistics}
\usage{
plot_daily_stats(
data,
dates = Date,
values = Value,
groups = STATION_NUMBER,
station_number,
roll_days = 1,
roll_align = "right",
water_year_start = 1,
start_year,
end_year,
exclude_years,
complete_years = FALSE,
months = 1:12,
ignore_missing = FALSE,
plot_extremes = TRUE,
plot_inner_percentiles = TRUE,
plot_outer_percentiles = TRUE,
inner_percentiles = c(25, 75),
outer_percentiles = c(5, 95),
add_year,
log_discharge = TRUE,
log_ticks = ifelse(log_discharge, TRUE, FALSE),
include_title = FALSE
)
}
\arguments{
\item{data}{Data frame of daily data that contains columns of dates, flow values, and (optional) groups (e.g. station numbers).
Leave blank or set to \code{NULL} if using \code{station_number} argument.}
\item{dates}{Name of column in \code{data} that contains dates formatted YYYY-MM-DD. Only required if dates column name is not
'Date' (default). Leave blank or set to \code{NULL} if using \code{station_number} argument.}
\item{values}{Name of column in \code{data} that contains numeric flow values, in units of cubic metres per second.
Only required if values column name is not 'Value' (default). Leave blank if using \code{station_number} argument.}
\item{groups}{Name of column in \code{data} that contains unique identifiers for different data sets, if applicable. Only required
if groups column name is not 'STATION_NUMBER'. Function will automatically group by a column named 'STATION_NUMBER' if
present. Remove the 'STATION_NUMBER' column beforehand to remove this grouping. Leave blank if using \code{station_number}
argument.}
\item{station_number}{Character string vector of seven digit Water Survey of Canada station numbers (e.g. \code{"08NM116"}) of
which to extract daily streamflow data from a HYDAT database. Requires \code{tidyhydat} package and a HYDAT database.
Leave blank if using \code{data} argument.}
\item{roll_days}{Numeric value of the number of days to apply a rolling mean. Default \code{1}.}
\item{roll_align}{Character string identifying the direction of the rolling mean from the specified date, either by the first
(\code{'left'}), last (\code{'right'}), or middle (\code{'center'}) day of the rolling n-day group of observations.
Default \code{'right'}.}
\item{water_year_start}{Numeric value indicating the month (\code{1} through \code{12}) of the start of water year for
analysis. Default \code{1}.}
\item{start_year}{Numeric value of the first year to consider for analysis. Leave blank or set well before start date (i.e.
\code{1800}) to use from the first year of the source data.}
\item{end_year}{Numeric value of the last year to consider for analysis. Leave blank or set well after end date (i.e.
\code{2100}) to use up to the last year of the source data.}
\item{exclude_years}{Numeric vector of years to exclude from analysis. Leave blank or set to \code{NULL} to include all years.}
\item{complete_years}{Logical value indicating whether to include only years with complete data in analysis. Default \code{FALSE}.}
\item{months}{Numeric vector of months to include in analysis. For example, \code{3} for March, \code{6:8} for Jun-Aug or
\code{c(10:12,1)} for first four months (Oct-Jan) when \code{water_year_start = 10} (Oct). Default summarizes all
months (\code{1:12}).}
\item{ignore_missing}{Logical value indicating whether dates with missing values should be included in the calculation. If
\code{TRUE} then a statistic will be calculated regardless of missing dates. If \code{FALSE} then only those statistics from
time periods with no missing dates will be returned. Default \code{FALSE}.}
\item{plot_extremes}{Logical value to indicate plotting a ribbon with the range of daily minimum and maximum flows.
Default \code{TRUE}.}
\item{plot_inner_percentiles}{Logical value indicating whether to plot the inner percentiles ribbon. Default \code{TRUE}.}
\item{plot_outer_percentiles}{Logical value indicating whether to plot the outer percentiles ribbon. Default \code{TRUE}.}
\item{inner_percentiles}{Numeric vector of two percentile values indicating the lower and upper limits of the
inner percentiles ribbon for plotting. Default \code{c(25,75)}, set to \code{NULL} for no inner ribbon.}
\item{outer_percentiles}{Numeric vector of two percentile values indicating the lower and upper limits of the
outer percentiles ribbon for plotting. Default \code{c(5,95)}, set to \code{NULL} for no outer ribbon.}
\item{add_year}{Numeric value indicating a year of daily flows to add to the daily statistics plot. Leave blank
or set to \code{NULL} for no years.}
\item{log_discharge}{Logical value to indicate plotting the discharge axis (Y-axis) on a logarithmic scale. Default \code{TRUE}.}
\item{log_ticks}{Logical value to indicate plotting logarithmic scale ticks when \code{log_discharge = TRUE}. Ticks will not
appear when \code{log_discharge = FALSE}. Defaults to \code{TRUE} when \code{log_discharge = TRUE}.}
\item{include_title}{Logical value to indicate adding the group/station number to the plot, if provided. Default \code{FALSE}.}
}
\value{
A list of ggplot2 objects with the following for each station provided:
\item{Daily_Stats}{a plot that contains daily flow statistics}
Default plots on each object:
\item{Mean}{daily mean}
\item{Median}{daily median}
\item{25-75 Percentiles}{a ribbon showing the range of data between the daily 25th and 75th percentiles}
\item{5-95 Percentiles}{a ribbon showing the range of data between the daily 5th and 95th percentiles}
\item{Minimum-Maximum}{a ribbon showing the range of data between the daily minimum and maximums}
\item{'Year'}{(on annual plots) the daily flows for the designated year}
}
\description{
Plots means, medians, maximums, minimums, and percentiles for each day of the year of flow values
from a daily streamflow data set. Can determine statistics of rolling mean days (e.g. 7-day flows) using the \code{roll_days}
argument. Calculates statistics from all values, unless specified. The Maximum-Minimum band can be removed using the
\code{plot_extremes} argument and the percentile bands can be customized using the \code{inner_percentiles} and
\code{outer_percentiles} arguments. Data calculated using \code{calc_daily_stats()} function. Returns a list of plots.
}
\examples{
# Run if HYDAT database has been downloaded (using tidyhydat::download_hydat())
if (file.exists(tidyhydat::hy_downloaded_db())) {
# Plot daily statistics using a data frame and data argument with defaults
flow_data <- tidyhydat::hy_daily_flows(station_number = "08NM116")
plot_daily_stats(data = flow_data,
start_year = 1980)
# Plot daily statistics using only years with no missing data
plot_daily_stats(station_number = "08NM116",
complete_years = TRUE)
# Plot daily statistics and add a specific year's daily flows
plot_daily_stats(station_number = "08NM116",
start_year = 1980,
add_year = 1985)
# Plot daily statistics for 7-day flows for July-September months only
plot_daily_stats(station_number = "08NM116",
start_year = 1980,
roll_days = 7,
months = 7:9)
}
}
\seealso{
\code{\link{calc_daily_stats}}
}
| /man/plot_daily_stats.Rd | no_license | cran/fasstr | R | false | true | 7,694 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_daily_stats.R
\name{plot_daily_stats}
\alias{plot_daily_stats}
\title{Plot daily summary statistics}
\usage{
plot_daily_stats(
data,
dates = Date,
values = Value,
groups = STATION_NUMBER,
station_number,
roll_days = 1,
roll_align = "right",
water_year_start = 1,
start_year,
end_year,
exclude_years,
complete_years = FALSE,
months = 1:12,
ignore_missing = FALSE,
plot_extremes = TRUE,
plot_inner_percentiles = TRUE,
plot_outer_percentiles = TRUE,
inner_percentiles = c(25, 75),
outer_percentiles = c(5, 95),
add_year,
log_discharge = TRUE,
log_ticks = ifelse(log_discharge, TRUE, FALSE),
include_title = FALSE
)
}
\arguments{
\item{data}{Data frame of daily data that contains columns of dates, flow values, and (optional) groups (e.g. station numbers).
Leave blank or set to \code{NULL} if using \code{station_number} argument.}
\item{dates}{Name of column in \code{data} that contains dates formatted YYYY-MM-DD. Only required if dates column name is not
'Date' (default). Leave blank or set to \code{NULL} if using \code{station_number} argument.}
\item{values}{Name of column in \code{data} that contains numeric flow values, in units of cubic metres per second.
Only required if values column name is not 'Value' (default). Leave blank if using \code{station_number} argument.}
\item{groups}{Name of column in \code{data} that contains unique identifiers for different data sets, if applicable. Only required
if groups column name is not 'STATION_NUMBER'. Function will automatically group by a column named 'STATION_NUMBER' if
present. Remove the 'STATION_NUMBER' column beforehand to remove this grouping. Leave blank if using \code{station_number}
argument.}
\item{station_number}{Character string vector of seven digit Water Survey of Canada station numbers (e.g. \code{"08NM116"}) of
which to extract daily streamflow data from a HYDAT database. Requires \code{tidyhydat} package and a HYDAT database.
Leave blank if using \code{data} argument.}
\item{roll_days}{Numeric value of the number of days to apply a rolling mean. Default \code{1}.}
\item{roll_align}{Character string identifying the direction of the rolling mean from the specified date, either by the first
(\code{'left'}), last (\code{'right'}), or middle (\code{'center'}) day of the rolling n-day group of observations.
Default \code{'right'}.}
\item{water_year_start}{Numeric value indicating the month (\code{1} through \code{12}) of the start of water year for
analysis. Default \code{1}.}
\item{start_year}{Numeric value of the first year to consider for analysis. Leave blank or set well before start date (i.e.
\code{1800}) to use from the first year of the source data.}
\item{end_year}{Numeric value of the last year to consider for analysis. Leave blank or set well after end date (i.e.
\code{2100}) to use up to the last year of the source data.}
\item{exclude_years}{Numeric vector of years to exclude from analysis. Leave blank or set to \code{NULL} to include all years.}
\item{complete_years}{Logical value indicating whether to include only years with complete data in analysis. Default \code{FALSE}.}
\item{months}{Numeric vector of months to include in analysis. For example, \code{3} for March, \code{6:8} for Jun-Aug or
\code{c(10:12,1)} for first four months (Oct-Jan) when \code{water_year_start = 10} (Oct). Default summarizes all
months (\code{1:12}).}
\item{ignore_missing}{Logical value indicating whether dates with missing values should be included in the calculation. If
\code{TRUE} then a statistic will be calculated regardless of missing dates. If \code{FALSE} then only those statistics from
time periods with no missing dates will be returned. Default \code{FALSE}.}
\item{plot_extremes}{Logical value to indicate plotting a ribbon with the range of daily minimum and maximum flows.
Default \code{TRUE}.}
\item{plot_inner_percentiles}{Logical value indicating whether to plot the inner percentiles ribbon. Default \code{TRUE}.}
\item{plot_outer_percentiles}{Logical value indicating whether to plot the outer percentiles ribbon. Default \code{TRUE}.}
\item{inner_percentiles}{Numeric vector of two percentile values indicating the lower and upper limits of the
inner percentiles ribbon for plotting. Default \code{c(25,75)}, set to \code{NULL} for no inner ribbon.}
\item{outer_percentiles}{Numeric vector of two percentile values indicating the lower and upper limits of the
outer percentiles ribbon for plotting. Default \code{c(5,95)}, set to \code{NULL} for no outer ribbon.}
\item{add_year}{Numeric value indicating a year of daily flows to add to the daily statistics plot. Leave blank
or set to \code{NULL} for no years.}
\item{log_discharge}{Logical value to indicate plotting the discharge axis (Y-axis) on a logarithmic scale. Default \code{FALSE}.}
\item{log_ticks}{Logical value to indicate plotting logarithmic scale ticks when \code{log_discharge = TRUE}. Ticks will not
appear when \code{log_discharge = FALSE}. Default to \code{TRUE} when \code{log_discharge = TRUE}.}
\item{include_title}{Logical value to indicate adding the group/station number to the plot, if provided. Default \code{FALSE}.}
}
\value{
A list of ggplot2 objects with the following for each station provided:
\item{Daily_Stats}{a plot that contains daily flow statistics}
Default plots on each object:
\item{Mean}{daily mean}
\item{Median}{daily median}
\item{25-75 Percentiles}{a ribbon showing the range of data between the daily 25th and 75th percentiles}
\item{5-95 Percentiles}{a ribbon showing the range of data between the daily 5th and 95th percentiles}
\item{Minimum-Maximum}{a ribbon showing the range of data between the daily minimum and maximums}
\item{'Year'}{(on annual plots) the daily flows for the designated year}
}
\description{
Plots means, medians, maximums, minimums, and percentiles for each day of the year of flow values
from a daily streamflow data set. Can determine statistics of rolling mean days (e.g. 7-day flows) using the \code{roll_days}
argument. Calculates statistics from all values, unless specified. The Maximum-Minimum band can be removed using the
\code{plot_extremes} argument and the percentile bands can be customized using the \code{inner_percentiles} and
\code{outer_percentiles} arguments. Data calculated using \code{calc_daily_stats()} function. Returns a list of plots.
}
\examples{
# Run if HYDAT database has been downloaded (using tidyhydat::download_hydat())
if (file.exists(tidyhydat::hy_downloaded_db())) {
# Plot daily statistics using a data frame and data argument with defaults
flow_data <- tidyhydat::hy_daily_flows(station_number = "08NM116")
plot_daily_stats(data = flow_data,
start_year = 1980)
# Plot daily statistics using only years with no missing data
plot_daily_stats(station_number = "08NM116",
complete_years = TRUE)
# Plot daily statistics and add a specific year's daily flows
plot_daily_stats(station_number = "08NM116",
start_year = 1980,
add_year = 1985)
# Plot daily statistics for 7-day flows for July-September months only
plot_daily_stats(station_number = "08NM116",
start_year = 1980,
roll_days = 7,
months = 7:9)
}
}
\seealso{
\code{\link{calc_daily_stats}}
}
|
# ---------------------------------------------------------------------------
# new.graphs.R
# Posterior plots of sex-chromosome transition rates in beetles.
# Reads an MCMC log ("combined.log"), down-samples 10,000 posterior draws,
# and draws:
#   1. marginal densities of XY gain/loss rates for Polyphaga and Adephaga
#   2. the same plot with the Xy+ (neo-Y) rates added, on a broken y-axis
#   3. densities of the overall per-branch transition probabilities
#   4. a jittered dot plot of those probabilities with highest-posterior-
#      density (HPD) intervals and posterior means
# Side effects: reads combined.log and draws on the active graphics device.
# ---------------------------------------------------------------------------
library(TeachingDemos)  # emp.hpd(): empirical HPD intervals
library(plotrix)        # axis.break(): slash marks for the broken y-axis

# Get data and down-sample the chain: keep the 8 transition-rate columns
# plus the two per-suborder transition probabilities (columns 16 and 17).
data.all <- read.table("combined.log", header = TRUE, row.names = "Iteration")
data <- data.all[, c(1:8, 16, 17)]
s.size <- 10000  # how many posterior draws to sample
rows <- sample(1:50000, s.size)  # assumes the log has >= 50000 rows -- TODO confirm
s.data <- data[rows, ]
est.means <- colMeans(s.data[, 1:10])
colnames(s.data) <- c("XO->XY", "XO->Xy+", "XY->XO", "XY->Xy+", "Xy+->XO",
                      "Xy+->XY", "A.XO->XY", "A.XY->XO",
                      "prob.tran.ade", "prob.tran.pol")
# Highest-posterior-density interval (lower, upper) for each parameter.
hpd.bars <- matrix(NA, 10, 2)
for (i in seq_len(10)) hpd.bars[i, ] <- emp.hpd(s.data[, i])
rownames(hpd.bars) <- colnames(s.data)

# Derived rates: total Y gain/loss per suborder plus the Xy+ components.
pol.y.gain <- s.data[, 1] + s.data[, 2]  # XO->XY plus XO->Xy+
pol.y.loss <- s.data[, 3] + s.data[, 5]  # XY->XO plus Xy+->XO
pol.yp.loss <- s.data[, 5]               # Xy+->XO
pol.y.to.yp <- s.data[, 4]               # XY->Xy+
ade.y.gain <- s.data[, 7]                # Adephaga XO->XY
ade.y.loss <- s.data[, 8]                # Adephaga XY->XO
t.prob.ade <- s.data[, 9]
t.prob.poly <- s.data[, 10]

# FIRST: basic plot of Y chromosome gain and loss, no Y+ rates.
plot(density(pol.y.loss),
     col = colors()[32],
     main = "Sex Chromosome\nTransition Rates",
     ylab = "Marginal Density",
     xlab = "Estimated Transitions per branch per million years",
     lwd = 4, ylim = c(0, 1200),
     xlim = c(0, .011),
     cex.main = .7,
     cex.lab = .7,
     new = FALSE)
lines(density(pol.y.gain), col = colors()[33], lwd = 3)
lines(density(ade.y.gain), col = colors()[123], lwd = 3)
lines(density(ade.y.loss), col = colors()[132], lwd = 3)
foo <- c("Polyphaga", "XY to XO", "XO to XY", "",
         "Adephaga", "XY to XO", "XO to XY")
legend(.0087, 1200, foo, y.intersp = 1.5, pch = 20, bty = "n", pt.cex = 1,
       cex = .5, col = c("white", colors()[32], colors()[33], "white",
                         "white", colors()[132], colors()[123]))
# Shade under each density curve (alternating hatch angles).
polygon(density(pol.y.loss), col = colors()[32], density = 180, lwd = .1)
polygon(density(pol.y.gain), col = colors()[33], density = 180, lwd = .1, angle = -45)
polygon(density(ade.y.gain), col = colors()[123], density = 180, lwd = .1)
polygon(density(ade.y.loss), col = colors()[132], density = 180, lwd = .1, angle = -45)

# SECOND: the same plot with Y+ gain and loss added. The y-axis is broken
# because the Xy+ -> XO density peaks far above the plotted range.
plot(density(pol.y.loss),
     col = colors()[32],
     main = "Sex Chromosome Transition Rates",
     ylab = "Marginal Density",
     xlab = "Estimated Transitions per branch per million years",
     cex.main = .7,
     cex.lab = .7,
     cex.axis = .6,
     lwd = 3, ylim = c(0, 1200),
     xlim = c(0, .011))
axis.break(2, 1057, breakcol = "black", style = "slash")
lines(density(pol.y.gain), col = colors()[33], lwd = 3)
lines(density(ade.y.gain), col = colors()[123], lwd = 3)
lines(density(ade.y.loss), col = colors()[132], lwd = 3)
# Rescale the Xy+ -> XO density so it fits on the axis; its actual peak
# (~2600) is written onto the axis below.
foobar <- density(pol.yp.loss)
foobar[[2]] <- foobar[[2]] * (1200 / max(foobar[[2]]))
yp.fixed <- foobar
lines(yp.fixed[1:2], col = colors()[258], lwd = 3)  # elements 1:2 are x and y
lines(density(pol.y.to.yp), col = colors()[414], lwd = 3)
foo <- c("Polyphaga", "XY to XO", "XO to XY", "XY+ to XO", "XY to XY+", "",
         "Adephaga", "XY to XO", "XO to XY")
legend(.0087, 1200, foo, y.intersp = 1.5, pch = 20, bty = "n", pt.cex = 1,
       cex = .5, col = c("white", colors()[32], colors()[33], colors()[258],
                         colors()[414], "white", "white", colors()[132],
                         colors()[123]))
# Blot out the printed "1200" tick label by overprinting it in white,
# then write the rescaled curve's true peak value "2600" in its place.
for (i in 1:100) mtext("1200", 2, 1, at = 1200, col = "white", font = 2, cex = .6)
mtext("2600", 2, 1, at = 1200, cex = .6)
# Optional shading for this plot, kept disabled as in the original:
#polygon(density(pol.y.loss),col=colors()[32],density=180,lwd=.1)
#polygon(density(pol.y.gain),col=colors()[33],density=180,lwd=.1,angle=-45)
#polygon(density(ade.y.gain),col=colors()[123],density=180,lwd=.1)
#polygon(density(ade.y.loss),col=colors()[132],density=180,lwd=.1,angle=-45)
#polygon(yp.fixed[1:2],col=colors()[258],density=180,lwd=.1,angle=-45)
#polygon(density(pol.y.to.yp),col=colors()[414],density=180,lwd=.1,angle=-45)

# THIRD: raw probability of any sex-chromosome transition, per suborder.
plot(density(t.prob.ade),
     col = colors()[32],
     main = "Probability of a Transition in Sex Chromosome System",
     ylab = "Marginal Density",
     xlab = "Estimated Transitions per branch per million years",
     lwd = 4, ylim = c(0, 370),
     xlim = c(0, .022))
lines(density(t.prob.poly), col = colors()[123], lwd = 4)
polygon(density(t.prob.ade), col = colors()[32], density = 180, lwd = .1)
polygon(density(t.prob.poly), col = colors()[123], density = 180, lwd = .1)

# FOURTH: jittered dot plot of each posterior draw with HPD bars and means.
# Column i of 'spreads' holds jittered x-positions centred on i.
spreads <- data.frame()
spreads[1:s.size, 1] <- 1
spreads[, 1] <- as.numeric(lapply(spreads[, 1], jitter, factor = 18))
for (i in 1:2) spreads[, i + 1] <- spreads[, i] + 1
plot(t.prob.poly ~ spreads[, 1], col = colors()[32], xlim = c(.5, 2.5),
     ylim = c(.002, .02), pch = 20, cex = .1, xlab = "Transition",
     ylab = "Probability of Transition per Million Years", xaxt = 'n',
     bg = "gray", main = "Posterior Distribution of Transition Rates")
points(t.prob.ade ~ spreads[, 2], col = colors()[125], pch = 20, cex = .1)
# HPD interval bars: rows 10 and 9 of hpd.bars (poly at x = 1, ade at x = 2).
for (i in 1:2) arrows(i, hpd.bars[11 - i, 1], i, hpd.bars[11 - i, 2],
                      col = "black", length = 0.1, angle = 90, code = 3, lwd = 2)
text(1:2, (.02), c("Polyphaga", "Adephaga"), cex = .8, col = "black")
# Posterior means, drawn as black rings (gold fill added in the next line
# of the original file).
for (i in 1:2) points(i, est.means[11 - i], pch = 21, col = "black", cex = 1.5, lwd = 2)
for(i in 2:1)points(i, est.means[11-i],pch=20,col="gold",cex=1.4,lwd=2) | /rseminar/r-scripts/new.graphs.R | no_license | coleoguy/coleoguy.github.io | R | false | false | 4,854 | r | require(TeachingDemos)
# Duplicate copy of the plotting script above (same source file repeated by
# the dataset dump). Plots posterior densities and HPD intervals of
# sex-chromosome transition rates for the beetle suborders Polyphaga and
# Adephaga from the MCMC log "combined.log". Code left byte-identical;
# comments only.
require(plotrix)
#get data and down sample
data.all <- read.table("combined.log", header=T, row.names="Iteration")
# keep the 8 rate columns plus the two transition-probability columns (16, 17)
data<-data.all[,c(1:8,16,17)]
s.size<-10000 #how many points do you want to sample
# NOTE(review): assumes the log holds at least 50000 rows -- confirm
rows <- sample(1:50000, s.size)
s.data<-data.frame()
s.data<-data[rows,]
est.means<-colMeans(s.data[,1:10])
colnames(s.data)<-c("XO->XY","XO->Xy+","XY->XO","XY->Xy+","Xy+->XO","Xy+->XY","A.XO->XY","A.XY->XO","prob.tran.ade","prob.tran.pol")
#calculate the highest posterior density for the data of interest
hpd.bars<-matrix(,10,2)
for(i in 1:10)hpd.bars[i,]<-emp.hpd(s.data[,i])
rownames(hpd.bars)<-colnames(s.data)
#NEW PLOTS
# derived rates: total Y gain/loss per suborder plus the Xy+ components
pol.y.gain<-s.data[,1]+s.data[,2]
pol.y.loss<-s.data[,3]+s.data[,5]
pol.yp.loss<-s.data[,5]
pol.y.to.yp<-s.data[,4]
ade.y.gain<-s.data[,7]
ade.y.loss<-s.data[,8]
t.prob.ade<-s.data[,9]
t.prob.poly<-s.data[,10]
#FIRST HERE IS A BASIC PLOT OF Y CHROMOSOME GAIN AND LOSS NO Y+ STUFF
plot(density(pol.y.loss),
col=colors()[32],
main="Sex Chromosome\nTransition Rates",
ylab="Marginal Density",
xlab="Estimated Transitions per branch per million years",
lwd=4,ylim=c(0,1200),
xlim=c(0,.011),
cex.main=.7,
cex.lab=.7,
new=F)
lines(density(pol.y.gain),col=colors()[33],lwd=3,)
lines(density(ade.y.gain),col=colors()[123],lwd=3,)
lines(density(ade.y.loss),col=colors()[132],lwd=3,)
foo<-c("Polyphaga","XY to XO","XO to XY","",
"Adephaga","XY to XO","XO to XY")
legend(.0087,1200,foo,y.intersp=1.5,pch=20,bty="n",pt.cex=1,cex=.5,col=c("white",colors()[32],colors()[33],"white","white",colors()[132],colors()[123]))
# shade under each density curve
polygon(density(pol.y.loss),col=colors()[32],density=180,lwd=.1)
polygon(density(pol.y.gain),col=colors()[33],density=180,lwd=.1,angle=-45)
polygon(density(ade.y.gain),col=colors()[123],density=180,lwd=.1)
polygon(density(ade.y.loss),col=colors()[132],density=180,lwd=.1,angle=-45)
#HERE IT IS WITH Y+ GAIN AND LOSS
plot(density(pol.y.loss),
col=colors()[32],
main="Sex Chromosome Transition Rates",
ylab="Marginal Density",
xlab="Estimated Transitions per branch per million years",
cex.main=.7,
cex.lab=.7,
cex.axis=.6,
lwd=3,ylim=c(0,1200),
xlim=c(0,.011))
# broken y-axis: the Xy+ loss density peaks far above the plotted range
axis.break(2,1057,breakcol="black",style="slash")
lines(density(pol.y.gain),col=colors()[33],lwd=3,)
lines(density(ade.y.gain),col=colors()[123],lwd=3,)
lines(density(ade.y.loss),col=colors()[132],lwd=3,)
#adjust yp to fit actual peak occurs at 2600
foobar<-density(pol.yp.loss)
foobar[[2]]<-foobar[[2]]*(1200/max(foobar[[2]]))
yp.fixed<-foobar
lines(yp.fixed[1:2],col=colors()[258],lwd=3,)
lines(density(pol.y.to.yp),col=colors()[414],lwd=3,)
foo<-c("Polyphaga","XY to XO","XO to XY","XY+ to XO","XY to XY+","","Adephaga","XY to XO","XO to XY")
legend(.0087,1200,foo,y.intersp=1.5,pch=20,bty="n",pt.cex=1,cex=.5,col=c("white",colors()[32],colors()[33],colors()[258],colors()[414],"white","white",colors()[132],colors()[123]))
# overprint the "1200" axis label in white, then relabel it with the
# rescaled curve's true peak value "2600"
for(i in 1:100)mtext("1200",2,1,at=1200,col="white",font=2,cex=.6)
mtext("2600",2,1,at=1200,cex=.6)
#polygon(density(pol.y.loss),col=colors()[32],density=180,lwd=.1)
#polygon(density(pol.y.gain),col=colors()[33],density=180,lwd=.1,angle=-45)
#polygon(density(ade.y.gain),col=colors()[123],density=180,lwd=.1)
#polygon(density(ade.y.loss),col=colors()[132],density=180,lwd=.1,angle=-45)
#polygon(yp.fixed[1:2],col=colors()[258],density=180,lwd=.1,angle=-45)
#polygon(density(pol.y.to.yp),col=colors()[414],density=180,lwd=.1,angle=-45)
#LETS MAKE A PLOT OF JUST THE RAW CHANCE OF A TRANSIITON HAPPENING IN BOTH SUBORDERS
plot(density(t.prob.ade),
col=colors()[32],
main="Probability of a Transition in Sex Chromosome System",
ylab="Marginal Density",
xlab="Estimated Transitions per branch per million years",
lwd=4,ylim=c(0,370),
xlim=c(0,.022))
lines(density(t.prob.poly),col=colors()[123],lwd=4,)
polygon(density(t.prob.ade),col=colors()[32],density=180,lwd=.1)
polygon(density(t.prob.poly),col=colors()[123],density=180,lwd=.1)
#create a jittered variable for spreading out the estimates
spreads<-data.frame()
spreads[1:s.size,1]<-1
spreads[,1]<-as.numeric(lapply(spreads[,1],jitter, factor=18))
# columns 2 and 3 are the same jitter shifted to centre on x = 2 and 3
for(i in 1:2) spreads[,i+1]<-spreads[,i]+1
plot(t.prob.poly~spreads[,1],col=colors()[32],xlim=c(.5,2.5),ylim=c(.002,.02),pch=20,cex=.1,xlab="Transition",ylab="Probability of Transition per Million Years", xaxt='n',bg="gray", main="Posterior Distribution of Transition Rates")
points(t.prob.ade~spreads[,2],col=colors()[125],pch=20,cex=.1)
# HPD interval bars: rows 10 and 9 of hpd.bars (poly at x = 1, ade at x = 2)
for(i in 1:2)arrows(i, hpd.bars[11-i,1], i, hpd.bars[11-i,2], col="black", length = 0.1, angle = 90,code = 3,lwd=2)
text(1:2,(.02), c("Polyphaga","Adephaga"),cex=.8,col="black")
# posterior means: black ring with a gold fill on top
for(i in 1:2)points(i, est.means[11-i],pch=21,col="black",cex=1.5,lwd=2)
for(i in 2:1)points(i, est.means[11-i],pch=20,col="gold",cex=1.4,lwd=2)
#This file is an amalgamation of a few files and so it redundant in many ways. The three steps of downloading data could easily be done in a more concise way.
# DownloadPitchFX.R
# downloads the massive MLB Gameday data.
# Author: apeecape
# Email: achikule at gmail dot com
# Updated: Jun 13 2010
# Version 0.4
# Version History
# 0.5 ~ grab player data, both pitchers and batters, ability to pick team
# 0.4 ~ get team data, and ability to grab team info, checks to see if regular season
# 0.3 ~ updated so 2010 works, fixed some bugs, and saves as tab delimited file
# 0.2 ~ inputs are start and end dates
# 0.1 ~ grab Pitch f/x data from MLB Gameday, specify date ranges (takes half a minute for a day's worth of data on my 2.5Ghz machine)
# Future Versions:
# ~ ability to pick pitchers, batters, teams
# - ability to grab matchups
# - better searching instead of tediously parsing through each XML file
# ~ connect to mysql database
# ~ don't overheat computer!
# ~ document Gameday Code
# downloading pitch f/x data from MLB website
# Get data from http://gd2.mlb.com/components/game/mlb/
# XML package http://www.omegahat.org/RSXML/shortIntro.html
# Perl script of same application by Mike Fast:
# http://fastballs.files.wordpress.com/2007/09/hack_28_parser_mikefast_test_pl.txt
# Less general R code from Erik Iverson of Blogistic Reflections:
# http://blogisticreflections.wordpress.com/2009/10/04/using-r-to-analyze-baseball-games-in-real-time/
# listing of pitch f/x tools by Baseball Analysts
# http://baseballanalysts.com/archives/2010/03/how_can_i_get_m.php
# downloadable pitch f/x database from Darrell Zimmerman
# http://www.wantlinux.net/category/baseball-data/
# I think gameday data starts 2005
# I think enhanced gameday (pitch fx) has all of 2009, most of 2008, some 2007, tiny bit 2006
# required libraries:
library(XML)
# code for <game type> in game.xml (input game.type in code)
# "S" ~ spring training, "R" ~ regular season, "D" ~ Division Series
# "L" ~ League Championship Series "W" ~ World Series
# code for <game gameday_sw> in game.xml
# http://sports.dir.groups.yahoo.com/group/RetroSQL/message/320
# "N" ~ missing, no pitch info
# "Y" ~ standard w/ pitch locations
# "E" ~ w/ pitch f/x
# "P" ~ for 2010, whatever that's supposed to mean
# code for teams
# code for players
# code for gameday
# code for pitch type
# code for atbat type
# checks for:
# gameday type
# home, away
# player, batter, pitch type
# -----------------------------------------------------------
# Download MLB Gameday Pitch f/x data for a range of dates and append one
# ">"-separated row per pitch to a text file.
#
# Args:
#   fileloc    output file; the header row is (re)written, data rows appended.
#   start.date first date to fetch, "YYYY-MM-DD".
#   end.date   last date to fetch (defaults to start.date).
#   URL.base   root URL of the MLB Gameday archive.
#   game.type  <game type> code to keep: "R" regular season, "S" spring
#              training, "D"/"L"/"W" postseason series.
#   grab.pitch XML attributes pulled from each <pitch> element.
#   grab.atbat XML attributes pulled from each <atbat> element.
#
# Side effects: network access to the Gameday server, writes to fileloc,
# prints progress. Requires the XML package to be attached.
# Returns: nothing useful (invisibly, the value of the last write()).
DownloadPitchFX <- function(fileloc = "./pitchfx.txt",
                            start.date = "2009-05-02", end.date = start.date,
                            URL.base = "http://gd2.mlb.com/components/game/mlb/",
                            game.type = "R",
                            grab.pitch = c("des", "type", "x", "y",
                                           "start_speed", "end_speed",
                                           "sz_top", "sz_bot", "pfx_x", "pfx_z", "px", "pz",
                                           "x0", "y0", "z0", "vx0", "vy0", "vz0", "ax", "ay", "az",
                                           "break_y", "break_angle", "break_length", "pitch_type",
                                           "type_confidence"),
                            grab.atbat = c("b", "s", "o", "batter", "pitcher", "b_height",
                                           "stand", "p_throws", "event")) {
  # Write the header row (this overwrites any existing file at fileloc).
  meta <- c("Year", "Month", "Day", "Inning", "Home", "Away")
  write(c(meta, grab.atbat, grab.pitch), file = fileloc,
        ncol = length(c(grab.atbat, grab.pitch)) + length(meta), sep = ">")
  # Expand the date range into one POSIXlt entry per day.
  start.date <- as.POSIXlt(start.date)
  end.date <- as.POSIXlt(end.date)
  diff.date <- as.numeric(difftime(end.date, start.date))
  date.range <- as.POSIXlt(seq(start.date, by = "days",
                               length = 1 + diff.date))
  for (i in seq_len(diff.date + 1)) {
    year <- date.range[i]$year + 1900  # POSIXlt years count from 1900
    month <- date.range[i]$mon + 1     # POSIXlt months are 0-based
    day <- date.range[i]$mday
    # Day directories are zero-padded, e.g. ".../year_2010/month_04/day_05/".
    URL.date <- paste(URL.base, "year_", year, "/",
                      ifelse(month >= 10, "month_", "month_0"), month, "/",
                      ifelse(day >= 10, "day_", "day_0"), day, "/", sep = "")
    # grab matchups for today
    ## URL.scoreboard <- paste(URL.date, "miniscoreboard.xml", sep = "")
    ## XML.scoreboard <- xmlInternalTreeParse(URL.scoreboard)
    ## parse.scoreboard <- xpathSApply(XML.scoreboard, "//game[@gameday_link]",
    ##                                 xmlGetAttr, "gameday_link")
    # Scrape the day's directory listing for game ids ("gid_..." links).
    HTML.day <- htmlParse(URL.date)
    parse.day <- xpathSApply(HTML.day, "//a[@*]", xmlGetAttr, "href")
    parse.day <- parse.day[grep("^gid_*", parse.day)]
    # Proceed only if games exist today.
    if (length(parse.day) >= 1) {
      for (game in seq_along(parse.day)) {
        print(game)  # progress: game index within the day
        URL.game <- paste(URL.date, parse.day[game], sep = "")
        HTML.game <- htmlParse(URL.game)
        parse.game.exists <- xpathSApply(HTML.game, "//a[@*]", xmlGetAttr, "href")
        # Skip directories with no game.xml (postponed/cancelled games).
        if (sum(match(parse.game.exists, "game.xml"), na.rm = TRUE) > 0) {
          # Game type (regular season, etc.) and gameday type (pitch f/x, etc.).
          XML.game <- xmlInternalTreeParse(paste(URL.game, "game.xml", sep = ""))
          parse.game <- sapply(c("type", "gameday_sw"), function(x)
                               xpathSApply(XML.game, "//game[@*]", xmlGetAttr, x))
          # Keep only the requested game type.
          if (parse.game['type'] == game.type) {
            # Team abbreviations; Gameday lists home first, then away --
            # TODO confirm the ordering against a known game.
            parse.teams <- sapply(c("abbrev"), function(x)
                                  xpathSApply(XML.game, "//team[@*]", xmlGetAttr, x))
            home <- parse.teams[1]; away <- parse.teams[2]
            # "E" and "P" are the gameday_sw codes that carry pitch f/x data.
            if (parse.game["gameday_sw"] %in% c("E", "P")) {
              # List the per-inning XML files for this game.
              HTML.Ninnings <- htmlParse(paste(URL.game, "inning/", sep = ""))
              parse.Ninnings <- xpathSApply(HTML.Ninnings, "//a[@*]", xmlGetAttr, "href")
              innings.played <- grep("^inning_[0-9]", parse.Ninnings)
              # Require more than one inning file as evidence data exists.
              if (length(innings.played) > 1) {
                for (inning in seq_along(innings.played)) {
                  URL.inning <- paste(URL.game, "inning/", "inning_", inning,
                                      ".xml", sep = "")
                  XML.inning <- xmlInternalTreeParse(URL.inning)
                  parse.atbat <- xpathSApply(XML.inning, "//atbat[@*]")
                  # Pitches thrown in each at-bat (to replicate at-bat rows).
                  parse.Npitches.atbat <- sapply(parse.atbat, function(x)
                                                 sum(names(xmlChildren(x)) == "pitch"))
                  if (length(parse.atbat) > 0) {
                    print(paste(parse.day[game], "inning =", inning))
                    # Pitch attributes: one row per pitch.
                    parse.pitch <- sapply(grab.pitch, function(x)
                                          as.character(xpathSApply(XML.inning, "//pitch[@*]",
                                                                   xmlGetAttr, x)))
                    # A lone pitch collapses to a named character vector;
                    # reshape it back to a one-row matrix. (The original
                    # tested class(x) == "character", which yields a length-2
                    # logical for matrices under R >= 4.0 and errors in if()
                    # under R >= 4.2.)
                    parse.pitch <- if (!is.matrix(parse.pitch)) {
                      t(parse.pitch)
                    } else apply(parse.pitch, 2, as.character)
                    # At-bat attributes, one row repeated once per pitch.
                    results.atbat <- t(sapply(parse.atbat, function(x)
                                              xmlAttrs(x)[grab.atbat]))
                    # drop = FALSE keeps the matrix shape when only one row
                    # results (the original re-transposed a dropped vector).
                    results.atbat <- results.atbat[rep(seq_len(nrow(results.atbat)),
                                                       times = parse.Npitches.atbat), ,
                                                   drop = FALSE]
                    # Append this inning's rows to the output file.
                    write(t(cbind(year, month, day, inning, home, away,
                                  results.atbat, parse.pitch)), file = fileloc,
                          ncol = length(c(grab.atbat, grab.pitch)) + length(meta),
                          append = TRUE, sep = ">")
                  }
                }
              }
            }
          }
        }
      }
    }
  }
}
# Pull three full MLB regular seasons of Pitch f/x data (opening day through
# the last day of each regular season) into ">"-delimited text files.
# NOTE(review): each call downloads an entire season from gd2.mlb.com and can
# take a very long time; run selectively.
DownloadPitchFX(fileloc = "./2010MLB.txt",start.date="2010-04-05",end.date="2010-10-03")
DownloadPitchFX(fileloc = "./2011MLB.txt",start.date="2011-03-31",end.date="2011-09-28")
DownloadPitchFX(fileloc = "./2012MLB.txt",start.date="2012-04-12",end.date="2012-10-03")
library(XML)
# GameID: collect MLB Gameday game IDs for a date range.
#
# For every day between start.date and end.date (inclusive), the day's
# directory listing on the Gameday server is scraped and every link
# starting with "gid_" is written to `fileloc` (one ID per line, trailing
# "/" stripped), preceded by a "GID" header line.
#
# Args:
#   fileloc:    path of the output file (single column, ">"-separated).
#   start.date: first day to scan, in a format as.POSIXlt() accepts.
#   end.date:   last day to scan (defaults to start.date).
#   URL.base:   root URL of the MLB Gameday archive.
#   game.type:  kept for interface compatibility; the game-type filter was
#               commented out in the original, so ALL listed games are
#               written regardless of type.
#
# Side effects only (writes fileloc); requires the XML package.
GameID <- function(fileloc = "./GameID.txt",
                   start.date = "2009-05-02", end.date = start.date,
                   URL.base = "http://gd2.mlb.com/components/game/mlb/",
                   game.type = "R") {
  # Header line.  `ncolumns` is spelled out: the original's `ncol` only
  # worked via partial argument matching.
  write("GID", file = fileloc, ncolumns = 1, sep = ">")
  # Expand the date range into one POSIXlt entry per day.
  start.date <- as.POSIXlt(start.date)
  end.date <- as.POSIXlt(end.date)
  diff.date <- as.numeric(difftime(end.date, start.date))
  date.range <- as.POSIXlt(seq(start.date, by = "days",
                               length = 1 + diff.date))
  for (i in seq_len(diff.date + 1)) {
    year <- date.range[i]$year + 1900   # POSIXlt years are 1900-based
    month <- date.range[i]$mon + 1      # POSIXlt months are 0-based
    day <- date.range[i]$mday
    # Build the day's directory URL, zero-padding month and day.
    URL.date <- paste(URL.base, "year_", year, "/",
                      ifelse(month >= 10, "month_", "month_0"), month, "/",
                      ifelse(day >= 10, "day_", "day_0"), day, "/", sep = "")
    # Scrape every href on the day's index page; game directories are the
    # links that begin with "gid_".
    HTML.day <- htmlParse(URL.date)
    parse.day <- xpathSApply(HTML.day, "//a[@*]", xmlGetAttr, "href")
    parse.day <- parse.day[grep("^gid_*", parse.day)]
    # if games exist today
    if (length(parse.day) >= 1) {
      for (game in seq_along(parse.day)) {
        # Drop the trailing "/" from the directory name before writing.
        write(substr(parse.day[game], 1, nchar(parse.day[game]) - 1),
              file = fileloc, ncolumns = 1, append = TRUE, sep = ">")
      }
    }
  }
}
# Collect the game-ID lists for the 2010-2012 regular seasons.
GameID(fileloc = "./GameID10.txt",start.date="2010-04-05",end.date="2010-10-03")
GameID(fileloc = "./GameID11.txt",start.date="2011-03-31",end.date="2011-09-28")
GameID(fileloc = "./GameID12.txt",start.date="2012-04-12",end.date="2012-10-03")
# a function to retrieve the homeplate umpire for every game with pitchfx data
library(XML)
# DownloadPitchFX <- function(fileloc = "./pitchfx.txt",
# Downloadump: scrape the home-plate umpire for every game with pitch-f/x
# data in a date range, appending one ">"-separated row per game:
#   Year>Month>Day>Home>Away>HPUmpire
# The umpire name is pulled from the free-text blurb in the game's
# boxscore.xml; when no inning data is available NA is written.
#
# Args:
#   fileloc:    output file path.
#   start.date: first day to scan (any format as.POSIXlt() accepts).
#   end.date:   last day to scan (defaults to start.date).
#   URL.base:   root URL of the MLB Gameday archive.
#   game.type:  game type to keep: "R" regular season, "S" spring,
#               "D" division series, "L" league championship, "W" world series.
#   grab.atbat: kept for interface compatibility; not used in this function.
#
# Side effects only (writes fileloc); requires the XML package.
Downloadump <- function(fileloc = "./pitchfx.txt",
                        start.date = "2009-05-02", end.date = start.date,
                        URL.base = "http://gd2.mlb.com/components/game/mlb/",
                        game.type = "R",
                        grab.atbat = c("b", "s", "o", "batter", "pitcher", "b_height",
                                       "stand", "p_throws", "event")) {
  # Header row; `ncolumns` spelled out (original relied on a positional /
  # partially-matched argument).
  meta <- c("Year", "Month", "Day", "Home", "Away")
  write(c(meta, "HPUmpire"), file = fileloc,
        ncolumns = length(meta) + 1, sep = ">")
  # Expand the date range into one POSIXlt entry per day.
  start.date <- as.POSIXlt(start.date)
  end.date <- as.POSIXlt(end.date)
  diff.date <- as.numeric(difftime(end.date, start.date))
  date.range <- as.POSIXlt(seq(start.date, by = "days",
                               length = 1 + diff.date))
  for (i in seq_len(diff.date + 1)) {
    year <- date.range[i]$year + 1900   # POSIXlt years are 1900-based
    month <- date.range[i]$mon + 1      # POSIXlt months are 0-based
    day <- date.range[i]$mday
    print(paste("month=", month, "day=", day))
    URL.date <- paste(URL.base, "year_", year, "/",
                      ifelse(month >= 10, "month_", "month_0"), month, "/",
                      ifelse(day >= 10, "day_", "day_0"), day, "/", sep = "")
    # List the day's game directories ("gid_..." links on the index page).
    HTML.day <- htmlParse(URL.date)
    parse.day <- xpathSApply(HTML.day, "//a[@*]", xmlGetAttr, "href")
    parse.day <- parse.day[grep("^gid_*", parse.day)]
    # if games exist today
    if (length(parse.day) >= 1) {
      for (game in seq_along(parse.day)) {
        print(game)
        # BUG FIX: reset per game.  The original only assigned hpumpire
        # inside the innings check, so a game without parsable inning data
        # wrote the PREVIOUS game's umpire (or errored on the first one).
        hpumpire <- NA
        URL.game <- paste(URL.date, parse.day[game], sep = "")
        HTML.game <- htmlParse(URL.game)
        parse.game.exists <- xpathSApply(HTML.game, "//a[@*]", xmlGetAttr, "href")
        # Skip directories without a game.xml.
        if (sum(match(parse.game.exists, "game.xml"), na.rm = TRUE) > 0) {
          # Game type (regular season, etc.) and gameday type (pitch f/x, etc.)
          XML.game <- xmlInternalTreeParse(paste(URL.game, "game.xml", sep = ""))
          parse.game <- sapply(c("type", "gameday_sw"), function(x)
            xpathSApply(XML.game, "//game[@*]", xmlGetAttr, x))
          if (parse.game["type"] == game.type) {
            # Team abbreviations; first entry taken as home, second as away
            # (preserved from the original -- verify against game.xml ordering).
            parse.teams <- sapply(c("abbrev"), function(x)
              xpathSApply(XML.game, "//team[@*]", xmlGetAttr, x))
            home <- parse.teams[1]
            away <- parse.teams[2]
            # gameday_sw "E" or "P" flags games with pitch-f/x data.
            if (parse.game["gameday_sw"] == "E" | parse.game["gameday_sw"] == "P") {
              HTML.Ninnings <- htmlParse(paste(URL.game, "inning/", sep = ""))
              parse.Ninnings <- xpathSApply(HTML.Ninnings, "//a[@*]", xmlGetAttr, "href")
              # Require more than one inning file before trusting the boxscore.
              if (length(grep("^inning_[0-9]", parse.Ninnings)) > 1) {
                # (The original's dead chained assignment
                # `URL.boxscore <- URL.game <- ...` is removed.)
                URL.boxscore <- paste(URL.game, "boxscore.xml", sep = "")
                parsed.boxscore <- xmlTreeParse(URL.boxscore)
                # game.info is a free-text umpire/weather blurb; capture the
                # name between "HP: " and ". 1B:".
                game.info <- xmlValue(parsed.boxscore$doc[[1]][[6]])
                hpumpire <- sub(pattern = ".*?HP: (.*). 1B:.*",
                                replacement = "\\1", x = game.info)
              }
              write(t(cbind(year, month, day, home, away, hpumpire)),
                    file = fileloc, ncolumns = 1 + length(meta),
                    append = TRUE, sep = ">")
            }
          }
        }
      }
    }
  }
}
# Collect home-plate umpires for the 2010-2012 regular seasons.
Downloadump(fileloc="./umps2010.txt",start.date="2010-04-05",end.date="2010-10-03")
Downloadump(fileloc="umps2011.txt",start.date="2011-03-31",end.date="2011-09-28")
Downloadump(fileloc="umps2012.txt",start.date="2012-04-12",end.date="2012-10-03")
| /GetPitchFXData.R | no_license | anthonywittemann/Sabermetrics-R-scripts | R | false | false | 18,890 | r | #This file is an amalgamation of a few files and so it redundant in many ways. The three steps of downloading data could easily be done in a more concise way.
# DownloadPitchFX.R
# downloads the massive MLB Gameday data.
# Author: apeecape
# Email: achikule at gmail dot com
# Updated: Jun 13 2010
# Version 0.4
# Version History
# 0.5 ~ grab player data, both pitchers and batters, ability to pick team
# 0.4 ~ get team data, and ability to grab team info, checks to see if regular season
# 0.3 ~ updated so 2010 works, fixed some bugs, and saves as tab delimited file
# 0.2 ~ inputs are start and end dates
# 0.1 ~ grab Pitch f/x data from MLB Gameday, specify date ranges (takes half a minute for a day's worth of data on my 2.5Ghz machine)
# Future Versions:
# ~ ability to pick pitchers, batters, teams
# - ability to grab matchups
# - better searching instead of tediously parsing through each XML file
# ~ connect to mysql database
# ~ don't overheat computer!
# ~ document Gameday Code
# downloading pitch f/x data from MLB website
# Get data from http://gd2.mlb.com/components/game/mlb/
# XML package http://www.omegahat.org/RSXML/shortIntro.html
# Perl script of same application by Mike Fast:
# http://fastballs.files.wordpress.com/2007/09/hack_28_parser_mikefast_test_pl.txt
# Less general R code from Erik Iverson of Blogistic Reflections:
# http://blogisticreflections.wordpress.com/2009/10/04/using-r-to-analyze-baseball-games-in-real-time/
# listing of pitch f/x tools by Baseball Analysts
# http://baseballanalysts.com/archives/2010/03/how_can_i_get_m.php
# downloadable pitch f/x database from Darrell Zimmerman
# http://www.wantlinux.net/category/baseball-data/
# I think gameday data starts 2005
# I think enhanced gameday (pitch fx) has all of 2009, most of 2008, some 2007, tiny bit 2006
# required libraries:
library(XML)
# code for <game type> in game.xml (input game.type in code)
# "S" ~ spring training, "R" ~ regular season, "D" ~ Division Series
# "L" ~ League Championship Series "W" ~ World Series
# code for <game gameday_sw> in game.xml
# http://sports.dir.groups.yahoo.com/group/RetroSQL/message/320
# "N" ~ missing, no pitch info
# "Y" ~ standard w/ pitch locations
# "E" ~ w/ pitch f/x
# "P" ~ for 2010, whatever that's supposed to mean
# code for teams
# code for players
# code for gameday
# code for pitch type
# code for atbat type
# checks for:
# gameday type
# home, away
# player, batter, pitch type
# -----------------------------------------------------------
# DownloadPitchFX: scrape MLB Gameday pitch-f/x data for a date range and
# append one ">"-separated row per PITCH to `fileloc`.
#
# For each day in [start.date, end.date] the Gameday directory listing is
# walked; each game of type `game.type` whose gameday_sw flag marks
# pitch-f/x data ("E" or "P") is parsed inning by inning.  Each output row
# holds the meta columns (date, inning, teams), the at-bat attributes
# (`grab.atbat`, repeated once per pitch of that at-bat) and the pitch
# attributes (`grab.pitch`).
#
# NOTE(review): `ncol` works only via partial matching of write()'s
# `ncolumns` argument; left byte-identical in this documentation pass.
# Requires the XML package; side effects only (writes fileloc).
DownloadPitchFX <- function(fileloc = "./pitchfx.txt",
                            start.date = "2009-05-02", end.date = start.date,
                            URL.base = "http://gd2.mlb.com/components/game/mlb/",
                            game.type = "R",
                            grab.pitch = c("des", "type", "x", "y",
                                           "start_speed", "end_speed",
                                           "sz_top", "sz_bot", "pfx_x", "pfx_z", "px", "pz",
                                           "x0", "y0", "z0", "vx0", "vy0", "vz0", "ax", "ay", "az",
                                           "break_y", "break_angle", "break_length", "pitch_type",
                                           "type_confidence"),
                            grab.atbat = c("b", "s", "o", "batter", "pitcher", "b_height",
                                           "stand", "p_throws", "event")) {
  # write initial variables (the header row) on file
  meta <- c("Year", "Month", "Day", "Inning", "Home", "Away")
  write(c(meta, grab.atbat, grab.pitch), file = fileloc,
        ncol = length(c(grab.atbat, grab.pitch)) + length(meta), sep = ">")
  # transfer date info: expand the range into one POSIXlt entry per day
  start.date <- as.POSIXlt(start.date); end.date <- as.POSIXlt(end.date);
  diff.date <- as.numeric(difftime(end.date, start.date))
  date.range <- as.POSIXlt(seq(start.date, by = "days",
                               length = 1 + diff.date))
  for (i in 1:(diff.date+1)) {
    year <- date.range[i]$year + 1900  # POSIXlt years are 1900-based
    month <- date.range[i]$mon + 1     # POSIXlt months are 0-based
    day <- date.range[i]$mday
    # Build the day's directory URL, zero-padding month and day.
    URL.date <- paste(URL.base, "year_", year, "/",
                      ifelse(month >= 10, "month_", "month_0"), month, "/",
                      ifelse(day >= 10, "day_", "day_0"), day, "/", sep = "")
    # grab matchups for today (scoreboard approach kept for reference)
    ## URL.scoreboard <- paste(URL.date, "miniscoreboard.xml", sep = "")
    ## XML.scoreboard <- xmlInternalTreeParse(URL.scoreboard)
    ## parse.scoreboard <- xpathSApply(XML.scoreboard, "//game[@gameday_link]",
    ##                                 xmlGetAttr, "gameday_link")
    # Game directories are the "gid_..." links on the day's index page.
    HTML.day <- htmlParse(URL.date)
    parse.day <- xpathSApply(HTML.day, "//a[@*]", xmlGetAttr, "href")
    parse.day <- parse.day[grep("^gid_*", parse.day)]
    # if games exist today
    if (length(parse.day) >= 1) {
      # for each game
      for (game in 1:length(parse.day)) {
        print(game)
        URL.game <- paste(URL.date, parse.day[game], sep = "")
        HTML.game <- htmlParse(URL.game)
        parse.game.exists <- xpathSApply(HTML.game, "//a[@*]", xmlGetAttr, "href")
        # if game.xml exists
        if (sum(match(parse.game.exists, "game.xml"), na.rm = T) > 0) {
          # grab game type (regular season, etc.) and gameday type (pitch f/x, etc.)
          XML.game <- xmlInternalTreeParse(paste(URL.game, "game.xml", sep = ""))
          parse.game <- sapply(c("type", "gameday_sw"), function (x)
            xpathSApply(XML.game, "//game[@*]", xmlGetAttr, x))
          # if proper game type: "R" ~ regular season, "S" ~ spring, "D" ~ division series
          # "L" ~ league championship series, "W" ~ world series
          if (parse.game['type'] == game.type) {
            # grab team names (first entry treated as home, second as away --
            # preserved from original; verify against game.xml ordering)
            parse.teams <- sapply(c("abbrev"), function (x)
              xpathSApply(XML.game, "//team[@*]", xmlGetAttr, x))
            home <- parse.teams[1]; away <- parse.teams[2]
            # if pitch f/x data exists ("E"/"P" flags)
            if (parse.game["gameday_sw"] == "E" | parse.game["gameday_sw"] == "P") {
              # grab number of innings played
              HTML.Ninnings <- htmlParse(paste(URL.game, "inning/", sep = ""))
              parse.Ninnings <- xpathSApply(HTML.Ninnings, "//a[@*]", xmlGetAttr, "href")
              # check to see if game has data by requiring innings > 1
              if (length(grep("^inning_[0-9]", parse.Ninnings)) > 1) {
                # for each inning
                for (inning in 1:length(grep("^inning_[0-9]", parse.Ninnings))) {
                  # grab inning info
                  URL.inning <- paste(URL.game, "inning/", "inning_", inning,
                                      ".xml", sep = "")
                  XML.inning <- xmlInternalTreeParse(URL.inning)
                  parse.atbat <- xpathSApply(XML.inning, "//atbat[@*]")
                  # pitches per at-bat, used below to repeat at-bat rows
                  parse.Npitches.atbat <- sapply(parse.atbat, function(x)
                    sum(names(xmlChildren(x)) == "pitch"))
                  # check to see if any at-bat exists
                  if (length(parse.atbat) > 0) {
                    print(paste(parse.day[game], "inning =", inning))
                    # parse attributes from pitch and atbat.
                    # NOTE: sapply returns a plain character vector when the
                    # inning has exactly one pitch and a matrix otherwise;
                    # the class checks below normalise both shapes to a
                    # one-row / n-row matrix.  Order-sensitive -- left as-is.
                    parse.pitch <- sapply(grab.pitch, function(x)
                      as.character(xpathSApply(XML.inning, "//pitch[@*]",
                                               xmlGetAttr, x)))
                    parse.pitch <- if (class(parse.pitch) == "character") {
                      t(parse.pitch)
                    } else apply(parse.pitch, 2, as.character)
                    results.atbat <- t(sapply(parse.atbat, function(x)
                      xmlAttrs(x)[grab.atbat]))
                    # repeat each at-bat row once per pitch in that at-bat
                    results.atbat <- results.atbat[rep(seq(nrow(results.atbat)),
                                                       times = parse.Npitches.atbat),]
                    results.atbat <- if (class(results.atbat) == "character") {
                      t(results.atbat)
                    } else results.atbat
                    # write results, one row per pitch
                    write(t(cbind(year, month, day, inning, home, away,
                                  results.atbat, parse.pitch)), file = fileloc,
                          ncol = length(c(grab.atbat, grab.pitch)) + length(meta),
                          append = T, sep = ">")
                  }
                }
              }
            }
          }
        }
      }
    }
  }
}
# Download full pitch-f/x data for the 2010-2012 regular seasons, one
# ">"-separated output file per season (opening day through final day).
DownloadPitchFX(fileloc = "./2010MLB.txt",start.date="2010-04-05",end.date="2010-10-03")
DownloadPitchFX(fileloc = "./2011MLB.txt",start.date="2011-03-31",end.date="2011-09-28")
DownloadPitchFX(fileloc = "./2012MLB.txt",start.date="2012-04-12",end.date="2012-10-03")
library(XML)
# GameID: collect MLB Gameday game IDs for a date range.
#
# For every day between start.date and end.date (inclusive), the day's
# directory listing on the Gameday server is scraped and every link
# starting with "gid_" is written to `fileloc` (one ID per line, trailing
# "/" stripped), preceded by a "GID" header line.
#
# Args:
#   fileloc:    path of the output file (single column, ">"-separated).
#   start.date: first day to scan, in a format as.POSIXlt() accepts.
#   end.date:   last day to scan (defaults to start.date).
#   URL.base:   root URL of the MLB Gameday archive.
#   game.type:  kept for interface compatibility; the game-type filter was
#               commented out in the original, so ALL listed games are
#               written regardless of type.
#
# Side effects only (writes fileloc); requires the XML package.
GameID <- function(fileloc = "./GameID.txt",
                   start.date = "2009-05-02", end.date = start.date,
                   URL.base = "http://gd2.mlb.com/components/game/mlb/",
                   game.type = "R") {
  # Header line.  `ncolumns` is spelled out: the original's `ncol` only
  # worked via partial argument matching.
  write("GID", file = fileloc, ncolumns = 1, sep = ">")
  # Expand the date range into one POSIXlt entry per day.
  start.date <- as.POSIXlt(start.date)
  end.date <- as.POSIXlt(end.date)
  diff.date <- as.numeric(difftime(end.date, start.date))
  date.range <- as.POSIXlt(seq(start.date, by = "days",
                               length = 1 + diff.date))
  for (i in seq_len(diff.date + 1)) {
    year <- date.range[i]$year + 1900   # POSIXlt years are 1900-based
    month <- date.range[i]$mon + 1      # POSIXlt months are 0-based
    day <- date.range[i]$mday
    # Build the day's directory URL, zero-padding month and day.
    URL.date <- paste(URL.base, "year_", year, "/",
                      ifelse(month >= 10, "month_", "month_0"), month, "/",
                      ifelse(day >= 10, "day_", "day_0"), day, "/", sep = "")
    # Scrape every href on the day's index page; game directories are the
    # links that begin with "gid_".
    HTML.day <- htmlParse(URL.date)
    parse.day <- xpathSApply(HTML.day, "//a[@*]", xmlGetAttr, "href")
    parse.day <- parse.day[grep("^gid_*", parse.day)]
    # if games exist today
    if (length(parse.day) >= 1) {
      for (game in seq_along(parse.day)) {
        # Drop the trailing "/" from the directory name before writing.
        write(substr(parse.day[game], 1, nchar(parse.day[game]) - 1),
              file = fileloc, ncolumns = 1, append = TRUE, sep = ">")
      }
    }
  }
}
# Collect the game-ID lists for the 2010-2012 regular seasons.
GameID(fileloc = "./GameID10.txt",start.date="2010-04-05",end.date="2010-10-03")
GameID(fileloc = "./GameID11.txt",start.date="2011-03-31",end.date="2011-09-28")
GameID(fileloc = "./GameID12.txt",start.date="2012-04-12",end.date="2012-10-03")
# a function to retrieve the homeplate umpire for every game with pitchfx data
library(XML)
# DownloadPitchFX <- function(fileloc = "./pitchfx.txt",
# Downloadump: scrape the home-plate umpire for every game with pitch-f/x
# data in a date range, appending one ">"-separated row per game:
#   Year>Month>Day>Home>Away>HPUmpire
# The umpire name is pulled from the free-text blurb in the game's
# boxscore.xml; when no inning data is available NA is written.
#
# Args:
#   fileloc:    output file path.
#   start.date: first day to scan (any format as.POSIXlt() accepts).
#   end.date:   last day to scan (defaults to start.date).
#   URL.base:   root URL of the MLB Gameday archive.
#   game.type:  game type to keep: "R" regular season, "S" spring,
#               "D" division series, "L" league championship, "W" world series.
#   grab.atbat: kept for interface compatibility; not used in this function.
#
# Side effects only (writes fileloc); requires the XML package.
Downloadump <- function(fileloc = "./pitchfx.txt",
                        start.date = "2009-05-02", end.date = start.date,
                        URL.base = "http://gd2.mlb.com/components/game/mlb/",
                        game.type = "R",
                        grab.atbat = c("b", "s", "o", "batter", "pitcher", "b_height",
                                       "stand", "p_throws", "event")) {
  # Header row; `ncolumns` spelled out (original relied on a positional /
  # partially-matched argument).
  meta <- c("Year", "Month", "Day", "Home", "Away")
  write(c(meta, "HPUmpire"), file = fileloc,
        ncolumns = length(meta) + 1, sep = ">")
  # Expand the date range into one POSIXlt entry per day.
  start.date <- as.POSIXlt(start.date)
  end.date <- as.POSIXlt(end.date)
  diff.date <- as.numeric(difftime(end.date, start.date))
  date.range <- as.POSIXlt(seq(start.date, by = "days",
                               length = 1 + diff.date))
  for (i in seq_len(diff.date + 1)) {
    year <- date.range[i]$year + 1900   # POSIXlt years are 1900-based
    month <- date.range[i]$mon + 1      # POSIXlt months are 0-based
    day <- date.range[i]$mday
    print(paste("month=", month, "day=", day))
    URL.date <- paste(URL.base, "year_", year, "/",
                      ifelse(month >= 10, "month_", "month_0"), month, "/",
                      ifelse(day >= 10, "day_", "day_0"), day, "/", sep = "")
    # List the day's game directories ("gid_..." links on the index page).
    HTML.day <- htmlParse(URL.date)
    parse.day <- xpathSApply(HTML.day, "//a[@*]", xmlGetAttr, "href")
    parse.day <- parse.day[grep("^gid_*", parse.day)]
    # if games exist today
    if (length(parse.day) >= 1) {
      for (game in seq_along(parse.day)) {
        print(game)
        # BUG FIX: reset per game.  The original only assigned hpumpire
        # inside the innings check, so a game without parsable inning data
        # wrote the PREVIOUS game's umpire (or errored on the first one).
        hpumpire <- NA
        URL.game <- paste(URL.date, parse.day[game], sep = "")
        HTML.game <- htmlParse(URL.game)
        parse.game.exists <- xpathSApply(HTML.game, "//a[@*]", xmlGetAttr, "href")
        # Skip directories without a game.xml.
        if (sum(match(parse.game.exists, "game.xml"), na.rm = TRUE) > 0) {
          # Game type (regular season, etc.) and gameday type (pitch f/x, etc.)
          XML.game <- xmlInternalTreeParse(paste(URL.game, "game.xml", sep = ""))
          parse.game <- sapply(c("type", "gameday_sw"), function(x)
            xpathSApply(XML.game, "//game[@*]", xmlGetAttr, x))
          if (parse.game["type"] == game.type) {
            # Team abbreviations; first entry taken as home, second as away
            # (preserved from the original -- verify against game.xml ordering).
            parse.teams <- sapply(c("abbrev"), function(x)
              xpathSApply(XML.game, "//team[@*]", xmlGetAttr, x))
            home <- parse.teams[1]
            away <- parse.teams[2]
            # gameday_sw "E" or "P" flags games with pitch-f/x data.
            if (parse.game["gameday_sw"] == "E" | parse.game["gameday_sw"] == "P") {
              HTML.Ninnings <- htmlParse(paste(URL.game, "inning/", sep = ""))
              parse.Ninnings <- xpathSApply(HTML.Ninnings, "//a[@*]", xmlGetAttr, "href")
              # Require more than one inning file before trusting the boxscore.
              if (length(grep("^inning_[0-9]", parse.Ninnings)) > 1) {
                # (The original's dead chained assignment
                # `URL.boxscore <- URL.game <- ...` is removed.)
                URL.boxscore <- paste(URL.game, "boxscore.xml", sep = "")
                parsed.boxscore <- xmlTreeParse(URL.boxscore)
                # game.info is a free-text umpire/weather blurb; capture the
                # name between "HP: " and ". 1B:".
                game.info <- xmlValue(parsed.boxscore$doc[[1]][[6]])
                hpumpire <- sub(pattern = ".*?HP: (.*). 1B:.*",
                                replacement = "\\1", x = game.info)
              }
              write(t(cbind(year, month, day, home, away, hpumpire)),
                    file = fileloc, ncolumns = 1 + length(meta),
                    append = TRUE, sep = ">")
            }
          }
        }
      }
    }
  }
}
# Collect home-plate umpires for the 2010-2012 regular seasons.
Downloadump(fileloc="./umps2010.txt",start.date="2010-04-05",end.date="2010-10-03")
Downloadump(fileloc="umps2011.txt",start.date="2011-03-31",end.date="2011-09-28")
Downloadump(fileloc="umps2012.txt",start.date="2012-04-12",end.date="2012-10-03")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CDS_ANA.R
\name{CodonCountPos}
\alias{CodonCountPos}
\title{Create a file with Codon count numbers (adjust by position) and a file with Codon usage}
\usage{
CodonCountPos(filenm, CodonPerc, xlsxFS, f, metds = 1)
}
\arguments{
\item{filenm}{DNA sequence file}
\item{CodonPerc}{Codon Usage Table file}
\item{xlsxFS}{Result Excel file}
\item{f}{Input file serial number}
\item{metds}{Method option (default 1)}
}
\value{
None (Result files with "*_codonSEQ.txt" and "_codonUSTB" and Excel sheets will be created)
}
\description{
Create a file with Codon count numbers (adjust by position) and a file with Codon usage
}
| /man/CodonCountPos.Rd | no_license | shchwang8/CDSAnalysis | R | false | true | 624 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CDS_ANA.R
\name{CodonCountPos}
\alias{CodonCountPos}
\title{Create a file with Codon count numbers (adjust by position) and a file with Codon usage}
\usage{
CodonCountPos(filenm, CodonPerc, xlsxFS, f, metds = 1)
}
\arguments{
\item{infile}{"DNA sequence file", "Codon Usage Table file", "Result Excel file and "Input file serial number"}
}
\value{
None (Result files with "*_codonSEQ.txt" and "_codonUSTB" and Excel sheets will be created)
}
\description{
Create a file with Codon count numbers (adjust by position) and a file with Codon usage
}
|
## @knitr dm
# Raw Qualtrics export of the R-users-group scheduling survey.
rusersOrig <- readxl::read_excel(here("data", "R users group list 0527_136PM.xlsx"))

# Clean the export and count, for every time-slot/day combination, how
# many respondents said they could meet then.  Final columns:
# time (ordered factor), day (ordered factor), count, gt14 (1 if >= 14).
rusers <- rusersOrig %>%
  # Qualtrics exports an extra first row holding the variable labels;
  # drop it by keeping rows whose StartDate is not the literal label.
  filter(StartDate != "Start Date") %>%
  # Keep and rename the four time-slot columns (each holds the checkbox
  # text listing the days a respondent can meet at that time).
  select(morning = Q2_1,
         midday = Q2_2,
         afternoon = Q2_3,
         evening = Q2_4) %>%
  # Data are de-identified, so fabricate an ID from the row name; it is
  # only needed to keep the pivots unambiguous.
  rownames_to_column(var="ID") %>%
  # Long format: one row per respondent x time slot.
  pivot_longer(cols=c(morning, midday, afternoon, evening),
               names_to = "time",
               values_to = "days") %>%
  # Respondents who never want a given slot left it blank (NA).
  filter(!is.na(days)) %>%
  # One boolean per weekday: did the respondent tick that day?
  mutate(mon = str_detect(days, "Monday"),
         tues = str_detect(days, "Tuesday"),
         wed = str_detect(days, "Wednesday"),
         thurs = str_detect(days, "Thursday"),
         fri = str_detect(days, "Friday")) %>%
  # ID and the raw 'days' text are no longer needed.
  select(-ID, -days) %>%
  # Longer again: one row per respondent x time x day.
  pivot_longer(cols=c(mon, tues, wed, thurs, fri),
               names_to = "day",
               values_to = "tf") %>%
  # Keep only the combinations the respondent said yes to; the flag is
  # then redundant.
  filter(tf==TRUE) %>%
  select(-tf) %>%
  # Count respondents per time/day cell.  BUG FIX: the original passed
  # `.groups = "keep"` to group_by(), but `.groups` is a summarise()
  # argument -- group_by() treated it as a new constant COLUMN named
  # `.groups`, which leaked into the final data.  `.groups = "drop"`
  # in summarise() also makes the explicit ungroup() unnecessary.
  group_by(time, day) %>%
  summarise(count = n(), .groups = "drop") %>%
  # Display-friendly ordered factors for plotting.
  mutate(time = factor(time,
                       levels = c("morning", "midday", "afternoon", "evening"),
                       labels = c("morning" = "Morning \n 8a-11a",
                                  "midday" = "Midday \n 11a-1p",
                                  "afternoon" = "Afternoon \n 1p-4p",
                                  "evening" = "Evening \n 4p-6p")),
         day = factor(day,
                      levels = c("mon", "tues", "wed", "thurs", "fri"),
                      labels = c("mon" = "Monday",
                                 "tues" = "Tuesday",
                                 "wed" = "Wednesday",
                                 "thurs" = "Thursday",
                                 "fri" = "Friday")),
         # Flag the popular cells (>= 14 positive responses) for emphasis.
         gt14 = if_else(condition = count>=14,
                        true = 1,
                        false = 0)) %>%
  # Factors sort in level order; arranged here for readable inspection.
  arrange(time, day)
## @knitr graph
# Faceted bar chart of responses per time slot, one facet per weekday;
# cells with >= 14 positive responses are drawn fully opaque.
ggplot(data=rusers,
       aes(x=time,
           y=count)) +
  # Bars inherit x/y from above; fill tracks the time slot and alpha
  # (transparency) tracks the >= 14 flag so popular slots stand out.
  # factor() tells ggplot the flag is discrete with two levels.
  geom_bar(aes(fill=time, alpha=factor(gt14)),
           stat="identity") +
  # Explicit alpha per level of factor(gt14), in level order ("0", "1").
  # scale_alpha_manual() replaces the original scale_alpha_discrete(),
  # which warned that alpha ranges are not meant for discrete data.
  scale_alpha_manual(values = c(0.3, 1)) +
  # Viridis fill scale for discrete data; direction reversed so the
  # default color ordering is flipped.
  scale_fill_viridis_d(direction=-1) +
  # One panel per weekday, all in a single row.
  facet_wrap(~day,
             nrow=1) +
  # Drop the default grey ggplot theme.
  theme_minimal() +
  # Cosmetic tweaks; see ?theme for the options used.
  theme(legend.position = "bottom",       # legend at bottom
        legend.title = element_blank(),   # remove legend title
        axis.text.x = element_blank(),    # drop x tick labels (legend covers them)
        axis.title.x = element_blank(),   # remove x axis title ('time')
        axis.title.y = element_blank(),   # remove y axis title ('count')
        strip.text = element_text(size=12),     # facet titles (Monday, ...)
        plot.title = element_text(size=16),
        plot.subtitle = element_text(size=10),
        legend.text = element_text(size=10),
        legend.spacing.x = unit(0.7, 'cm')) +   # room for larger legend keys
  # Hide the alpha legend (explained in the subtitle instead) and enlarge
  # the fill legend keys.  "none" replaces guides(alpha = FALSE), which
  # was deprecated in ggplot2 3.3.4.
  guides(alpha = "none",
         fill = guide_legend(override.aes = list(size = 12))) +
  # Informative title/subtitle.
  labs(title = "Meeting times for R group",
       subtitle = "Times with at least 14 positive responses are bolded")
| /code/01_r-users-scheduling.R | no_license | nataliesmith123/Rlearners | R | false | false | 8,386 | r |
## @knitr dm
rusersOrig <- readxl::read_excel(here("data", "R users group list 0527_136PM.xlsx"))
# I had already taken a manual look at the data before doing this
# also when I was building this code I was outputting new datasets in between steps
# so I could see how the sample size changed, etc.
# then I put it all together at the end into one big data step
# %>% is the pipe operator https://r4ds.had.co.nz/pipes.html
rusers <- rusersOrig %>%
# Because of the way Qualtrics data imports, there is an extra row here with the variable labels
# Get rid of it by saying 'keep all rows that have a StartDate NOT EQUAL to "Start Date"
# filter() keeps rows that return TRUE for a given condition(s)
filter(StartDate != "Start Date") %>%
# select() keeps the variables that I want
# you can rename on the fly newVar = oldVar
# the other option is the rename() function
select(morning = Q2_1,
midday = Q2_2,
afternoon = Q2_3,
evening = Q2_4) %>%
# Since I deidentified the data, want to make an ID so I can transpose
# rownames_to_column() takes the sequential rowname and makes an ID var
# Lots of other ways to do this, could make a random number, etc.
# This is really only necessary for the pivoting.
rownames_to_column(var="ID") %>%
# Now I want to pivot to long format for easier data processing
# this takes the 4 time variables and transposes them
# now I only have to work on one column, rather than 4
# start:
# ID morning midday afternoon evening
# end:
# ID morning mon/tues/etc.
# ID midday mon/tues/etc.
# ID afternoon mon/tues/etc.
# ...
pivot_longer(cols=c(morning, midday, afternoon, evening),
names_to = "time",
values_to = "days") %>%
# Keep only the rows that have NON-MISSING data for the 'days' column.
# This exists because some people didn't check any days for a given time
# (i.e., someone who never wanted to meet in the morning)
filter(!is.na(days)) %>%
# mutate() is the wrapper for new variables
# within the mutate() function, you can make lots of variables.
# newVar = thing to make newvar,
# str_detect() is part of the stringr package. It returns a Boolean TRUE/FALSE
# variable that is TRUE if the given string was detected, and FALSE if not
# 5 new variables, each a boolean with info about whether or not that person can meet on that day
mutate(mon = str_detect(days, "Monday"),
tues = str_detect(days, "Tuesday"),
wed = str_detect(days, "Wednesday"),
thurs = str_detect(days, "Thursday"),
fri = str_detect(days, "Friday")) %>%
# select() can also drop variables if you put the - in front
# dropping ID and the original 'days' variable.
# I now have info from days in the mon/tues/wed/thurs/fri variables
select(-ID, -days) %>%
# Another transpose/pivot
# want to keep the 'day' information in one column
# so now the columns of this data will be:
# time day tf
pivot_longer(cols=c(mon, tues, wed, thurs, fri),
names_to = "day",
values_to = "tf") %>%
# only keep the days that are TRUE
# this will allow us to count the total number of TRUEs per time/day
filter(tf==TRUE) %>%
# since we have kept TRUEs only, don't need that var anymore
select(-tf) %>%
# group_by() gets your data ready to calculate numbers by a given grouping
# for every combination of time and day
group_by(time, day, .groups="keep") %>%
# summarise() is used after group by to create summary stats
# you can also use things like mean(), sum(), etc.
# count the number of times that combination occurs using n()
summarise(count = n()) %>%
# get rid of the grouping (other functions won't work if you keep it implicitly grouped)
# our dataset now has one row for every time-day combination
# and a count of how many times that combination occurred
ungroup() %>%
# making factor variables that look nicer for display purposes
# factor() takes a given variable and allows you to define the levels
# and associate a label with each level
mutate(time = factor(time,
levels = c("morning", "midday", "afternoon", "evening"),
labels = c("morning" = "Morning \n 8a-11a",
"midday" = "Midday \n 11a-1p",
"afternoon" = "Afternoon \n 1p-4p",
"evening" = "Evening \n 4p-6p")),
day = factor(day,
levels = c("mon", "tues", "wed", "thurs", "fri"),
labels = c("mon" = "Monday",
"tues" = "Tuesday",
"wed" = "Wednesday",
"thurs" = "Thursday",
"fri" = "Friday")),
# if_else() statement -- pretty standard
gt14 = if_else(condition = count>=14,
true = 1,
false = 0)) %>%
# arrange() arranges data according to vars
# factors will be arranged in their order
# just doing this here to demonstrate
arrange(time, day)
## @knitr graph
# Faceted bar chart: for each weekday, the number of respondents available
# at each time-of-day; times with >= 14 positive responses are drawn opaque.
# initialize our plot with the data, and what the x and y axes are
ggplot(data = rusers,
       aes(x = time,
           y = count)) +
  # geom_col() is the idiomatic shorthand for geom_bar(stat = "identity"):
  # bar heights are taken directly from the y aesthetic (count).
  # fill means that the color for each bar is defined by TIME.
  # alpha is the transparency, used to bold the times chosen by >= 14 people;
  # factor() tells ggplot that gt14 is a discrete variable with 2 levels,
  # so each level gets its own transparency.
  geom_col(aes(fill = time, alpha = factor(gt14))) +
  # Map each gt14 level to an explicit transparency: level "0" -> 0.3,
  # level "1" -> 1 (fully opaque). scale_alpha_manual() produces the same
  # values as scale_alpha_discrete(range = c(0.3, 1)) for a two-level factor,
  # but without the "using alpha for a discrete variable" warning.
  scale_alpha_manual(values = c(0.3, 1)) +
  # change colors to be the viridis scale (regular ggplot colors are yucky)
  # the _d in the command means for discrete variables
  # direction means that I want to reverse the default ordering of the colors
  scale_fill_viridis_d(direction = -1) +
  # create this barplot separately for each DAY
  # display them in one row
  facet_wrap(~day,
             nrow = 1) +
  # get rid of the usual ggplot theme
  # https://ggplot2.tidyverse.org/reference/ggtheme.html
  theme_minimal() +
  # tweak lots of the little theme options
  # there are almost endless options here; see the help page for ?theme
  theme(legend.position = "bottom",       # legend at bottom
        legend.title = element_blank(),   # remove legend title
        axis.text.x = element_blank(),    # remove axis text (times are labeled in the legend)
        axis.title.x = element_blank(),   # remove x axis title ('time')
        axis.title.y = element_blank(),   # remove y axis title ('count')
        strip.text = element_text(size = 12),    # font size for the facet titles - Monday, etc.
        plot.title = element_text(size = 16),    # font size for the plot title
        plot.subtitle = element_text(size = 10), # font size for the subtitle
        legend.text = element_text(size = 10),   # font size in the legend
        legend.spacing.x = unit(0.7, 'cm')) +    # space out the legend for the larger font/keys
  # Suppress the legend (guide) for the transparency - that info goes in the
  # subtitle instead. "none" is the supported spelling; guides(alpha = FALSE)
  # is deprecated since ggplot2 3.3.4.
  # Also make the square boxes of the color legend larger.
  guides(alpha = "none",
         fill = guide_legend(override.aes = list(size = 12))) +
  # add some informative labels
  labs(title = "Meeting times for R group",
       subtitle = "Times with at least 14 positive responses are bolded")
|
\name{importanceSampling}
\alias{importanceSampling}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Generate Objects of Class "\code{\linkS4class{importanceSampling}}"
}
\description{
This generic has five methods, they are used to apply importance
(sub) sampling to an individual \code{"Stem"} object, or
collections of \code{"Stem"} objects. See
\code{\link{importanceSampling-methods}} for details.
}
\usage{
importanceSampling(object, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{ This is the signature argument, see the
\code{\link{importanceSampling-methods}} for possible values. }
\item{\dots}{ Arguments that can be passed along to the proxy function.}
}
\details{ Briefly, with importance sampling for bole volume (or some
segment of the bole) one uses a proxy taper function from which to draw
samples and thereby concentrate the samples in the lower portion of the
bole, where there is more volume and measurements are easier. The
different built-in proxy functions and their use are detailed in the
vignette cited below. In addition, one can supply one's own proxy
function if desired. }
\value{ A valid object of class
"\code{\linkS4class{importanceSampling}}" or
"\code{\linkS4class{mcsContainer}}", depending on which method was used.
}
\references{
\describe{
%
\item{}{Gove, J. H. 2013. Monte Carlo sampling methods in
\pkg{sampSurf}. Package vignette.}
}
}
\author{
Jeffrey H. Gove %, \email{jhgove@unh.edu}
}
\seealso{
See \code{\link{importanceSampling-methods}} for methods.
Other similar generics for Monte Carlo methods include:
\code{\link{crudeMonteCarlo}},
\code{\link{controlVariate}},
\code{\link{antitheticSampling}}.}
\examples{
#
# estimate volume between 10 and 15 m, using 5 random heights...
#
sTree = standingTree(dbh = 40, topDiam = 0, height = 20, solidType = 2.8)
sTree.is = importanceSampling(sTree, n.s = 5, segBnds = c(10,15), startSeed = 114,
proxy = 'wbProxy', solidTypeProxy = 2.5)
sTree.is
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%% \keyword{ ~kwd1 }
%% \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/importanceSampling.Rd | no_license | cran/sampSurf | R | false | false | 2,227 | rd | \name{importanceSampling}
\alias{importanceSampling}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Generate Objects of Class "\code{\linkS4class{importanceSampling}}"
}
\description{
This generic has five methods, they are used to apply importance
(sub) sampling to an individual \code{"Stem"} object, or
collections of \code{"Stem"} objects. See
\code{\link{importanceSampling-methods}} for details.
}
\usage{
importanceSampling(object, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{ This is the signature argument, see the
\code{\link{importanceSampling-methods}} for possible values. }
\item{\dots}{ Arguments that can be passed along to the proxy function.}
}
\details{ Briefly, with importance sampling for bole volume (or some
segment of the bole) one uses a proxy taper function from which to draw
samples and thereby concentrate the samples in the lower portion of the
bole, where there is more volume and measurements are easier. The
different built-in proxy functions and their use are detailed in the
vignette cited below. In addition, one can supply one's own proxy
function if desired. }
\value{ A valid object of class
"\code{\linkS4class{importanceSampling}}" or
"\code{\linkS4class{mcsContainer}}", depending on which method was used.
}
\references{
\describe{
%
\item{}{Gove, J. H. 2013. Monte Carlo sampling methods in
\pkg{sampSurf}. Package vignette.}
}
}
\author{
Jeffrey H. Gove %, \email{jhgove@unh.edu}
}
\seealso{
See \code{\link{importanceSampling-methods}} for methods.
Other similar generics for Monte Carlo methods include:
\code{\link{crudeMonteCarlo}},
\code{\link{controlVariate}},
\code{\link{antitheticSampling}}.}
\examples{
#
# estimate volume between 10 and 15 m, using 5 random heights...
#
sTree = standingTree(dbh = 40, topDiam = 0, height = 20, solidType = 2.8)
sTree.is = importanceSampling(sTree, n.s = 5, segBnds = c(10,15), startSeed = 114,
proxy = 'wbProxy', solidTypeProxy = 2.5)
sTree.is
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%% \keyword{ ~kwd1 }
%% \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
library(CensSpatial)
### Name: localinfmeas
### Title: Local influence measures.
### Aliases: localinfmeas
### Keywords: Spatial Censored SAEM
### ** Examples
## Not run:
##D require(geoR)
##D
##D data("Missouri")
##D data=Missouri
##D data$V3=log((data$V3))
##D cc=data$V5
##D y=data$V3
##D n=127
##D k=1
##D datare1=data
##D coords=datare1[,1:2]
##D data1=data.frame(coords,y)
##D data1=data1[cc==0,]
##D geodata=as.geodata(data1,y.col=3,coords.col=1:2)
##D v=variog(geodata)
##D v1=variofit(v)
##D cov.ini=c(0,2)
##D est=SAEMSCL(cc,y,cens.type="left",trend="cte",coords=coords,M=15,perc=0.25,
##D MaxIter=5,pc=0.2,cov.model="exponential",fix.nugget=T,nugget=2,
##D inits.sigmae=cov.ini[2],inits.phi=cov.ini[1], search=T,lower=0.00001,upper=100)
##D
##D
##D w=localinfmeas(est,fix.nugget=T,c=3)
##D
##D res=w$respper
##D res[res[,1]=="atypical obs",]
##D
##D sm=w$smper
##D sm[sm[,1]=="atypical obs",]
##D
##D ev=w$expvper
##D ev[ev[,1]=="atypical obs",]
##D
##D
##D ##############ANOTHER EXAMPLE#########
##D
##D n<-200 ### sample size for estimation
##D n1=100 ### number of observation used in the prediction
##D
##D ###simulated coordinates
##D r1=sample(seq(1,30,length=400),n+n1)
##D r2=sample(seq(1,30,length=400),n+n1)
##D coords=cbind(r1,r2)
##D
##D coords1=coords[1:n,]
##D
##D cov.ini=c(0.2,0.1)
##D type="exponential"
##D xtot=as.matrix(rep(1,(n+n1)))
##D xobs=xtot[1:n,]
##D beta=5
##D
##D ###simulated data
##D obj=rspacens(cov.pars=c(3,.3,0),beta=beta,x=xtot,coords=coords,cens=0.25,n=(n+n1),
##D n1=n1,cov.model=type,cens.type="left")
##D
##D data2=obj$datare
##D cc=obj$cc
##D y=obj$datare[,3]
##D
##D ##### generating atypical observations###
##D y[91]=y[91]+4
##D y[126]=y[126]+4
##D y[162]=y[162]+4
##D coords=obj$datare[,1:2]
##D
##D ###initial values###
##D cov.ini=c(0.2,0.1)
##D
##D est=SAEMSCL(cc,y,cens.type="left",trend="cte",coords=coords,M=15,perc=0.25,
##D MaxIter=10,pc=0.2,cov.model=type,fix.nugget=T,nugget=0,inits.sigmae=cov.ini[1],
##D inits.phi=cov.ini[2],search=T,lower=0.00001,upper=50)
##D
##D
##D w=localinfmeas(est,fix.nugget=T,c=3)
##D
##D res=w$respper
##D res[res[,1]=="atypical obs",]
##D
##D sm=w$smper
##D sm[sm[,1]=="atypical obs",]
##D
##D ev=w$expvper
##D ev[ev[,1]=="atypical obs",]
##D
## End(Not run)
| /data/genthat_extracted_code/CensSpatial/examples/localinfmeas.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 2,287 | r | library(CensSpatial)
### Name: localinfmeas
### Title: Local influence measures.
### Aliases: localinfmeas
### Keywords: Spatial Censored SAEM
### ** Examples
## Not run:
##D require(geoR)
##D
##D data("Missouri")
##D data=Missouri
##D data$V3=log((data$V3))
##D cc=data$V5
##D y=data$V3
##D n=127
##D k=1
##D datare1=data
##D coords=datare1[,1:2]
##D data1=data.frame(coords,y)
##D data1=data1[cc==0,]
##D geodata=as.geodata(data1,y.col=3,coords.col=1:2)
##D v=variog(geodata)
##D v1=variofit(v)
##D cov.ini=c(0,2)
##D est=SAEMSCL(cc,y,cens.type="left",trend="cte",coords=coords,M=15,perc=0.25,
##D MaxIter=5,pc=0.2,cov.model="exponential",fix.nugget=T,nugget=2,
##D inits.sigmae=cov.ini[2],inits.phi=cov.ini[1], search=T,lower=0.00001,upper=100)
##D
##D
##D w=localinfmeas(est,fix.nugget=T,c=3)
##D
##D res=w$respper
##D res[res[,1]=="atypical obs",]
##D
##D sm=w$smper
##D sm[sm[,1]=="atypical obs",]
##D
##D ev=w$expvper
##D ev[ev[,1]=="atypical obs",]
##D
##D
##D ##############ANOTHER EXAMPLE#########
##D
##D n<-200 ### sample size for estimation
##D n1=100 ### number of observation used in the prediction
##D
##D ###simulated coordinates
##D r1=sample(seq(1,30,length=400),n+n1)
##D r2=sample(seq(1,30,length=400),n+n1)
##D coords=cbind(r1,r2)
##D
##D coords1=coords[1:n,]
##D
##D cov.ini=c(0.2,0.1)
##D type="exponential"
##D xtot=as.matrix(rep(1,(n+n1)))
##D xobs=xtot[1:n,]
##D beta=5
##D
##D ###simulated data
##D obj=rspacens(cov.pars=c(3,.3,0),beta=beta,x=xtot,coords=coords,cens=0.25,n=(n+n1),
##D n1=n1,cov.model=type,cens.type="left")
##D
##D data2=obj$datare
##D cc=obj$cc
##D y=obj$datare[,3]
##D
##D ##### generating atypical observations###
##D y[91]=y[91]+4
##D y[126]=y[126]+4
##D y[162]=y[162]+4
##D coords=obj$datare[,1:2]
##D
##D ###initial values###
##D cov.ini=c(0.2,0.1)
##D
##D est=SAEMSCL(cc,y,cens.type="left",trend="cte",coords=coords,M=15,perc=0.25,
##D MaxIter=10,pc=0.2,cov.model=type,fix.nugget=T,nugget=0,inits.sigmae=cov.ini[1],
##D inits.phi=cov.ini[2],search=T,lower=0.00001,upper=50)
##D
##D
##D w=localinfmeas(est,fix.nugget=T,c=3)
##D
##D res=w$respper
##D res[res[,1]=="atypical obs",]
##D
##D sm=w$smper
##D sm[sm[,1]=="atypical obs",]
##D
##D ev=w$expvper
##D ev[ev[,1]=="atypical obs",]
##D
## End(Not run)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calc_z_score.R
\name{calc_z_score}
\alias{calc_z_score}
\title{Calculate z-score and p-values}
\usage{
calc_z_score(
x,
mu,
sigma,
n = 1,
sig_level = NULL,
p_val = FALSE,
alternative = 3,
shade = FALSE,
digits = 4
)
}
\arguments{
\item{x}{mean or value for which a z-score or probability of observing is to be calculated.}
\item{mu}{mean of the distribution.}
\item{sigma}{standard deviation of the population.}
\item{n}{sample size if x is a mean. n = 1 by default for the case where x is a single value.}
\item{sig_level}{the significance level for a hypothesis test.}
\item{p_val}{a logical for if a p-value should be provided.}
\item{alternative}{takes 3 values indicating the direction of the alternative hypothesis, or the direction of the probability to be calculated. The value of this does not matter if p_val is FALSE. The options for this input are 1 = "greater than", 2 = "less than", and 3 = "not equal to".}
\item{shade}{a logical for if a shaded density curve should be provided.}
\item{digits}{an integer in for the number of decimal places to which the answer should be rounded. Note: positive numbers refer to the number of digits after the decimal place, negative numbers refer to the number of digits before the decimal place, and 0 refers to the nearest whole number.}
}
\value{
a statement laying out the values input, the subsequent z-score, p-value, and an appropriate conclusion (if p_val = TRUE, and sig_level has a value).
}
\description{
Calculates z-score and p-values for hypothesis test and z-score questions. This function is targeted to help introductory-level students and professors easily answer questions and learn R, without having to understand complicated functions like pnorm and dnorm.
}
\examples{
calc_z_score(x = 15, mu = 10, sigma = 5, p_val = TRUE, sig_level = 0.05,
alternative = 3)
calc_z_score(x = 100, mu = 90, sigma = 5)
}
| /man/calc_z_score.Rd | permissive | kcon13/introTeach | R | false | true | 1,998 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calc_z_score.R
\name{calc_z_score}
\alias{calc_z_score}
\title{Calculate z-score and p-values}
\usage{
calc_z_score(
x,
mu,
sigma,
n = 1,
sig_level = NULL,
p_val = FALSE,
alternative = 3,
shade = FALSE,
digits = 4
)
}
\arguments{
\item{x}{mean or value for which a z-score or probability of observing is to be calculated.}
\item{mu}{mean of the distribution.}
\item{sigma}{standard deviation of the population.}
\item{n}{sample size if x is a mean. n = 1 by default for the case where x is a single value.}
\item{sig_level}{the significance level for a hypothesis test.}
\item{p_val}{a logical for if a p-value should be provided.}
\item{alternative}{takes 3 values indicating the direction of the alternative hypothesis, or the direction of the probability to be calculated. The value of this does not matter if p_val is FALSE. The options for this input are 1 = "greater than", 2 = "less than", and 3 = "not equal to".}
\item{shade}{a logical for if a shaded density curve should be provided.}
\item{digits}{an integer in for the number of decimal places to which the answer should be rounded. Note: positive numbers refer to the number of digits after the decimal place, negative numbers refer to the number of digits before the decimal place, and 0 refers to the nearest whole number.}
}
\value{
a statement laying out the values input, the subsequent z-score, p-value, and an appropriate conclusion (if p_val = TRUE, and sig_level has a value).
}
\description{
Calculates z-score and p-values for hypothesis test and z-score questions. This function is targeted to help introductory-level students and professors easily answer questions and learn R, without having to understand complicated functions like pnorm and dnorm.
}
\examples{
calc_z_score(x = 15, mu = 10, sigma = 5, p_val = TRUE, sig_level = 0.05,
alternative = 3)
calc_z_score(x = 100, mu = 90, sigma = 5)
}
|
# Example script (extracted from the auRoc package help page) demonstrating
# auc.para.frequentist(), which estimates the area under the ROC curve
# using frequentist parametric methods.
library(auRoc)
### Name: auc.para.frequentist
### Title: AUC by Frequentist Parametric Methods
### Aliases: auc.para.frequentist
### Keywords: htest
### ** Examples
#Example 1
# Split the FDG tracer values by tumor grade: y holds grade-1 cases,
# x holds grade-2 cases (presumably the "diseased" group -- confirm the
# x/y convention in ?auc.para.frequentist).
data(petBrainGlioma)
y <- subset(petBrainGlioma, grade==1, select="FDG", drop=TRUE)
x <- subset(petBrainGlioma, grade==2, select="FDG", drop=TRUE)
# dist="exp" presumably assumes an exponential model for the marker
# values -- see the package documentation for the available distributions.
auc.para.frequentist(x, y, dist="exp")
#Example 2
# Same grade split for the ACE tracer measurements.
data(petBrainGlioma)
y <- subset(petBrainGlioma, grade==1, select="ACE", drop=TRUE)
x <- subset(petBrainGlioma, grade==2, select="ACE", drop=TRUE)
# method="RG1" selects one of the package's parametric AUC estimators --
# TODO confirm its meaning in ?auc.para.frequentist.
auc.para.frequentist(x, y, method="RG1")
| /data/genthat_extracted_code/auRoc/examples/auc.para.frequentist.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 599 | r | library(auRoc)
### Name: auc.para.frequentist
### Title: AUC by Frequentist Parametric Methods
### Aliases: auc.para.frequentist
### Keywords: htest
### ** Examples
#Example 1
data(petBrainGlioma)
y <- subset(petBrainGlioma, grade==1, select="FDG", drop=TRUE)
x <- subset(petBrainGlioma, grade==2, select="FDG", drop=TRUE)
auc.para.frequentist(x, y, dist="exp")
#Example 2
data(petBrainGlioma)
y <- subset(petBrainGlioma, grade==1, select="ACE", drop=TRUE)
x <- subset(petBrainGlioma, grade==2, select="ACE", drop=TRUE)
auc.para.frequentist(x, y, method="RG1")
|
\name{QCAGUI-internal}
\alias{.onAttach}
\alias{asNumeric}
\alias{base3rows}
\alias{createChart}
\alias{createString}
\alias{deMorganLoop}
\alias{drawVenn}
\alias{eqmccLoop}
\alias{factor.function}
\alias{fuzzyand}
\alias{fuzzyor}
\alias{getBigList}
\alias{getNonChars}
\alias{getSolution}
\alias{insideBrackets}
\alias{negateValues}
\alias{outsideBrackets}
\alias{possibleNumeric}
\alias{prettyString}
\alias{prettyTable}
\alias{print.aE}
\alias{print.deMorgan}
\alias{print.fctr}
\alias{print.pic}
\alias{print.pof}
\alias{print.qca}
\alias{print.mqca}
\alias{print.sS}
\alias{print.tt}
\alias{removeDuplicates}
\alias{removeSingleStars}
\alias{rowDominance}
\alias{simplifyList}
\alias{solveBrackets}
\alias{sortVector}
\alias{sortMatrix}
\alias{splitstr}
\alias{splitBrackets}
\alias{splitMainComponents}
\alias{splitPluses}
\alias{splitStars}
\alias{splitTildas}
\alias{trimst}
\alias{uninstall}
\alias{verify.data}
\alias{verify.dir.exp}
\alias{verify.eqmcc}
\alias{verify.inf.test}
\alias{verify.qca}
\alias{verify.mqca}
\alias{verify.tt}
\alias{writePrimeimp}
\alias{writeSolution}
\alias{ib}
\title{Internal Functions}
\description{
The above functions are internal in the QCAGUI package which are not designed to
be called directly by the user. All of them are used by the \code{eqmcc} function,
except \code{sortMatrix} which is used by \code{allExpressions}. The verification
and error messages have been moved to the internal functions \code{verify.data}
and \code{verify.tt}.
}
\keyword{internal} | /man/QCAGUI-internal.Rd | no_license | AngelOfMusic/QCAGUI | R | false | false | 1,599 | rd | \name{QCAGUI-internal}
\alias{.onAttach}
\alias{asNumeric}
\alias{base3rows}
\alias{createChart}
\alias{createString}
\alias{deMorganLoop}
\alias{drawVenn}
\alias{eqmccLoop}
\alias{factor.function}
\alias{fuzzyand}
\alias{fuzzyor}
\alias{getBigList}
\alias{getNonChars}
\alias{getSolution}
\alias{insideBrackets}
\alias{negateValues}
\alias{outsideBrackets}
\alias{possibleNumeric}
\alias{prettyString}
\alias{prettyTable}
\alias{print.aE}
\alias{print.deMorgan}
\alias{print.fctr}
\alias{print.pic}
\alias{print.pof}
\alias{print.qca}
\alias{print.mqca}
\alias{print.sS}
\alias{print.tt}
\alias{removeDuplicates}
\alias{removeSingleStars}
\alias{rowDominance}
\alias{simplifyList}
\alias{solveBrackets}
\alias{sortVector}
\alias{sortMatrix}
\alias{splitstr}
\alias{splitBrackets}
\alias{splitMainComponents}
\alias{splitPluses}
\alias{splitStars}
\alias{splitTildas}
\alias{trimst}
\alias{uninstall}
\alias{verify.data}
\alias{verify.dir.exp}
\alias{verify.eqmcc}
\alias{verify.inf.test}
\alias{verify.qca}
\alias{verify.mqca}
\alias{verify.tt}
\alias{writePrimeimp}
\alias{writeSolution}
\alias{ib}
\title{Internal Functions}
\description{
The above functions are internal in the QCAGUI package which are not designed to
be called directly by the user. All of them are used by the \code{eqmcc} function,
except \code{sortMatrix} which is used by \code{allExpressions}. The verification
and error messages have been moved to the internal functions \code{verify.data}
and \code{verify.tt}.
}
\keyword{internal} |
\name{print.FHtestrcc}
\alias{print.FHtestrcc}
\title{Printing method for \code{FHtestrcc} object.}
\description{Printing method for \code{FHtestrcc} object.}
\usage{
\method{print}{FHtestrcc}(x, digits = max(options()$digits - 4, 3), ...)
}
\arguments{
\item{x}{An object of type \code{FHtestrcc}.}
\item{digits}{Number of digits for printing.}
\item{\dots}{Additional arguments.}
}
%\details{
%}
\author{R. Oller and K. Langohr}
%\seealso{
%}
%\examples{
%}
\keyword{internal}
| /man/print.FHtestrcc.Rd | no_license | cran/FHtest | R | false | false | 509 | rd | \name{print.FHtestrcc}
\alias{print.FHtestrcc}
\title{Printing method for \code{FHtestrcc} object.}
\description{Printing method for \code{FHtestrcc} object.}
\usage{
\method{print}{FHtestrcc}(x, digits = max(options()$digits - 4, 3), ...)
}
\arguments{
\item{x}{An object of type \code{FHtestrcc}.}
\item{digits}{Number of digits for printing.}
\item{\dots}{Additional arguments.}
}
%\details{
%}
\author{R. Oller and K. Langohr}
%\seealso{
%}
%\examples{
%}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/adult_coefs.R
\docType{data}
\name{adult_enroll_dur}
\alias{adult_enroll_dur}
\title{Adult Enrollment Duration Factors - Table 9}
\format{
An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with
11 rows and 7 columns
\describe{
\item{used}{Whether or not the category is used in the RA model}
\item{plat, gold, silver, bronze, cat}{Enrollment duration risk score for
each metal level}
\item{months}{Number of enrollment months for each risk score}
}
}
\source{
Data import and cleaning at:
\url{https://github.com/EeethB/edgedata/tree/main/data-raw}
}
\usage{
adult_enroll_dur
}
\description{
A dataset containing the adult model enrollment duration factors for the
risk adjustment model. Risk score is increased for members with less than
12 months' duration due to risk that is present but not accounted for in
diagnoses and prescriptions during the enrollment window.
}
\seealso{
Other factors:
\code{\link{adult_demo}},
\code{\link{adult_group}},
\code{\link{adult_hcc}},
\code{\link{adult_interaction}},
\code{\link{adult_rxc_hcc_inter}},
\code{\link{adult_rxc}},
\code{\link{child_demo}},
\code{\link{child_group}},
\code{\link{child_hcc}},
\code{\link{infant_demo}},
\code{\link{infant_mat_sev}}
}
\concept{factors}
\keyword{datasets}
| /man/adult_enroll_dur.Rd | no_license | cran/edgedata | R | false | true | 1,394 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/adult_coefs.R
\docType{data}
\name{adult_enroll_dur}
\alias{adult_enroll_dur}
\title{Adult Enrollment Duration Factors - Table 9}
\format{
An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with
11 rows and 7 columns
\describe{
\item{used}{Whether or not the category is used in the RA model}
\item{plat, gold, silver, bronze, cat}{Enrollment duration risk score for
each metal level}
\item{months}{Number of enrollment months for each risk score}
}
}
\source{
Data import and cleaning at:
\url{https://github.com/EeethB/edgedata/tree/main/data-raw}
}
\usage{
adult_enroll_dur
}
\description{
A dataset containing the adult model enrollment duration factors for the
risk adjustment model. Risk score is increased for members with less than
12 months' duration due to risk that is present but not accounted for in
diagnoses and prescriptions during the enrollment window.
}
\seealso{
Other factors:
\code{\link{adult_demo}},
\code{\link{adult_group}},
\code{\link{adult_hcc}},
\code{\link{adult_interaction}},
\code{\link{adult_rxc_hcc_inter}},
\code{\link{adult_rxc}},
\code{\link{child_demo}},
\code{\link{child_group}},
\code{\link{child_hcc}},
\code{\link{infant_demo}},
\code{\link{infant_mat_sev}}
}
\concept{factors}
\keyword{datasets}
|
# Partial SVD of a real, general (m x n) matrix via the Spectra C++ library.
#
# Args:
#   A          - the matrix: a dense "matrix", a sparse Matrix-package class,
#                or a function implementing the matrix-vector product when
#                mattype == "function"
#   k          - number of singular values to compute, 0 < k < min(m, n)
#   nu, nv     - number of left / right singular vectors to return, each in [0, k]
#   opts       - list of user options (ncv, tol, maxitr, center, scale)
#   mattype    - storage type of A; indexes MAT_TYPE for dispatch in C++
#   extra_args - extra data forwarded to C++ (e.g. dim when A is a function)
#
# Returns: the list computed by the C++ routine "svds_gen"
#          (singular values/vectors plus nconv and niter).
svds_real_gen <- function(A, k, nu, nv, opts, mattype, extra_args = list())
{
    if (mattype == "function")
    {
        m = as.integer(extra_args$dim[1])
        n = as.integer(extra_args$dim[2])
    } else {
        m = nrow(A)
        n = ncol(A)
    }
    wd = min(m, n)

    # Check for matrices that are too small
    if (wd < 3)
        stop("nrow(A) and ncol(A) should be at least 3")

    # If all singular values are requested, call svd() instead,
    # and give a warning
    if (k == wd)
    {
        warning("all singular values are requested, svd() is used instead")
        return(c(svd(A, nu = nu, nv = nv),
                 nconv = wd, niter = 0))
    }

    # Matrix will be passed to C++, so we need to check the type.
    # Convert the matrix type if A is stored other than double.
    #
    # However, for sparse matrices defined in Matrix package,
    # they are always double, so we can omit this check.
    # (Both operands are scalars here, so use the short-circuiting
    # scalar operator && rather than the vectorized &.)
    if (mattype == "matrix" && typeof(A) != "double")
    {
        mode(A) = "double"
    }

    # Check the value of 'k'
    if (k <= 0 || k >= wd)
        stop("'k' must satisfy 0 < k < min(nrow(A), ncol(A)).\nTo calculate all singular values, try svd()")

    # Check the values of 'nu' and 'nv'
    if (nu < 0 || nv < 0 || nu > k || nv > k)
        stop("'nu' and 'nv' must satisfy 0 <= nu <= k and 0 <= nv <= k")

    # Arguments to be passed to Spectra
    spectra.param = list(ncv = min(wd, max(2 * k + 1, 20)),
                         tol = 1e-10,
                         maxitr = 1000,
                         center = FALSE,
                         scale = FALSE)

    # By default center = FALSE and scale = FALSE
    ctr = rep(0, n)
    scl = rep(1, n)

    # Update ctr and scl from opts
    # 1. If `center == TRUE`, then the centering vector is the column mean of A
    # 2. If `center` is a vector, then use this vector to center A
    # 3. In other cases, do not center A
    if (isTRUE(opts$center))
    {
        ctr = colMeans(A)
    } else if (is.numeric(opts$center)) {
        if (length(opts$center) != n)
            stop("opts$center must be TRUE/FALSE or a vector of length n")
        ctr = as.numeric(opts$center)
        opts$center = TRUE
    } else {
        opts$center = FALSE
    }

    # Scaling is always applied to vectors **after centering**
    # 4. If `scale == TRUE`, then the scaling vector consists of the norms of column
    #    vectors of A **after centering**
    # 5. If `scale` is a vector, then use this vector to scale A
    # 6. In other cases, do not scale A
    if (isTRUE(opts$scale))
    {
        # ||x - ctr||^2 expanded as sum(x^2) - 2*ctr*sum(x) + m*ctr^2,
        # computed column-wise without materializing the centered matrix
        sumx = colSums(A)
        sumxx = colSums(A^2)
        scl = sqrt(sumxx - 2 * sumx * ctr + m * ctr^2)
    } else if (is.numeric(opts$scale)) {
        if (length(opts$scale) != n)
            stop("opts$scale must be TRUE/FALSE or a vector of length n")
        scl = as.numeric(opts$scale)
        opts$scale = TRUE
    } else {
        opts$scale = FALSE
    }

    # Update parameters from 'opts' argument
    spectra.param[names(opts)] = opts
    # Any other arguments passed to C++ code
    spectra.param = c(spectra.param, as.list(extra_args),
                      list(ctr_vec = ctr, scl_vec = scl))

    # Check the value of 'ncv'
    if (spectra.param$ncv <= k || spectra.param$ncv > wd)
        stop("'opts$ncv' must be > k and <= min(nrow(A), ncol(A))")

    # Call the C++ function
    res = .Call("svds_gen",
                A,
                as.integer(m), as.integer(n),
                as.integer(k), as.integer(nu), as.integer(nv),
                as.list(spectra.param),
                as.integer(MAT_TYPE[mattype]),
                PACKAGE = "RSpectra")

    return(res)
}
| /R/40_svds_real_gen.R | no_license | yixuan/RSpectra | R | false | false | 3,678 | r | svds_real_gen <- function(A, k, nu, nv, opts, mattype, extra_args = list())
{
if (mattype == "function")
{
m = as.integer(extra_args$dim[1])
n = as.integer(extra_args$dim[2])
} else {
m = nrow(A)
n = ncol(A)
}
wd = min(m, n)
# Check for matrices that are too small
if (wd < 3)
stop("nrow(A) and ncol(A) should be at least 3")
# If all singular values are requested, call svd() instead,
# and give a warning
if (k == wd)
{
warning("all singular values are requested, svd() is used instead")
return(c(svd(A, nu = nu, nv = nv),
nconv = wd, niter = 0))
}
# Matrix will be passed to C++, so we need to check the type.
# Convert the matrix type if A is stored other than double.
#
# However, for sparse matrices defined in Matrix package,
# they are always double, so we can omit this check.
if (mattype == "matrix" & typeof(A) != "double")
{
mode(A) = "double"
}
# Check the value of 'k'
if (k <= 0 | k >= wd)
stop("'k' must satisfy 0 < k < min(nrow(A), ncol(A)).\nTo calculate all singular values, try svd()")
# Check the values of 'nu' and 'nv'
if (nu < 0 | nv < 0 | nu > k | nv > k)
stop("'nu' and 'nv' must satisfy 0 <= nu <= k and 0 <= nv <= k")
# Arguments to be passed to Spectra
spectra.param = list(ncv = min(wd, max(2 * k + 1, 20)),
tol = 1e-10,
maxitr = 1000,
center = FALSE,
scale = FALSE)
# By default center = FALSE and scale = FALSE
ctr = rep(0, n)
scl = rep(1, n)
# Update ctr and scl from opts
# 1. If `center == TRUE`, then the centering vector is the column mean of A
# 2. If `center` is a vector, then use this vector to center A
# 3. In other cases, do not center A
if (isTRUE(opts$center))
{
ctr = colMeans(A)
} else if (is.numeric(opts$center)) {
if (length(opts$center) != n)
stop("opts$center must be TRUE/FALSE or a vector of length n")
ctr = as.numeric(opts$center)
opts$center = TRUE
} else {
opts$center = FALSE
}
# Scaling is always applied to vectors **after centering**
# 4. If `scale == TRUE`, then the scaling vector consists of the norms of column
# vectors of A **after centering**
# 5. If `scale` is a vector, then use this vector to scale A
# 6. In other cases, do not scale A
if (isTRUE(opts$scale))
{
sumx = colSums(A)
sumxx = colSums(A^2)
scl = sqrt(sumxx - 2 * sumx * ctr + m * ctr^2)
} else if (is.numeric(opts$scale)) {
if (length(opts$scale) != n)
stop("opts$scale must be TRUE/FALSE or a vector of length n")
scl = as.numeric(opts$scale)
opts$scale = TRUE
} else {
opts$scale = FALSE
}
# Update parameters from 'opts' argument
spectra.param[names(opts)] = opts
# Any other arguments passed to C++ code
spectra.param = c(spectra.param, as.list(extra_args),
list(ctr_vec = ctr, scl_vec = scl))
# Check the value of 'ncv'
if (spectra.param$ncv <= k | spectra.param$ncv > wd)
stop("'opts$ncv' must be > k and <= min(nrow(A), ncol(A))")
# Call the C++ function
res = .Call("svds_gen",
A,
as.integer(m), as.integer(n),
as.integer(k), as.integer(nu), as.integer(nv),
as.list(spectra.param),
as.integer(MAT_TYPE[mattype]),
PACKAGE = "RSpectra")
return(res)
}
|
# Deconstruct a weighting formula into its components. Given `f` (treatment on
# the LHS, covariates on the RHS), optional `data`, and an evaluation
# environment, returns a list with:
#   reported.covs - model frame of covariates (factors keep original levels)
#   model.covs    - full model.matrix() including interactions, no intercept
#   treat         - the treatment vector (or A[["treat"]] from ... as fallback)
#   treat.name    - name of the treatment variable (NULL if taken from ...)
# Variables are looked up first in `data`, then in `env`.
get.covs.and.treat.from.formula <- function(f, data = NULL, env = .GlobalEnv, ...) {
  A <- list(...)
  tt <- terms(f, data = data)
  #Check if data exists
  if (is_not_null(data) && is.data.frame(data)) {
    data.specified <- TRUE
  }
  else data.specified <- FALSE
  #Check if response exists
  if (is.formula(tt, 2)) {
    resp.vars.mentioned <- as.character(tt)[2]
    # The response "fails" if it cannot be evaluated in data or env.
    resp.vars.failed <- vapply(resp.vars.mentioned, function(v) {
      is_null_or_error(try(eval(parse(text = v), c(data, env)), silent = TRUE))
    }, logical(1L))
    if (any(resp.vars.failed)) {
      # Without a usable LHS, a treat vector must have been passed via `...`.
      if (is_null(A[["treat"]])) stop(paste0("The given response variable, \"", as.character(tt)[2], "\", is not a variable in ", word.list(c("data", "the global environment")[c(data.specified, TRUE)], "or"), "."), call. = FALSE)
      tt <- delete.response(tt)
    }
  }
  else resp.vars.failed <- TRUE
  if (any(!resp.vars.failed)) {
    # Extract the treatment by building a one-sided model frame for it so
    # that data/env lookup and NA passing behave like the covariates below.
    treat.name <- resp.vars.mentioned[!resp.vars.failed][1]
    tt.treat <- terms(as.formula(paste0(treat.name, " ~ 1")))
    mf.treat <- quote(stats::model.frame(tt.treat, data,
                                         drop.unused.levels = TRUE,
                                         na.action = "na.pass"))
    tryCatch({mf.treat <- eval(mf.treat, c(data, env))},
             error = function(e) {stop(conditionMessage(e), call. = FALSE)})
    treat <- model.response(mf.treat)
  }
  else {
    # Fall back to an explicitly supplied treatment vector.
    treat <- A[["treat"]]
    treat.name <- NULL
  }
  #Check if RHS variables exist
  tt.covs <- delete.response(tt)
  rhs.vars.mentioned.lang <- attr(tt.covs, "variables")[-1]
  rhs.vars.mentioned <- vapply(rhs.vars.mentioned.lang, deparse, character(1L))
  rhs.vars.failed <- vapply(rhs.vars.mentioned.lang, function(v) {
    is_null_or_error(try(eval(v, c(data, env)), silent = TRUE))
  }, logical(1L))
  if (any(rhs.vars.failed)) {
    stop(paste0(c("All variables in formula must be variables in data or objects in the global environment.\nMissing variables: ",
                  paste(rhs.vars.mentioned[rhs.vars.failed], collapse=", "))), call. = FALSE)
  }
  rhs.term.labels <- attr(tt.covs, "term.labels")
  rhs.term.orders <- attr(tt.covs, "order")
  # data.frames named on the RHS are spliced into the terms as their columns.
  rhs.df <- vapply(rhs.vars.mentioned.lang, function(v) {
    is.data.frame(try(eval(v, c(data, env)), silent = TRUE))
  }, logical(1L))
  if (any(rhs.df)) {
    # A data.frame term may not appear in an interaction (order > 1).
    if (any(rhs.vars.mentioned[rhs.df] %in% unlist(sapply(rhs.term.labels[rhs.term.orders > 1], function(x) strsplit(x, ":", fixed = TRUE))))) {
      stop("Interactions with data.frames are not allowed in the input formula.", call. = FALSE)
    }
    addl.dfs <- setNames(lapply(rhs.vars.mentioned.lang[rhs.df], function(x) {eval(x, env)}),
                         rhs.vars.mentioned[rhs.df])
    # Replace each data.frame's term label with the labels of its columns,
    # preserving the original position in the term list.
    for (i in rhs.term.labels[rhs.term.labels %in% rhs.vars.mentioned[rhs.df]]) {
      ind <- which(rhs.term.labels == i)
      rhs.term.labels <- append(rhs.term.labels[-ind],
                                values = names(addl.dfs[[i]]),
                                after = ind - 1)
    }
    new.form <- as.formula(paste("~", paste(rhs.term.labels, collapse = " + ")))
    tt.covs <- terms(new.form)
    # Bind the spliced data.frames in front of data so their columns resolve.
    if (is_not_null(data)) data <- do.call("cbind", unname(c(addl.dfs, list(data))))
    else data <- do.call("cbind", unname(addl.dfs))
  }
  #Get model.frame, report error
  mf.covs <- quote(stats::model.frame(tt.covs, data,
                                      drop.unused.levels = TRUE,
                                      na.action = "na.pass"))
  tryCatch({covs <- eval(mf.covs, c(data, env))},
           error = function(e) {stop(conditionMessage(e), call. = FALSE)})
  if (is_not_null(treat.name) && treat.name %in% names(covs)) stop("The variable on the left side of the formula appears on the right side too.", call. = FALSE)
  if (is_null(rhs.vars.mentioned)) {
    # An empty RHS yields an intercept-only covariate set.
    covs <- data.frame(Intercept = rep(1, if (is_null(treat)) 1 else length(treat)))
  }
  else attr(tt.covs, "intercept") <- 0
  # Temporarily prefix factor levels with "_" before building the model
  # matrix (presumably so column names separate variable name from level --
  # confirm against downstream consumers); original levels restored below.
  covs.levels <- setNames(vector("list", ncol(covs)), names(covs))
  for (i in names(covs)) {
    if (is.character(covs[[i]])) covs[[i]] <- factor(covs[[i]])
    if (is.factor(covs[[i]])) {
      covs.levels[[i]] <- levels(covs[[i]])
      levels(covs[[i]]) <- paste0("_", covs.levels[[i]])
    }
  }
  #Get full model matrix with interactions too
  # contrasts = FALSE keeps one indicator column per factor level.
  covs.matrix <- model.matrix(tt.covs, data = covs,
                              contrasts.arg = lapply(Filter(is.factor, covs),
                                                     contrasts, contrasts=FALSE))
  # Restore the original factor levels for reporting.
  for (i in names(covs)) {
    if (is.factor(covs[[i]])) {
      levels(covs[[i]]) <- covs.levels[[i]]
    }
  }
  #attr(covs, "terms") <- NULL
  return(list(reported.covs = covs,
              model.covs = covs.matrix,
              treat = treat,
              treat.name = treat.name))
}
# Classify the treatment as "binary", "multinomial", or "continuous" and
# attach the result as the "treat.type" attribute of the returned vector.
# Multinomial (factor/character with > 2 levels) treatments are coerced to
# factor. Errors if fewer than two unique values are present.
get.treat.type <- function(treat) {
  n.levels <- nunique(treat)
  if (n.levels < 2) {
    stop("The treatment must have at least two unique values.", call. = FALSE)
  }
  if (n.levels == 2) {
    type <- "binary"
  } else if (is.factor(treat) || is.character(treat)) {
    type <- "multinomial"
    treat <- factor(treat)
  } else {
    type <- "continuous"
  }
  attr(treat, "treat.type") <- type
  treat
}
# Reconcile the focal group, estimand, and targets arguments for a given
# treatment type. Returns list(focal, estimand, reported.estimand), where
# `estimand` is the value used internally (ATC is converted to ATT with the
# control group as focal) and `reported.estimand` is what the user requested.
# When non-missing targets are supplied, any estimand is ignored.
process.focal.and.estimand <- function(focal, estimand, targets, treat, treat.type) {
  if ((is_null(targets) || all(is.na(targets))) && is_not_null(estimand)) {
    if (!(length(estimand) == 1 && is.character(estimand))) {
      stop("estimand must be a character vector of length 1.", call. = FALSE)
    }
    estimand_ <- toupper(estimand)[[1]]
    #Allowable estimands
    AE <- list(binary = c("ATT", "ATC", "ATE"),
               multinomial = c("ATT", "ATE"),
               continuous = "ATE")
    if (estimand_ %nin% AE[[treat.type]]) {
      stop(paste0("\"", estimand, "\" is not an allowable estimand with ", treat.type, " treatments. Only ", word.list(AE[[treat.type]], quotes = TRUE, and.or = "and", is.are = TRUE),
                  " allowed."), call. = FALSE)
    }
    reported.estimand <- estimand_
  }
  else {
    # targets take precedence over estimand.
    if (is_not_null(estimand)) warning("targets are not NULL; ignoring estimand.", call. = FALSE, immediate. = TRUE)
    estimand <- NULL
    reported.estimand <- "targets"
    estimand_ <- NULL
  }
  #Check focal
  # focal only makes sense for categorical treatments with estimand = "ATT".
  if (treat.type %in% c("binary", "multinomial")) {
    if (is_null(estimand)) {
      if (is_not_null(focal)) {
        warning(paste("Only estimand = \"ATT\" is compatible with focal. Ignoring focal."), call. = FALSE)
        focal <- NULL
      }
    }
    else if (estimand_ == "ATT") {
      if (is_null(focal)) {
        # Binary ATT can infer the focal group below; multinomial cannot.
        if (treat.type == "multinomial") {
          stop("When estimand = \"ATT\" for multinomial treatments, an argument must be supplied to focal.", call. = FALSE)
        }
      }
      else if (length(focal) > 1L || !is.atomic(focal) || !any(unique(treat) == focal)) {
        stop("The argument supplied to focal must be the name of a level of treat.", call. = FALSE)
      }
    }
    else {
      if (is_not_null(focal)) {
        warning(paste(estimand_, "is not compatible with focal. Ignoring focal."), call. = FALSE)
        focal <- NULL
      }
    }
  }
  #Get focal, estimand, and reported estimand
  if (isTRUE(treat.type == "binary")) {
    unique.treat <- unique(treat, nmax = 2)
    unique.treat.bin <- unique(binarize(treat), nmax = 2)
    if (is_not_null(estimand)) {
      if (estimand_ == "ATT") {
        if (is_null(focal)) {
          # Default focal group for ATT is the level binarize() codes as 1.
          focal <- unique.treat[unique.treat.bin == 1]
        }
        else if (focal == unique.treat[unique.treat.bin == 0]){
          # ATT with the control group as focal is really ATC.
          reported.estimand <- "ATC"
        }
      }
      else if (estimand_ == "ATC") {
        # Internally, ATC is ATT with the control group as focal.
        focal <- unique.treat[unique.treat.bin == 0]
        estimand_ <- "ATT"
      }
    }
  }
  return(list(focal = focal,
              estimand = estimand_,
              reported.estimand = reported.estimand))
}
# Resolve the s.weights argument to a vector of sampling weights. Accepts a
# numeric vector (returned as-is) or the quoted name of a column in `data`.
# NULL (or empty) input passes through unchanged; anything else is an error.
process.s.weights <- function(s.weights, data = NULL) {
  if (length(s.weights) == 0L) return(s.weights)
  named.column <- is.character(s.weights) && length(s.weights) == 1
  if (!named.column && !is.numeric(s.weights)) {
    stop("The argument to s.weights must be a vector or data frame of sampling weights or the (quoted) names of variables in data that contain sampling weights.", call. = FALSE)
  }
  if (named.column) {
    if (length(data) == 0L) {
      stop("s.weights was specified as a string but there was no argument to data.", call. = FALSE)
    }
    if (s.weights %in% names(data)) {
      s.weights <- data[[s.weights]]
    }
    else {
      stop("The name supplied to s.weights is not the name of a variable in data.", call. = FALSE)
    }
  }
  return(s.weights)
}
# Number of unique values in x. Factors report nlevels() (so unused levels
# still count); NULL/empty input gives 0; NAs are dropped unless
# na.rm = FALSE. `nmax` is passed through to unique() as an upper-bound hint.
nunique <- function(x, nmax = NA, na.rm = TRUE) {
  if (length(x) == 0L) return(0)
  if (na.rm) x <- x[!is.na(x)]
  if (is.factor(x)) nlevels(x)
  else length(unique(x, nmax = nmax))
}
# Does x have more than n unique values? Fast paths: for n = 1 on numerics,
# compare max and min up to floating-point tolerance; for short vectors,
# count uniques directly; for long vectors, cap unique() via nmax and treat
# an error from exceeding the cap as TRUE.
nunique.gt <- function(x, n, na.rm = TRUE) {
  if (missing(n)) stop("n must be supplied.")
  if (n < 0) stop("n must be non-negative.")
  if (length(x) == 0L) return(FALSE)
  if (na.rm) x <- x[!is.na(x)]
  if (n == 1 && is.numeric(x)) {
    !check_if_zero(max(x) - min(x))
  }
  else if (length(x) < 2000) {
    nunique(x) > n
  }
  else {
    tryCatch(nunique(x, nmax = n) > n, error = function(e) TRUE)
  }
}
# TRUE if x has at most two unique values (NAs ignored).
is_binary <- function(x) !nunique.gt(x, 2)
# TRUE if x has at most one unique value (NAs ignored).
all_the_same <- function(x) !nunique.gt(x, 1)
# Vectorized test for "numerically zero": TRUE where |x| falls below the
# default tolerance used by all.equal() (the square root of machine
# epsilon), so tiny floating-point residues such as -3.2e-16 are treated as
# exact zeros rather than nonzero values.
check_if_zero <- function(x) {
  tol <- .Machine$double.eps^0.5
  abs(x) < tol
}
# TRUE for NULL and for zero-length vectors (stricter than is.null()).
is_null <- function(x) length(x) == 0L
# Negation of is_null(); TRUE for anything with at least one element.
is_not_null <- function(x) !is_null(x)
# TRUE if x is empty or is the result of a failed try() call. inherits() is
# used instead of `class(x) == "try-error"`: comparing the class vector
# directly yields a length > 1 result for multi-class objects, which errors
# inside `||` in modern R and is not robust to subclassing.
is_null_or_error <- function(x) {is_null(x) || inherits(x, "try-error")}
# Recode a two-level variable as 0/1. The level equal to 0 (when present) or
# else the smallest numeric code becomes 0; the other level becomes 1. NAs
# are preserved and names are retained. Errors if the non-missing values
# have more than two levels.
binarize <- function(variable) {
  nas <- is.na(variable)
  if (!is_binary(variable[!nas])) stop(paste0("Cannot binarize ", deparse(substitute(variable)), ": more than two levels."))
  if (is.character(variable)) variable <- factor(variable)
  variable.numeric <- as.numeric(variable)
  # Use 0 itself as the reference level when present; otherwise the smaller
  # of the two numeric codes.
  if (!is.na(match(0, unique(variable.numeric)))) zero <- 0
  else zero <- min(unique(variable.numeric), na.rm = TRUE)
  newvar <- setNames(ifelse(!nas & variable.numeric==zero, 0, 1), names(variable))
  newvar[nas] <- NA
  return(newvar)
}
# Weighted column means of a matrix. With no weights this behaves like an
# NA-tolerant colMeans(): each column is divided by its own non-missing
# count. With weights, each column's numerator is sum(w * x) and its
# denominator is the sum of the weights over that column's NON-missing
# entries. (Previously the full sum(w) was used for every column, which
# biased the weighted mean of any column containing NAs toward zero.)
col.w.m <- function(mat, w = NULL, na.rm = TRUE) {
  if (length(w) == 0L) {
    w <- 1
    w.sum <- apply(mat, 2, function(x) sum(!is.na(x)))
  }
  else {
    # Per-column denominator: weights of the non-missing entries only.
    w.sum <- apply(mat, 2, function(x) sum(w[!is.na(x)], na.rm = na.rm))
  }
  return(colSums(mat*w, na.rm = na.rm)/w.sum)
}
# Denominator for a frequency-weighted (unbiased) variance:
# sum(w) - sum(w^2)/sum(w), written as (sum(w)^2 - sum(w^2)) / sum(w).
# With unit weights this reduces to n - 1.
w.cov.scale <- function(w) {
  s1 <- sum(w, na.rm = TRUE)
  s2 <- sum(w^2, na.rm = TRUE)
  (s1^2 - s2) / s1
}
# Weighted column variances of a matrix, using the frequency-weights
# unbiased denominator from w.cov.scale(). Unweighted input falls back to
# w = 1 for every row, reproducing the usual n - 1 denominator.
col.w.v <- function(mat, w = NULL, na.rm = TRUE) {
  if (is_null(w)) {
    w <- rep(1, nrow(mat))
  }
  # Center each column by its weighted mean, square, weight, and sum.
  return(colSums(t((t(mat) - col.w.m(mat, w, na.rm = na.rm))^2) * w, na.rm = na.rm) / w.cov.scale(w))
}
`%nin%` <- function(x, table) is.na(match(x, table, nomatch = NA_integer_))
# Loose test for a formula-like object: the head of the call must be the `~`
# symbol (`!` is also accepted -- presumably to tolerate negated formulas;
# confirm against callers) and the call must have at least one side. When
# `sides` is 1 or 2, additionally require exactly that many sides
# (call length 2 = one-sided, 3 = two-sided).
is.formula <- function(f, sides = NULL) {
  res <- is.name(f[[1]]) && deparse(f[[1]]) %in% c( '~', '!') &&
    length(f) >= 2
  if (is_not_null(sides) && is.numeric(sides) && sides %in% c(1,2)) {
    res <- res && length(f) == sides + 1
  }
  return(res)
}
# Collapse a character vector into an English list phrase: "a", "a and b",
# or "a, b, and c". `and.or` picks the conjunction; `quotes` wraps each word
# in escaped double quotes; `is.are` appends "is"/"are" as appropriate. The
# result carries a logical "plural" attribute. NA and empty strings are
# dropped before counting.
word.list <- function(word.list = NULL, and.or = c("and", "or"), is.are = FALSE, quotes = FALSE) {
  #When given a vector of strings, creates a string of the form "a and b"
  #or "a, b, and c"
  #If is.are, adds "is" or "are" appropriately
  L <- length(word.list)
  if (quotes) word.list <- vapply(word.list, function(x) paste0("\"", x, "\""), character(1L))
  if (L == 0) {
    out <- ""
    attr(out, "plural") = FALSE
  }
  else {
    # Drop missing/empty entries, then re-count.
    word.list <- word.list[!word.list %in% c(NA, "")]
    L <- length(word.list)
    if (L == 0) {
      out <- ""
      attr(out, "plural") = FALSE
    }
    else if (L == 1) {
      out <- word.list
      if (is.are) out <- paste(out, "is")
      attr(out, "plural") = FALSE
    }
    else {
      and.or <- match.arg(and.or)
      if (L == 2) {
        out <- paste(word.list, collapse = paste0(" ", and.or," "))
      }
      else {
        # Oxford comma before the final conjunction.
        out <- paste(paste(word.list[seq_len(L-1)], collapse = ", "),
                     word.list[L], sep = paste0(", ", and.or," "))
      }
      if (is.are) out <- paste(out, "are")
      attr(out, "plural") = TRUE
    }
  }
  return(out)
}
# Format a data frame of (mostly) numbers for printing: round numeric
# columns to `digits`, pad decimals with `pad` so decimal points align,
# re-attach a leading "-" to negatives that display as "0", and replace NAs
# with `na_vals`. Returns a data frame of character columns with the
# original dimnames.
round_df_char <- function(df, digits, pad = "0", na_vals = "") {
  nas <- is.na(df)
  if (!is.data.frame(df)) df <- as.data.frame.matrix(df, stringsAsFactors = FALSE)
  rn <- rownames(df)
  cn <- colnames(df)
  # Coerce character columns that are entirely numeric-looking to numeric.
  df <- as.data.frame(lapply(df, function(col) {
    if (suppressWarnings(all(!is.na(as.numeric(as.character(col)))))) {
      as.numeric(as.character(col))
    } else {
      col
    }
  }), stringsAsFactors = FALSE)
  nums <- vapply(df, is.numeric, FUN.VALUE = logical(1))
  # Remember signs before rounding so a negative displayed as "0" can have
  # its "-" restored afterwards.
  o.negs <- sapply(1:ncol(df), function(x) if (nums[x]) df[[x]] < 0 else rep(FALSE, length(df[[x]])))
  df[nums] <- round(df[nums], digits = digits)
  df[nas] <- ""
  df <- as.data.frame(lapply(df, format, scientific = FALSE, justify = "none"), stringsAsFactors = FALSE)
  for (i in which(nums)) {
    if (any(grepl(".", df[[i]], fixed = TRUE))) {
      s <- strsplit(df[[i]], ".", fixed = TRUE)
      # Note: shadows base::lengths for the remainder of this loop body.
      lengths <- lengths(s)
      # Number of digits to the right of the decimal point in each entry.
      digits.r.of.. <- vapply(seq_along(s), function(x) {
        if (lengths[x] > 1) nchar(s[[x]][lengths[x]])
        else 0 }, numeric(1L))
      df[[i]] <- sapply(seq_along(df[[i]]), function(x) {
        if (df[[i]][x] == "") ""
        else if (lengths[x] <= 1) {
          # Entry with no decimal point: when pad is "0", append "." plus
          # zeros; otherwise append one extra pad char in place of the ".".
          paste0(c(df[[i]][x], rep(".", pad == 0), rep(pad, max(digits.r.of..) - digits.r.of..[x] + as.numeric(pad != 0))),
                 collapse = "")
        }
        else paste0(c(df[[i]][x], rep(pad, max(digits.r.of..) - digits.r.of..[x])),
                    collapse = "")
      })
    }
  }
  # Restore the sign of negatives whose formatted value compares equal to 0.
  df[o.negs & df == 0] <- paste0("-", df[o.negs & df == 0])
  # Insert NA placeholders
  df[nas] <- na_vals
  if (length(rn) > 0) rownames(df) <- rn
  if (length(cn) > 0) names(df) <- cn
  return(df)
}
# Render each range in range.list as a crude text "box" ("|---|") positioned
# within the overall range, rescaled to about `width` characters. Returns a
# data frame with Min, the ASCII bar column, and Max (one row per range).
text.box.plot <- function(range.list, width = 12) {
  full.range <- range(unlist(range.list))
  # Units of one display character in data units.
  ratio = diff(full.range)/(width+1)
  rescaled.range.list <- lapply(range.list, function(x) round(x/ratio))
  rescaled.full.range <- round(full.range/ratio)
  d <- as.data.frame(matrix(NA_character_, ncol = 3, nrow = length(range.list),
                            dimnames = list(names(range.list), c("Min", paste(rep(" ", width + 1), collapse = ""), "Max"))),
                     stringsAsFactors = FALSE)
  d[,"Min"] <- vapply(range.list, function(x) x[1], numeric(1L))
  d[,"Max"] <- vapply(range.list, function(x) x[2], numeric(1L))
  for (i in seq_len(nrow(d))) {
    # Leading spaces up to the left edge of this range's box.
    spaces1 <- rescaled.range.list[[i]][1] - rescaled.full.range[1]
    #|
    dashes <- max(0, diff(rescaled.range.list[[i]]) - 2)
    #|
    # Trailing spaces out to the right edge of the full range.
    spaces2 <- max(0, diff(rescaled.full.range) - (spaces1 + 1 + dashes + 1))
    d[i, 2] <- paste0(paste(rep(" ", spaces1), collapse = ""), "|", paste(rep("-", dashes), collapse = ""), "|", paste(rep(" ", spaces2), collapse = ""))
  }
  return(d)
}
# Kish's effective sample size of a weight vector: (sum w)^2 / sum(w^2).
# Equals length(w) for constant weights and shrinks as weights vary.
ESS <- function(w) {
  sum(w)^2 / sum(w^2)
}
# Mean absolute deviation of x about its mean. (Despite the dots in the
# name, this is a plain function, not an S3 method for mean().)
mean.abs.dev <- function(x) {
  center <- mean(x)
  mean(abs(x - center))
}
# Coefficient of variation: standard deviation divided by the mean.
# pop = TRUE uses the population standard deviation (divisor n);
# pop = FALSE uses the sample sd() (divisor n - 1).
coef.of.var <- function(x, pop = TRUE) {
  if (pop) {
    pop.sd <- sqrt(mean((x - mean(x))^2))
    pop.sd / mean(x)
  } else {
    sd(x) / mean(x)
  }
}
#To pass CRAN checks:
utils::globalVariables(c("covs", "dual", "treat", "constraint"))
| /R/functions_for_processing.R | no_license | guhjy/optweight | R | false | false | 15,616 | r | get.covs.and.treat.from.formula <- function(f, data = NULL, env = .GlobalEnv, ...) {
A <- list(...)
tt <- terms(f, data = data)
#Check if data exists
if (is_not_null(data) && is.data.frame(data)) {
data.specified <- TRUE
}
else data.specified <- FALSE
#Check if response exists
if (is.formula(tt, 2)) {
resp.vars.mentioned <- as.character(tt)[2]
resp.vars.failed <- vapply(resp.vars.mentioned, function(v) {
is_null_or_error(try(eval(parse(text = v), c(data, env)), silent = TRUE))
}, logical(1L))
if (any(resp.vars.failed)) {
if (is_null(A[["treat"]])) stop(paste0("The given response variable, \"", as.character(tt)[2], "\", is not a variable in ", word.list(c("data", "the global environment")[c(data.specified, TRUE)], "or"), "."), call. = FALSE)
tt <- delete.response(tt)
}
}
else resp.vars.failed <- TRUE
if (any(!resp.vars.failed)) {
treat.name <- resp.vars.mentioned[!resp.vars.failed][1]
tt.treat <- terms(as.formula(paste0(treat.name, " ~ 1")))
mf.treat <- quote(stats::model.frame(tt.treat, data,
drop.unused.levels = TRUE,
na.action = "na.pass"))
tryCatch({mf.treat <- eval(mf.treat, c(data, env))},
error = function(e) {stop(conditionMessage(e), call. = FALSE)})
treat <- model.response(mf.treat)
}
else {
treat <- A[["treat"]]
treat.name <- NULL
}
#Check if RHS variables exist
tt.covs <- delete.response(tt)
rhs.vars.mentioned.lang <- attr(tt.covs, "variables")[-1]
rhs.vars.mentioned <- vapply(rhs.vars.mentioned.lang, deparse, character(1L))
rhs.vars.failed <- vapply(rhs.vars.mentioned.lang, function(v) {
is_null_or_error(try(eval(v, c(data, env)), silent = TRUE))
}, logical(1L))
if (any(rhs.vars.failed)) {
stop(paste0(c("All variables in formula must be variables in data or objects in the global environment.\nMissing variables: ",
paste(rhs.vars.mentioned[rhs.vars.failed], collapse=", "))), call. = FALSE)
}
rhs.term.labels <- attr(tt.covs, "term.labels")
rhs.term.orders <- attr(tt.covs, "order")
rhs.df <- vapply(rhs.vars.mentioned.lang, function(v) {
is.data.frame(try(eval(v, c(data, env)), silent = TRUE))
}, logical(1L))
if (any(rhs.df)) {
if (any(rhs.vars.mentioned[rhs.df] %in% unlist(sapply(rhs.term.labels[rhs.term.orders > 1], function(x) strsplit(x, ":", fixed = TRUE))))) {
stop("Interactions with data.frames are not allowed in the input formula.", call. = FALSE)
}
addl.dfs <- setNames(lapply(rhs.vars.mentioned.lang[rhs.df], function(x) {eval(x, env)}),
rhs.vars.mentioned[rhs.df])
for (i in rhs.term.labels[rhs.term.labels %in% rhs.vars.mentioned[rhs.df]]) {
ind <- which(rhs.term.labels == i)
rhs.term.labels <- append(rhs.term.labels[-ind],
values = names(addl.dfs[[i]]),
after = ind - 1)
}
new.form <- as.formula(paste("~", paste(rhs.term.labels, collapse = " + ")))
tt.covs <- terms(new.form)
if (is_not_null(data)) data <- do.call("cbind", unname(c(addl.dfs, list(data))))
else data <- do.call("cbind", unname(addl.dfs))
}
#Get model.frame, report error
mf.covs <- quote(stats::model.frame(tt.covs, data,
drop.unused.levels = TRUE,
na.action = "na.pass"))
tryCatch({covs <- eval(mf.covs, c(data, env))},
error = function(e) {stop(conditionMessage(e), call. = FALSE)})
if (is_not_null(treat.name) && treat.name %in% names(covs)) stop("The variable on the left side of the formula appears on the right side too.", call. = FALSE)
if (is_null(rhs.vars.mentioned)) {
covs <- data.frame(Intercept = rep(1, if (is_null(treat)) 1 else length(treat)))
}
else attr(tt.covs, "intercept") <- 0
covs.levels <- setNames(vector("list", ncol(covs)), names(covs))
for (i in names(covs)) {
if (is.character(covs[[i]])) covs[[i]] <- factor(covs[[i]])
if (is.factor(covs[[i]])) {
covs.levels[[i]] <- levels(covs[[i]])
levels(covs[[i]]) <- paste0("_", covs.levels[[i]])
}
}
#Get full model matrix with interactions too
covs.matrix <- model.matrix(tt.covs, data = covs,
contrasts.arg = lapply(Filter(is.factor, covs),
contrasts, contrasts=FALSE))
for (i in names(covs)) {
if (is.factor(covs[[i]])) {
levels(covs[[i]]) <- covs.levels[[i]]
}
}
#attr(covs, "terms") <- NULL
return(list(reported.covs = covs,
model.covs = covs.matrix,
treat = treat,
treat.name = treat.name))
}
get.treat.type <- function(treat) {
#Returns treat with treat.type attribute
nunique.treat <- nunique(treat)
if (nunique.treat == 2) {
treat.type <- "binary"
}
else if (nunique.treat < 2) {
stop("The treatment must have at least two unique values.", call. = FALSE)
}
else if (is.factor(treat) || is.character(treat)) {
treat.type <- "multinomial"
treat <- factor(treat)
}
else {
treat.type <- "continuous"
}
attr(treat, "treat.type") <- treat.type
return(treat)
}
process.focal.and.estimand <- function(focal, estimand, targets, treat, treat.type) {
if ((is_null(targets) || all(is.na(targets))) && is_not_null(estimand)) {
if (!(length(estimand) == 1 && is.character(estimand))) {
stop("estimand must be a character vector of length 1.", call. = FALSE)
}
estimand_ <- toupper(estimand)[[1]]
#Allowable estimands
AE <- list(binary = c("ATT", "ATC", "ATE"),
multinomial = c("ATT", "ATE"),
continuous = "ATE")
if (estimand_ %nin% AE[[treat.type]]) {
stop(paste0("\"", estimand, "\" is not an allowable estimand with ", treat.type, " treatments. Only ", word.list(AE[[treat.type]], quotes = TRUE, and.or = "and", is.are = TRUE),
" allowed."), call. = FALSE)
}
reported.estimand <- estimand_
}
else {
if (is_not_null(estimand)) warning("targets are not NULL; ignoring estimand.", call. = FALSE, immediate. = TRUE)
estimand <- NULL
reported.estimand <- "targets"
estimand_ <- NULL
}
#Check focal
if (treat.type %in% c("binary", "multinomial")) {
if (is_null(estimand)) {
if (is_not_null(focal)) {
warning(paste("Only estimand = \"ATT\" is compatible with focal. Ignoring focal."), call. = FALSE)
focal <- NULL
}
}
else if (estimand_ == "ATT") {
if (is_null(focal)) {
if (treat.type == "multinomial") {
stop("When estimand = \"ATT\" for multinomial treatments, an argument must be supplied to focal.", call. = FALSE)
}
}
else if (length(focal) > 1L || !is.atomic(focal) || !any(unique(treat) == focal)) {
stop("The argument supplied to focal must be the name of a level of treat.", call. = FALSE)
}
}
else {
if (is_not_null(focal)) {
warning(paste(estimand_, "is not compatible with focal. Ignoring focal."), call. = FALSE)
focal <- NULL
}
}
}
#Get focal, estimand, and reported estimand
if (isTRUE(treat.type == "binary")) {
unique.treat <- unique(treat, nmax = 2)
unique.treat.bin <- unique(binarize(treat), nmax = 2)
if (is_not_null(estimand)) {
if (estimand_ == "ATT") {
if (is_null(focal)) {
focal <- unique.treat[unique.treat.bin == 1]
}
else if (focal == unique.treat[unique.treat.bin == 0]){
reported.estimand <- "ATC"
}
}
else if (estimand_ == "ATC") {
focal <- unique.treat[unique.treat.bin == 0]
estimand_ <- "ATT"
}
}
}
return(list(focal = focal,
estimand = estimand_,
reported.estimand = reported.estimand))
}
process.s.weights <- function(s.weights, data = NULL) {
#Process s.weights
if (is_not_null(s.weights)) {
if (!(is.character(s.weights) && length(s.weights) == 1) && !is.numeric(s.weights)) {
stop("The argument to s.weights must be a vector or data frame of sampling weights or the (quoted) names of variables in data that contain sampling weights.", call. = FALSE)
}
if (is.character(s.weights) && length(s.weights)==1) {
if (is_null(data)) {
stop("s.weights was specified as a string but there was no argument to data.", call. = FALSE)
}
else if (s.weights %in% names(data)) {
s.weights <- data[[s.weights]]
}
else stop("The name supplied to s.weights is not the name of a variable in data.", call. = FALSE)
}
}
return(s.weights)
}
nunique <- function(x, nmax = NA, na.rm = TRUE) {
if (is_null(x)) return(0)
else {
if (na.rm) x <- x[!is.na(x)]
if (is.factor(x)) return(nlevels(x))
else return(length(unique(x, nmax = nmax)))
}
}
nunique.gt <- function(x, n, na.rm = TRUE) {
if (missing(n)) stop("n must be supplied.")
if (n < 0) stop("n must be non-negative.")
if (is_null(x)) FALSE
else {
if (na.rm) x <- x[!is.na(x)]
if (n == 1 && is.numeric(x)) !check_if_zero(max(x) - min(x))
else if (length(x) < 2000) nunique(x) > n
else tryCatch(nunique(x, nmax = n) > n, error = function(e) TRUE)
}
}
is_binary <- function(x) !nunique.gt(x, 2)
all_the_same <- function(x) !nunique.gt(x, 1)
check_if_zero <- function(x) {
# this is the default tolerance used in all.equal
tolerance <- .Machine$double.eps^0.5
# If the absolute deviation between the number and zero is less than
# the tolerance of the floating point arithmetic, then return TRUE.
# This means, to me, that I can treat the number as 0 rather than
# -3.20469e-16 or some such.
abs(x - 0) < tolerance
}
# TRUE for NULL and for zero-length vectors (stricter than is.null()).
is_null <- function(x) length(x) == 0L
# Negation of is_null(); TRUE for anything with at least one element.
is_not_null <- function(x) !is_null(x)
# TRUE if x is empty or is the result of a failed try() call. inherits() is
# used instead of `class(x) == "try-error"`: comparing the class vector
# directly yields a length > 1 result for multi-class objects, which errors
# inside `||` in modern R and is not robust to subclassing.
is_null_or_error <- function(x) {is_null(x) || inherits(x, "try-error")}
binarize <- function(variable) {
nas <- is.na(variable)
if (!is_binary(variable[!nas])) stop(paste0("Cannot binarize ", deparse(substitute(variable)), ": more than two levels."))
if (is.character(variable)) variable <- factor(variable)
variable.numeric <- as.numeric(variable)
if (!is.na(match(0, unique(variable.numeric)))) zero <- 0
else zero <- min(unique(variable.numeric), na.rm = TRUE)
newvar <- setNames(ifelse(!nas & variable.numeric==zero, 0, 1), names(variable))
newvar[nas] <- NA
return(newvar)
}
# Weighted column means of a matrix. With no weights this behaves like an
# NA-tolerant colMeans(): each column is divided by its own non-missing
# count. With weights, each column's numerator is sum(w * x) and its
# denominator is the sum of the weights over that column's NON-missing
# entries. (Previously the full sum(w) was used for every column, which
# biased the weighted mean of any column containing NAs toward zero.)
col.w.m <- function(mat, w = NULL, na.rm = TRUE) {
  if (length(w) == 0L) {
    w <- 1
    w.sum <- apply(mat, 2, function(x) sum(!is.na(x)))
  }
  else {
    # Per-column denominator: weights of the non-missing entries only.
    w.sum <- apply(mat, 2, function(x) sum(w[!is.na(x)], na.rm = na.rm))
  }
  return(colSums(mat*w, na.rm = na.rm)/w.sum)
}
w.cov.scale <- function(w) {
(sum(w, na.rm = TRUE)^2 - sum(w^2, na.rm = TRUE)) / sum(w, na.rm = TRUE)
}
col.w.v <- function(mat, w = NULL, na.rm = TRUE) {
if (is_null(w)) {
w <- rep(1, nrow(mat))
}
return(colSums(t((t(mat) - col.w.m(mat, w, na.rm = na.rm))^2) * w, na.rm = na.rm) / w.cov.scale(w))
}
`%nin%` <- function(x, table) is.na(match(x, table, nomatch = NA_integer_))
is.formula <- function(f, sides = NULL) {
res <- is.name(f[[1]]) && deparse(f[[1]]) %in% c( '~', '!') &&
length(f) >= 2
if (is_not_null(sides) && is.numeric(sides) && sides %in% c(1,2)) {
res <- res && length(f) == sides + 1
}
return(res)
}
word.list <- function(word.list = NULL, and.or = c("and", "or"), is.are = FALSE, quotes = FALSE) {
#When given a vector of strings, creates a string of the form "a and b"
#or "a, b, and c"
#If is.are, adds "is" or "are" appropriately
L <- length(word.list)
if (quotes) word.list <- vapply(word.list, function(x) paste0("\"", x, "\""), character(1L))
if (L == 0) {
out <- ""
attr(out, "plural") = FALSE
}
else {
word.list <- word.list[!word.list %in% c(NA, "")]
L <- length(word.list)
if (L == 0) {
out <- ""
attr(out, "plural") = FALSE
}
else if (L == 1) {
out <- word.list
if (is.are) out <- paste(out, "is")
attr(out, "plural") = FALSE
}
else {
and.or <- match.arg(and.or)
if (L == 2) {
out <- paste(word.list, collapse = paste0(" ", and.or," "))
}
else {
out <- paste(paste(word.list[seq_len(L-1)], collapse = ", "),
word.list[L], sep = paste0(", ", and.or," "))
}
if (is.are) out <- paste(out, "are")
attr(out, "plural") = TRUE
}
}
return(out)
}
round_df_char <- function(df, digits, pad = "0", na_vals = "") {
nas <- is.na(df)
if (!is.data.frame(df)) df <- as.data.frame.matrix(df, stringsAsFactors = FALSE)
rn <- rownames(df)
cn <- colnames(df)
df <- as.data.frame(lapply(df, function(col) {
if (suppressWarnings(all(!is.na(as.numeric(as.character(col)))))) {
as.numeric(as.character(col))
} else {
col
}
}), stringsAsFactors = FALSE)
nums <- vapply(df, is.numeric, FUN.VALUE = logical(1))
o.negs <- sapply(1:ncol(df), function(x) if (nums[x]) df[[x]] < 0 else rep(FALSE, length(df[[x]])))
df[nums] <- round(df[nums], digits = digits)
df[nas] <- ""
df <- as.data.frame(lapply(df, format, scientific = FALSE, justify = "none"), stringsAsFactors = FALSE)
for (i in which(nums)) {
if (any(grepl(".", df[[i]], fixed = TRUE))) {
s <- strsplit(df[[i]], ".", fixed = TRUE)
lengths <- lengths(s)
digits.r.of.. <- vapply(seq_along(s), function(x) {
if (lengths[x] > 1) nchar(s[[x]][lengths[x]])
else 0 }, numeric(1L))
df[[i]] <- sapply(seq_along(df[[i]]), function(x) {
if (df[[i]][x] == "") ""
else if (lengths[x] <= 1) {
paste0(c(df[[i]][x], rep(".", pad == 0), rep(pad, max(digits.r.of..) - digits.r.of..[x] + as.numeric(pad != 0))),
collapse = "")
}
else paste0(c(df[[i]][x], rep(pad, max(digits.r.of..) - digits.r.of..[x])),
collapse = "")
})
}
}
df[o.negs & df == 0] <- paste0("-", df[o.negs & df == 0])
# Insert NA placeholders
df[nas] <- na_vals
if (length(rn) > 0) rownames(df) <- rn
if (length(cn) > 0) names(df) <- cn
return(df)
}
text.box.plot <- function(range.list, width = 12) {
full.range <- range(unlist(range.list))
ratio = diff(full.range)/(width+1)
rescaled.range.list <- lapply(range.list, function(x) round(x/ratio))
rescaled.full.range <- round(full.range/ratio)
d <- as.data.frame(matrix(NA_character_, ncol = 3, nrow = length(range.list),
dimnames = list(names(range.list), c("Min", paste(rep(" ", width + 1), collapse = ""), "Max"))),
stringsAsFactors = FALSE)
d[,"Min"] <- vapply(range.list, function(x) x[1], numeric(1L))
d[,"Max"] <- vapply(range.list, function(x) x[2], numeric(1L))
for (i in seq_len(nrow(d))) {
spaces1 <- rescaled.range.list[[i]][1] - rescaled.full.range[1]
#|
dashes <- max(0, diff(rescaled.range.list[[i]]) - 2)
#|
spaces2 <- max(0, diff(rescaled.full.range) - (spaces1 + 1 + dashes + 1))
d[i, 2] <- paste0(paste(rep(" ", spaces1), collapse = ""), "|", paste(rep("-", dashes), collapse = ""), "|", paste(rep(" ", spaces2), collapse = ""))
}
return(d)
}
ESS <- function(w) {
(sum(w)^2)/sum(w^2)
}
mean.abs.dev <- function(x) {
mean(abs(x - mean(x)))
}
coef.of.var <- function(x, pop = TRUE) {
if (pop) sqrt(mean((x-mean(x))^2))/mean(x)
else sd(x)/mean(x)
}
#To pass CRAN checks:
# Declare variable names that are used via non-standard evaluation elsewhere
# in the package so that R CMD check does not flag them as undefined globals.
utils::globalVariables(c("covs", "dual", "treat", "constraint"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cpx_ms_age.R
\name{ms.prior}
\alias{ms.prior}
\title{Evaluate parameters under a prior distribution for measles}
\usage{
ms.prior(parameters)
}
\arguments{
\item{parameters}{parameters}
}
\value{
prior probability density
}
\description{
Evaluate parameters under a prior distribution for measles
}
\author{
Sebastian Funk
}
| /man/ms.prior.Rd | no_license | sdwfrost/dynmod | R | false | true | 404 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cpx_ms_age.R
\name{ms.prior}
\alias{ms.prior}
\title{Evaluate parameters under a prior distribution for measles}
\usage{
ms.prior(parameters)
}
\arguments{
\item{parameters}{parameters}
}
\value{
prior probability density
}
\description{
Evaluate parameters under a prior distribution for measles
}
\author{
Sebastian Funk
}
|
/R code/181126.R | no_license | Kim-Ayeong/Simulation_with_R | R | false | false | 1,603 | r | ||
##' Log predictive density score (LPDS) from cross-validated MCMC output.
##'
##' For every cross-validation fold, the likelihood of the held-out
##' ("testing") observations is evaluated at a thinned subset of the
##' posterior draws; the fold-wise predictive densities are then combined
##' into the LPDS together with a numerical standard error.
##'
##' @param Y matrix of responses; rows are observations.
##' @param x matrix of covariates, aligned row-wise with \code{Y}.
##' @param logpost.fun.name character; name of the function evaluating the
##'     likelihood (called with \code{callParam = list(id = "likelihood")}).
##' @param crossvaid.struc list with components \code{"training"} and
##'     \code{"testing"}, each a list of row-index vectors, one per fold.
##'     (The misspelled argument name is kept for backward compatibility.)
##' @param splineArgs,priorArgs passed through to \code{logpost.fun.name}.
##' @param OUT.Params list of parameter draw arrays; dimension 4 indexes
##'     the MCMC iteration and dimension 5 the cross-validation fold.
##' @param Params_Transform passed through to \code{logpost.fun.name}.
##' @param burn.in proportion of initial draws discarded as burn-in.
##' @param LPDS.sampleProp proportion of post-burn-in draws used.
##' @return list with \code{LPDS}, \code{nseLPDS}, \code{logPredMatrix} and
##'     \code{LPDS.sampleIdx}; the first three are \code{NA} when only one
##'     fold is supplied.
##' @author Feng Li, Department of Statistics, Stockholm University, Sweden.
##' @note First version: Tue Nov 30 23:18:18 CET 2010;
##'     Current: Tue Nov 30 23:18:25 CET 2010.
##'     TODO: Don't use the full draws when nIter is large.
##'     Still doubt the nseLPDS
##' @export
LogPredScore <- function(Y, x, logpost.fun.name, crossvaid.struc, splineArgs, priorArgs,
                         OUT.Params, Params_Transform, burn.in, LPDS.sampleProp)
{
    ## BUG FIX: the body referenced `crossvalid.struc` while the formal
    ## argument is spelled `crossvaid.struc`, so the argument was silently
    ## ignored and the function only ran when a global with the former name
    ## happened to exist.  Alias it here; signature kept for compatibility.
    crossvalid.struc <- crossvaid.struc

    n <- dim(Y)[1]                    # no. of obs.
    nIter <- dim(OUT.Params[[1]])[4]  # no. of MCMC iterations
    num.burn.in <- floor(nIter*burn.in)
    nUsed <- nIter - num.burn.in

    ## The sample indices for LPDS after burn-in.  Build a decreasing
    ## sequence and sort it increasingly, so that the last draw is always
    ## used.  The resulting size may differ slightly from the requested
    ## proportion.
    LPDS.sampleIdx <- sort(seq(nIter, (nIter-nUsed+1), by = -round(1/LPDS.sampleProp)))

    nSample <- length(LPDS.sampleIdx)
    nCross <- length(crossvalid.struc[["training"]])

    if(nCross == 1)
    {
        ## No held-out fold: the LPDS is undefined.
        LPDS <- NA
        nseLPDS <- NA
        logPredMatrix <- NA
    }
    else if(nCross != 1)
    {
        ## Detect how many genuine folds were used: if the last
        ## training/testing pair covers all n observations it is a real
        ## fold, otherwise it is the full-data fit appended after the folds.
        if(length(crossvalid.struc[["training"]][[nCross]]) +
           length(crossvalid.struc[["testing"]][[nCross]]) == n)
        { nFold <- nCross }
        else
        { nFold <- nCross - 1 }

        ## Log predictive matrix: posterior draws in rows, folds in columns.
        logPredMatrix <- matrix(NA, nSample, nFold)

        ## Calculate the predictive densities for all folds.
        for(iCross in 1:nFold)
        {
            iTesting <- crossvalid.struc[["testing"]][[iCross]]
            Y.iTesting <- Y[iTesting, , drop = FALSE]
            x.iTesting <- x[iTesting, , drop = FALSE]

            which.j <- 0
            for(j in LPDS.sampleIdx) # just the likelihood at posterior draws
            {
                ## Extract draw j of fold iCross from every parameter array.
                Params.j <- lapply(OUT.Params, function(x) apply(x[,,, j, iCross, drop =
                    FALSE], c(1, 2), "["))
                caller.log.like <- call(logpost.fun.name, Y = Y.iTesting, x = x.iTesting,
                                        Params = Params.j,
                                        callParam = list(id = "likelihood"),
                                        priorArgs = priorArgs, splineArgs = splineArgs,
                                        Params_Transform = Params_Transform)
                log.like <- eval(caller.log.like)
                which.j <- which.j + 1
                logPredMatrix[which.j, iCross] <- log.like
            }
        }

        ## Calculate the LPDS.  Subtract the per-fold median before
        ## exponentiating to avoid overflow/underflow (log-sum-exp trick).
        scaleFactors <- apply(logPredMatrix, 2, median)
        scaleMatrix <- matrix(scaleFactors, nSample, nFold)
        expPredMatrix <- exp(logPredMatrix-scaleMatrix)
        expPredMatrix[is.infinite(expPredMatrix)] <- NA # TODO: Think about it carefully
        expPredMean <- colMeans(expPredMatrix, na.rm = TRUE)
        LPDS <- mean(scaleFactors + log(expPredMean))

        if (sum(is.infinite(expPredMatrix)) > nSample*.05)
        {
            ## At least 5% infinities: maybe due to unsuccessful MCMC.
            LPDS <- NA
            warning("Too many infinite densities produced, check MCMC convergence!")
        }

        ## Calculate the numerical se. for LPDS.  See Box & Jenkins (2007)
        ## p.31: the variance of each fold mean is inflated by the summed
        ## autocorrelation of the (thinned) draws.
        ACFxx <- matrix(0, 1, nFold)
        for(k in 1:nFold)
        {
            acfSample.tmp0 <- expPredMatrix[, k]
            acfSample.tmp <- acfSample.tmp0[acfSample.tmp0<1e100] # numeric stability
            predACF <- acf(acfSample.tmp, plot = FALSE, na.action = na.pass)$acf
            nlagk <- length(predACF)
            for(kk in 0:(nlagk-1))
            {
                ACFxx[k] <- ACFxx[k] + (1 - kk/nlagk)*predACF[kk +1]
            }
        }
        expPredMatrix.tmp <- expPredMatrix
        expPredMatrix.tmp[expPredMatrix.tmp>1e100] <- NA # numeric stability
        predVar <- apply(expPredMatrix.tmp, 2, var, na.rm = TRUE)
        var.MeanexpPredMatrix <- predVar/nSample*(1+2*ACFxx)
        nvarLPDS <- 1/nFold^2*sum(1/(expPredMean)^2*var.MeanexpPredMatrix)
        nseLPDS <- sqrt(nvarLPDS)
    }

    out <- list(LPDS = LPDS, nseLPDS = nseLPDS, logPredMatrix = logPredMatrix,
                LPDS.sampleIdx = LPDS.sampleIdx)
    return(out)
}
| /R/LogPredScore.R | no_license | kl-lab/fformpp | R | false | false | 4,755 | r | ##' This is the LPDS
##' Log predictive density score (LPDS) from cross-validated MCMC output.
##'
##' For every cross-validation fold, the likelihood of the held-out
##' ("testing") observations is evaluated at a thinned subset of the
##' posterior draws; the fold-wise predictive densities are then combined
##' into the LPDS together with a numerical standard error.
##'
##' @param Y matrix of responses; rows are observations.
##' @param x matrix of covariates, aligned row-wise with \code{Y}.
##' @param logpost.fun.name character; name of the function evaluating the
##'     likelihood (called with \code{callParam = list(id = "likelihood")}).
##' @param crossvaid.struc list with components \code{"training"} and
##'     \code{"testing"}, each a list of row-index vectors, one per fold.
##'     (The misspelled argument name is kept for backward compatibility.)
##' @param splineArgs,priorArgs passed through to \code{logpost.fun.name}.
##' @param OUT.Params list of parameter draw arrays; dimension 4 indexes
##'     the MCMC iteration and dimension 5 the cross-validation fold.
##' @param Params_Transform passed through to \code{logpost.fun.name}.
##' @param burn.in proportion of initial draws discarded as burn-in.
##' @param LPDS.sampleProp proportion of post-burn-in draws used.
##' @return list with \code{LPDS}, \code{nseLPDS}, \code{logPredMatrix} and
##'     \code{LPDS.sampleIdx}; the first three are \code{NA} when only one
##'     fold is supplied.
##' @author Feng Li, Department of Statistics, Stockholm University, Sweden.
##' @note First version: Tue Nov 30 23:18:18 CET 2010;
##'     Current: Tue Nov 30 23:18:25 CET 2010.
##'     TODO: Don't use the full draws when nIter is large.
##'     Still doubt the nseLPDS
##' @export
LogPredScore <- function(Y, x, logpost.fun.name, crossvaid.struc, splineArgs, priorArgs,
                         OUT.Params, Params_Transform, burn.in, LPDS.sampleProp)
{
    ## BUG FIX: the body referenced `crossvalid.struc` while the formal
    ## argument is spelled `crossvaid.struc`, so the argument was silently
    ## ignored and the function only ran when a global with the former name
    ## happened to exist.  Alias it here; signature kept for compatibility.
    crossvalid.struc <- crossvaid.struc

    n <- dim(Y)[1]                    # no. of obs.
    nIter <- dim(OUT.Params[[1]])[4]  # no. of MCMC iterations
    num.burn.in <- floor(nIter*burn.in)
    nUsed <- nIter - num.burn.in

    ## The sample indices for LPDS after burn-in.  Build a decreasing
    ## sequence and sort it increasingly, so that the last draw is always
    ## used.  The resulting size may differ slightly from the requested
    ## proportion.
    LPDS.sampleIdx <- sort(seq(nIter, (nIter-nUsed+1), by = -round(1/LPDS.sampleProp)))

    nSample <- length(LPDS.sampleIdx)
    nCross <- length(crossvalid.struc[["training"]])

    if(nCross == 1)
    {
        ## No held-out fold: the LPDS is undefined.
        LPDS <- NA
        nseLPDS <- NA
        logPredMatrix <- NA
    }
    else if(nCross != 1)
    {
        ## Detect how many genuine folds were used: if the last
        ## training/testing pair covers all n observations it is a real
        ## fold, otherwise it is the full-data fit appended after the folds.
        if(length(crossvalid.struc[["training"]][[nCross]]) +
           length(crossvalid.struc[["testing"]][[nCross]]) == n)
        { nFold <- nCross }
        else
        { nFold <- nCross - 1 }

        ## Log predictive matrix: posterior draws in rows, folds in columns.
        logPredMatrix <- matrix(NA, nSample, nFold)

        ## Calculate the predictive densities for all folds.
        for(iCross in 1:nFold)
        {
            iTesting <- crossvalid.struc[["testing"]][[iCross]]
            Y.iTesting <- Y[iTesting, , drop = FALSE]
            x.iTesting <- x[iTesting, , drop = FALSE]

            which.j <- 0
            for(j in LPDS.sampleIdx) # just the likelihood at posterior draws
            {
                ## Extract draw j of fold iCross from every parameter array.
                Params.j <- lapply(OUT.Params, function(x) apply(x[,,, j, iCross, drop =
                    FALSE], c(1, 2), "["))
                caller.log.like <- call(logpost.fun.name, Y = Y.iTesting, x = x.iTesting,
                                        Params = Params.j,
                                        callParam = list(id = "likelihood"),
                                        priorArgs = priorArgs, splineArgs = splineArgs,
                                        Params_Transform = Params_Transform)
                log.like <- eval(caller.log.like)
                which.j <- which.j + 1
                logPredMatrix[which.j, iCross] <- log.like
            }
        }

        ## Calculate the LPDS.  Subtract the per-fold median before
        ## exponentiating to avoid overflow/underflow (log-sum-exp trick).
        scaleFactors <- apply(logPredMatrix, 2, median)
        scaleMatrix <- matrix(scaleFactors, nSample, nFold)
        expPredMatrix <- exp(logPredMatrix-scaleMatrix)
        expPredMatrix[is.infinite(expPredMatrix)] <- NA # TODO: Think about it carefully
        expPredMean <- colMeans(expPredMatrix, na.rm = TRUE)
        LPDS <- mean(scaleFactors + log(expPredMean))

        if (sum(is.infinite(expPredMatrix)) > nSample*.05)
        {
            ## At least 5% infinities: maybe due to unsuccessful MCMC.
            LPDS <- NA
            warning("Too many infinite densities produced, check MCMC convergence!")
        }

        ## Calculate the numerical se. for LPDS.  See Box & Jenkins (2007)
        ## p.31: the variance of each fold mean is inflated by the summed
        ## autocorrelation of the (thinned) draws.
        ACFxx <- matrix(0, 1, nFold)
        for(k in 1:nFold)
        {
            acfSample.tmp0 <- expPredMatrix[, k]
            acfSample.tmp <- acfSample.tmp0[acfSample.tmp0<1e100] # numeric stability
            predACF <- acf(acfSample.tmp, plot = FALSE, na.action = na.pass)$acf
            nlagk <- length(predACF)
            for(kk in 0:(nlagk-1))
            {
                ACFxx[k] <- ACFxx[k] + (1 - kk/nlagk)*predACF[kk +1]
            }
        }
        expPredMatrix.tmp <- expPredMatrix
        expPredMatrix.tmp[expPredMatrix.tmp>1e100] <- NA # numeric stability
        predVar <- apply(expPredMatrix.tmp, 2, var, na.rm = TRUE)
        var.MeanexpPredMatrix <- predVar/nSample*(1+2*ACFxx)
        nvarLPDS <- 1/nFold^2*sum(1/(expPredMean)^2*var.MeanexpPredMatrix)
        nseLPDS <- sqrt(nvarLPDS)
    }

    out <- list(LPDS = LPDS, nseLPDS = nseLPDS, logPredMatrix = logPredMatrix,
                LPDS.sampleIdx = LPDS.sampleIdx)
    return(out)
}
|
# CoWriter experiment log analysis: load target and user-button event logs,
# merge and time-order them, then compute event-transition frequencies for
# several subsets (per session, face-to-face vs side-by-side seating).
# NOTE(review): this script assumes packages are attached elsewhere --
# `rename()` with a named character vector is plyr syntax, and
# `frequencies()` is not defined in this script; confirm both are available
# before running.
targets <- read.csv("~/Documents/cowriter_logs/EIntGen/all_targets.csv", header=FALSE)
buttons <- read.csv("~/Documents/cowriter_logs/EIntGen/all_userbutton.csv", header=FALSE)
### combine data
# keep only the relevant raw columns and give them readable names
col1 = c("V2","V5","V6","V7","V9" ,"V11")
n = nrow(targets)
# drop the first row (presumably a header artefact -- TODO confirm)
targets = targets[2:n,col1]
targets = rename(targets, c("V2"="time", "V5"="session", "V6"="f2f", "V7"= "alone", "V9"="gender", "V11" = "event"))
col2 = c("V2","V5","V6","V7","V9" ,"V11")
n = nrow(buttons)
buttons = buttons[2:n,col2]
buttons = rename(buttons, c("V2"="time", "V5"="session", "V6"="f2f", "V7"= "alone", "V9"="gender", "V11" = "event"))
# stack both event sources into a single table
events = rbind(targets,buttons)
### sort by time
# coerce all columns to character so ordering and subsetting are uniform
events[] <- lapply(events, as.character)
events = events[order(events$time),]
# dynamic-button events vs attention/selection events
dyn_buttons = subset(events,events$event %in% c("new_word","+","-"))
events = subset(events,events$event %in% c("send","+","-","tablet","selection_tablet","robot_head","experimenter"))
### the subsets of interest:
sess1 = subset(events, events$session==1)
sess2 = subset(events, events$session==2)
face = subset(events, events$f2f=="f2f")
side = subset(events, events$f2f=="sbs")
face_buttons = subset(dyn_buttons, dyn_buttons$f2f=="f2f")
side_buttons = subset(dyn_buttons, dyn_buttons$f2f=="sbs")
### compute frequency
# `seuil` (French for "threshold") presumably filters out rare transitions
frequencies(events$event,c("tablet","selection_tablet","robot_head","experimenter"),seuil=0.15)
frequencies(face$event,c("tablet","selection_tablet","robot_head","experimenter"),seuil=0.15)
frequencies(side$event,c("tablet","selection_tablet","robot_head","experimenter"),seuil=0.15)
frequencies(sess1$event,c("tablet","selection_tablet","robot_head","experimenter"),seuil=0.15)
frequencies(sess2$event,c("tablet","selection_tablet","robot_head","experimenter"),seuil=0.15)
frequencies(face_buttons$event,c("tablet","selection_tablet","robot_head","experimenter"),seuil=0.05)
frequencies(side_buttons$event,c("tablet","selection_tablet","robot_head","experimenter"),seuil=0.05)
| /EIntGen/transitions.r | no_license | GuiGui1310/cowriter_logs | R | false | false | 1,943 | r |
# CoWriter experiment log analysis: load target and user-button event logs,
# merge and time-order them, then compute event-transition frequencies for
# several subsets (per session, face-to-face vs side-by-side seating).
# NOTE(review): this script assumes packages are attached elsewhere --
# `rename()` with a named character vector is plyr syntax, and
# `frequencies()` is not defined in this script; confirm both are available
# before running.
targets <- read.csv("~/Documents/cowriter_logs/EIntGen/all_targets.csv", header=FALSE)
buttons <- read.csv("~/Documents/cowriter_logs/EIntGen/all_userbutton.csv", header=FALSE)
### combine data
# keep only the relevant raw columns and give them readable names
col1 = c("V2","V5","V6","V7","V9" ,"V11")
n = nrow(targets)
# drop the first row (presumably a header artefact -- TODO confirm)
targets = targets[2:n,col1]
targets = rename(targets, c("V2"="time", "V5"="session", "V6"="f2f", "V7"= "alone", "V9"="gender", "V11" = "event"))
col2 = c("V2","V5","V6","V7","V9" ,"V11")
n = nrow(buttons)
buttons = buttons[2:n,col2]
buttons = rename(buttons, c("V2"="time", "V5"="session", "V6"="f2f", "V7"= "alone", "V9"="gender", "V11" = "event"))
# stack both event sources into a single table
events = rbind(targets,buttons)
### sort by time
# coerce all columns to character so ordering and subsetting are uniform
events[] <- lapply(events, as.character)
events = events[order(events$time),]
# dynamic-button events vs attention/selection events
dyn_buttons = subset(events,events$event %in% c("new_word","+","-"))
events = subset(events,events$event %in% c("send","+","-","tablet","selection_tablet","robot_head","experimenter"))
### the subsets of interest:
sess1 = subset(events, events$session==1)
sess2 = subset(events, events$session==2)
face = subset(events, events$f2f=="f2f")
side = subset(events, events$f2f=="sbs")
face_buttons = subset(dyn_buttons, dyn_buttons$f2f=="f2f")
side_buttons = subset(dyn_buttons, dyn_buttons$f2f=="sbs")
### compute frequency
# `seuil` (French for "threshold") presumably filters out rare transitions
frequencies(events$event,c("tablet","selection_tablet","robot_head","experimenter"),seuil=0.15)
frequencies(face$event,c("tablet","selection_tablet","robot_head","experimenter"),seuil=0.15)
frequencies(side$event,c("tablet","selection_tablet","robot_head","experimenter"),seuil=0.15)
frequencies(sess1$event,c("tablet","selection_tablet","robot_head","experimenter"),seuil=0.15)
frequencies(sess2$event,c("tablet","selection_tablet","robot_head","experimenter"),seuil=0.15)
frequencies(face_buttons$event,c("tablet","selection_tablet","robot_head","experimenter"),seuil=0.05)
frequencies(side_buttons$event,c("tablet","selection_tablet","robot_head","experimenter"),seuil=0.05)
|
library(readr)
# Magnetic-field lab analysis: infer the field of a current-carrying wire
# from compass deflections (tangent method, Bexp = Bterra * tan(theta)),
# compare with the nominal Biot-Savart value Bnom = u0 * i / (2 * pi * r),
# fit Bexp ~ 1/r, plot the fit and export a results table.
# constants
# u0: vacuum permeability (T m/A); degree: degrees -> radians factor;
# i: wire current (presumably amperes -- TODO confirm units);
# Bterra: local horizontal component of Earth's field (T).
u0 <- 4 * pi * 10 ^ (-7)
degree <- pi / 180
i <- 351.1
Bterra <- 1.70192 * 10 ^ (-5)
# get data
# semicolon-separated file; column 1 is the distance r (cm per the file
# name), column 2 the compass deflection in degrees.
metric_i_cte <-
read_delim(
"data/metric_i_cte_r-em-cm.csv",
";",
escape_double = FALSE,
trim_ws = TRUE
)
# convert the distances from cm to mm
r_mm = metric_i_cte[, 1] * 10
teta = metric_i_cte[, 2]
# calculating Bexp and Bnom
Bexp = Bterra * tan(teta * degree)
colnames(Bexp) <- c("Bexp (T)")
Bnom <- ((u0 * i) / (2 * pi * r_mm))
colnames(Bnom) <- c("Bnom (T)")
# NOTE(review): `table` shadows base::table() for the rest of the script.
# Values are tesla scaled by 1e6 (microtesla), yet the labels below say
# "mT" -- confirm the intended unit.
table = r_mm
table[, 2] <- Bexp * (10 ^ 6)
colnames(table) <- c("r", "Bexp")
# fitting curve: Bexp = a + b * (1/r)
fit = lm(Bexp ~ I(1 / r), data = table)
# getting info from the fit
summary(fit)
# write summary result into a file
#write(summary(fit), "../plots_and_figures/ex2_output.txt")
# create sequence for x coordinate (r values for the fitted line)
xx <- seq(5, 50, length = 50)
# save to file
png("../plots_and_figures/ex2_teta_per_r.png")
# png("../plots_and_figures/ex2_teta_per_r.png", width = 640, height = 480)
# plotting measured field vs distance
plot(
table,
pch = 19,
xlab = "r (mm)",
ylab = "B (mT)",
main = "Campo Magnético do fio x Distância ao fio"
)
# add adjusted (fitted) curve to the plot
lines(xx, predict(fit, data.frame(r = xx)), col = "red")
# adding a legend to this graph
legend(
"topright",
inset = .05,
c("B medido", "B ajustado"),
col = c("black", "red"),
lty = c(NA, 1),
pch = c(20, NA),
cex = 1.2,
box.lty = 0
)
# save in device
dev.off()
# assemble the exported table: r, deflection, measured and nominal B
table = r_mm
table[, 2] <- teta
table[, 3] <- Bexp * (10 ^ 6)
table[, 4] <- Bnom * (10 ^ 6)
colnames(table) <-
c("r (mm)", "Deflexão ()", "Bexp (mT)", "Bnom (mT)")
write.table(
table,
file = "../plots_and_figures/output_table_exp2.csv",
sep = ";",
quote = FALSE,
row.names = TRUE
)
| /second_script.R | no_license | WellingtonEspindula/Eletromagnetism-experiments-plots | R | false | false | 1,695 | r | library(readr)
# Magnetic-field lab analysis: infer the field of a current-carrying wire
# from compass deflections (tangent method, Bexp = Bterra * tan(theta)),
# compare with the nominal Biot-Savart value Bnom = u0 * i / (2 * pi * r),
# fit Bexp ~ 1/r, plot the fit and export a results table.
# (Requires readr for read_delim.)
# constants
# u0: vacuum permeability (T m/A); degree: degrees -> radians factor;
# i: wire current (presumably amperes -- TODO confirm units);
# Bterra: local horizontal component of Earth's field (T).
u0 <- 4 * pi * 10 ^ (-7)
degree <- pi / 180
i <- 351.1
Bterra <- 1.70192 * 10 ^ (-5)
# get data
# semicolon-separated file; column 1 is the distance r (cm per the file
# name), column 2 the compass deflection in degrees.
metric_i_cte <-
read_delim(
"data/metric_i_cte_r-em-cm.csv",
";",
escape_double = FALSE,
trim_ws = TRUE
)
# convert the distances from cm to mm
r_mm = metric_i_cte[, 1] * 10
teta = metric_i_cte[, 2]
# calculating Bexp and Bnom
Bexp = Bterra * tan(teta * degree)
colnames(Bexp) <- c("Bexp (T)")
Bnom <- ((u0 * i) / (2 * pi * r_mm))
colnames(Bnom) <- c("Bnom (T)")
# NOTE(review): `table` shadows base::table() for the rest of the script.
# Values are tesla scaled by 1e6 (microtesla), yet the labels below say
# "mT" -- confirm the intended unit.
table = r_mm
table[, 2] <- Bexp * (10 ^ 6)
colnames(table) <- c("r", "Bexp")
# fitting curve: Bexp = a + b * (1/r)
fit = lm(Bexp ~ I(1 / r), data = table)
# getting info from the fit
summary(fit)
# write summary result into a file
#write(summary(fit), "../plots_and_figures/ex2_output.txt")
# create sequence for x coordinate (r values for the fitted line)
xx <- seq(5, 50, length = 50)
# save to file
png("../plots_and_figures/ex2_teta_per_r.png")
# png("../plots_and_figures/ex2_teta_per_r.png", width = 640, height = 480)
# plotting measured field vs distance
plot(
table,
pch = 19,
xlab = "r (mm)",
ylab = "B (mT)",
main = "Campo Magnético do fio x Distância ao fio"
)
# add adjusted (fitted) curve to the plot
lines(xx, predict(fit, data.frame(r = xx)), col = "red")
# adding a legend to this graph
legend(
"topright",
inset = .05,
c("B medido", "B ajustado"),
col = c("black", "red"),
lty = c(NA, 1),
pch = c(20, NA),
cex = 1.2,
box.lty = 0
)
# save in device
dev.off()
# assemble the exported table: r, deflection, measured and nominal B
table = r_mm
table[, 2] <- teta
table[, 3] <- Bexp * (10 ^ 6)
table[, 4] <- Bnom * (10 ^ 6)
colnames(table) <-
c("r (mm)", "Deflexão ()", "Bexp (mT)", "Bnom (mT)")
write.table(
table,
file = "../plots_and_figures/output_table_exp2.csv",
sep = ";",
quote = FALSE,
row.names = TRUE
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zero-inflated-poisson-distribution.R
\name{ZIP}
\alias{ZIP}
\alias{dzip}
\alias{pzip}
\alias{qzip}
\alias{rzip}
\title{Zero-inflated Poisson distribution}
\usage{
dzip(x, lambda, pi, log = FALSE)
pzip(q, lambda, pi, lower.tail = TRUE, log.p = FALSE)
qzip(p, lambda, pi, lower.tail = TRUE, log.p = FALSE)
rzip(n, lambda, pi)
}
\arguments{
\item{x, q}{vector of quantiles.}
\item{lambda}{vector of (non-negative) means.}
\item{pi}{probability of extra zeros.}
\item{log, log.p}{logical; if TRUE, probabilities p are given as log(p).}
\item{lower.tail}{logical; if TRUE (default), probabilities are \eqn{P[X \le x]}
otherwise, \eqn{P[X > x]}.}
\item{p}{vector of probabilities.}
\item{n}{number of observations. If \code{length(n) > 1},
the length is taken to be the number required.}
}
\description{
Probability mass function, distribution function, quantile function
and random generation for the zero-inflated Poisson distribution.
}
\details{
Probability mass function
\deqn{
f(x) = \left\{\begin{array}{ll}
\pi + (1 - \pi) e^{-\lambda} & x = 0 \\
(1 - \pi) \frac{\lambda^{x} e^{-\lambda}} {x!} & x > 0 \\
\end{array}\right.
}{
f(x) = [if x = 0:] \pi + (1-\pi) * exp(-\lambda) [else:] (1-\pi) * dpois(x, lambda)
}
}
\examples{
x <- rzip(1e5, 6, 0.33)
xx <- -2:20
plot(prop.table(table(x)), type = "h")
lines(xx, dzip(xx, 6, 0.33), col = "red")
xx <- seq(0, 20, by = 0.01)
plot(ecdf(x))
lines(xx, pzip(xx, 6, 0.33), col = "red")
}
\seealso{
\code{\link[stats]{Poisson}}
}
\concept{Discrete}
\concept{Univariate}
\keyword{distribution}
| /man/ZIP.Rd | no_license | twolodzko/extraDistr | R | false | true | 1,586 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zero-inflated-poisson-distribution.R
\name{ZIP}
\alias{ZIP}
\alias{dzip}
\alias{pzip}
\alias{qzip}
\alias{rzip}
\title{Zero-inflated Poisson distribution}
\usage{
dzip(x, lambda, pi, log = FALSE)
pzip(q, lambda, pi, lower.tail = TRUE, log.p = FALSE)
qzip(p, lambda, pi, lower.tail = TRUE, log.p = FALSE)
rzip(n, lambda, pi)
}
\arguments{
\item{x, q}{vector of quantiles.}
\item{lambda}{vector of (non-negative) means.}
\item{pi}{probability of extra zeros.}
\item{log, log.p}{logical; if TRUE, probabilities p are given as log(p).}
\item{lower.tail}{logical; if TRUE (default), probabilities are \eqn{P[X \le x]}
otherwise, \eqn{P[X > x]}.}
\item{p}{vector of probabilities.}
\item{n}{number of observations. If \code{length(n) > 1},
the length is taken to be the number required.}
}
\description{
Probability mass function, distribution function, quantile function
and random generation for the zero-inflated Poisson distribution.
}
\details{
Probability mass function
\deqn{
f(x) = \left\{\begin{array}{ll}
\pi + (1 - \pi) e^{-\lambda} & x = 0 \\
(1 - \pi) \frac{\lambda^{x} e^{-\lambda}} {x!} & x > 0 \\
\end{array}\right.
}{
f(x) = [if x = 0:] \pi + (1-\pi) * exp(-\lambda) [else:] (1-\pi) * dpois(x, lambda)
}
}
\examples{
x <- rzip(1e5, 6, 0.33)
xx <- -2:20
plot(prop.table(table(x)), type = "h")
lines(xx, dzip(xx, 6, 0.33), col = "red")
xx <- seq(0, 20, by = 0.01)
plot(ecdf(x))
lines(xx, pzip(xx, 6, 0.33), col = "red")
}
\seealso{
\code{\link[stats]{Poisson}}
}
\concept{Discrete}
\concept{Univariate}
\keyword{distribution}
|
#' Definition of the eigenfunctions and eigenvalues of the kernel
#'
#' Given a particular type of kernel, to be chosen among (\code{gaussian},
#' \code{exponential} and \code{sobolev}), it returns the
#' evaluation of the eigenfunctions of the kernel on the grid \code{domain}
#' and the correspondent eigenvalues.
#'
#' @param type string. Type of kernel. Three possible choices implemented:
#' \code{gaussian}, \code{exponential} and \code{sobolev}. For the other
#' types of kernel, define manually the eigenfunctions and eigenvectors
#' of the kernel.
#' @param parameter scalar. Value of the characteristic parameter of the kernel.
#' It is the \eqn{\sigma}
#' parameter of the Gaussian and the Exponential kernel, as introduced in \code{kernlab};
#' and the \eqn{\sigma} parameter of the Sobolev
#' kernel as in the \link[=sobolev_kernel_generation]{sobolev_kernel_generation} function.
#'
#' @param domain vector. \code{m}-length vector of the
#' abscissa grid for the definition of the kernel.
#' @param thres scalar. Threshold to identify the significant
#' eigenvalues of the kernel. The number of significant eigennvalues \code{J} is
#' the minimum \eqn{J} s.t.
#' \deqn{
#' \sum_{j = 1}^J \theta_j \geq \textrm{thres} \sum_{j = 1}^{\infty} \theta_j.
#' } Default is 0.99.
#'
#' @param return.derivatives bool. If \code{TRUE} the function returns also
#' the matrix of the evaluation of
#' the derivatives of the eigenfunctions on the time domain.
#' Default is \code{FALSE}.
#'
#' @return list containing
#' \itemize{
#' \item \code{eigenvect} \code{m} \eqn{\times} \code{J} matrix of
#' the eigenfunctions of the kernel evaluated on the \code{domain}.
#' \item \code{eigenval} \code{J}-length vector of the
#' eigenvalues of the kernel
#' \item \code{derivatives}. if \code{return.derivatives = TRUE}.
#' \code{derivatives} is the (\code{m-1}) \eqn{\times} \code{J} matrix of the derivatives of
#' the eigenfunctions evaluated on the time domain.
#'}
#' @details Here the list of the kernel defined in this function
#' \itemize{
#' \item \code{gaussian}
#' \deqn{
#' k(x, x') = \exp(-\sigma \| x- x'\|^2)
#' }
#' \item \code{exponential}
#' \deqn{
#' k(x, x') = \exp(-\sigma \| x- x'\|)
#' }
#' \item \code{sobolev}, the kernel associated to the norm in the \eqn{H^1} space
#' \deqn{
#' \| f \|^2 = \int_{D} f(t)^2 dt + \frac{1}{\sigma} \int_{D} f'(t)^2 dt
#' }
#' where \eqn{D} is the one-dimensional \code{domain} and \eqn{f'} is the first derivative of the function.
#' }
#'
#' @export
#'
#' @import kernlab
#'
#' @examples
#' # definition of the kernel
#' type_kernel <- 'sobolev'
#' param_kernel <- 8
#' T_domain <- seq(0, 1, length = 50)
#' kernel_here <- generation_kernel ( type = type_kernel,
#' parameter = param_kernel,
#' domain = T_domain,
#' thres = 0.99,
#' return.derivatives = TRUE)
#'
#' eigenvalues <- kernel_here$eigenval
#' eigenvectors <- kernel_here$eigenvect
#' der <- kernel_here$derivatives
#'
generation_kernel <- function(type = 'sobolev', parameter = NULL, domain, thres = 0.99,
                              return.derivatives = FALSE)
{
  ## Grid density used to turn matrix eigenpairs into (approximate)
  ## eigenpairs of the integral operator int K(t,s) v(s) ds = lambda v(t):
  ## discretising the integral scales matrix eigenvalues by M_integ and
  ## eigenvectors by 1/sqrt(M_integ).
  ## NOTE(review): with m grid points there are m - 1 intervals, so
  ## (length(domain) - 1)/diff(range(domain)) may be the intended density;
  ## kept as-is to preserve behaviour.
  M_integ <- length(domain)/diff(range(domain))

  if (!(type %in% c('sobolev', 'exponential', 'gaussian')))
  {
    stop ("error: not defined kernel, please define the set
          of eigenfunctions and eigenvectors manually")
  }
  if (length(parameter) != 1 )
  {
    stop ("please provide the parameters of the kernel. See the help page
          of the function for the parameter definition")
  }

  ## Definition of the eigenfunctions and eigenvalues for the three kernels.
  if (type == 'sobolev')
  {
    kernel_def <- sobolev_kernel_generation(a = domain[1], b = domain[length(domain)],
                                            m = length(domain), sigma = parameter,
                                            plot.eigen = FALSE)
    kernel_def$values <- kernel_def$values/M_integ
    kernel_def$vectors <- kernel_def$vectors*sqrt(M_integ)
  }
  else
  {
    ## The exponential and Gaussian kernels share the same pipeline; only
    ## the kernlab kernel constructor differs (laplacedot vs rbfdot).
    kern <- if (type == 'exponential') laplacedot(sigma = parameter)
            else rbfdot(sigma = parameter)
    mat <- kernelMatrix(kern, domain)
    ## Compute the eigendecomposition once: it was previously evaluated
    ## twice per branch, doubling the O(m^3) cost.
    eig <- eigen(mat)
    kernel_def <- list(vectors = eig$vectors*sqrt(M_integ),
                       values = eig$values/M_integ)
  }

  ## Keep the smallest leading set of eigenvalues whose cumulative share of
  ## the total exceeds `thres`.
  ## NOTE(review): uses strict ">" although the documentation states ">=".
  num_eigen <- which((cumsum(kernel_def$values)/sum(kernel_def$values)) > thres)[1]
  eigen_chosen <- 1:num_eigen

  # definition of the retained eigenvalues and eigenvectors of the kernel
  autoval <- kernel_def$values[eigen_chosen]
  autovett <- kernel_def$vectors[, eigen_chosen]

  if (return.derivatives == TRUE)
  {
    ## Forward differences approximate the eigenfunction derivatives on
    ## the grid, yielding length(domain) - 1 rows.
    ## NOTE(review): divides by the first grid spacing only; a non-uniform
    ## grid would need elementwise division by diff(domain).
    diff_autovett <- apply(autovett, 2, diff)/(domain[2]-domain[1])
    return(list(eigenvect = autovett, eigenval = autoval,
                derivatives = diff_autovett))
  } else {
    return(list(eigenvect = autovett, eigenval = autoval))
  }
}
| /R/generation_kernel.R | no_license | ardeeshany/FLAME | R | false | false | 6,585 | r | #' Definition of the eigenfunctions and eigenvalues of the kernel
#'
#' Given a particular type of kernel, to be chosen among (\code{gaussian},
#' \code{exponential} and \code{sobolev}), it returns the
#' evaluation of the eigenfunctions of the kernel on the grid \code{domain}
#' and the correspondent eigenvalues.
#'
#' @param type string. Type of kernel. Three possible choices implemented:
#' \code{gaussian}, \code{exponential} and \code{sobolev}. For the other
#' types of kernel, define manually the eigenfunctions and eigenvectors
#' of the kernel.
#' @param parameter scalar. Value of the characteristic parameter of the kernel.
#' It is the \eqn{\sigma}
#' parameter of the Gaussian and the Exponential kernel, as introduced in \code{kernlab};
#' and the \eqn{\sigma} parameter of the Sobolev
#' kernel as in the \link[=sobolev_kernel_generation]{sobolev_kernel_generation} function.
#'
#' @param domain vector. \code{m}-length vector of the
#' abscissa grid for the definition of the kernel.
#' @param thres scalar. Threshold to identify the significant
#' eigenvalues of the kernel. The number of significant eigennvalues \code{J} is
#' the minimum \eqn{J} s.t.
#' \deqn{
#' \sum_{j = 1}^J \theta_j \geq \textrm{thres} \sum_{j = 1}^{\infty} \theta_j.
#' } Default is 0.99.
#'
#' @param return.derivatives bool. If \code{TRUE} the function returns also
#' the matrix of the evaluation of
#' the derivatives of the eigenfunctions on the time domain.
#' Default is \code{FALSE}.
#'
#' @return list containing
#' \itemize{
#' \item \code{eigenvect} \code{m} \eqn{\times} \code{J} matrix of
#' the eigenfunctions of the kernel evaluated on the \code{domain}.
#' \item \code{eigenval} \code{J}-length vector of the
#' eigenvalues of the kernel
#' \item \code{derivatives}. if \code{return.derivatives = TRUE}.
#' \code{derivatives} is the (\code{m-1}) \eqn{\times} \code{J} matrix of the derivatives of
#' the eigenfunctions evaluated on the time domain.
#'}
#' @details Here the list of the kernel defined in this function
#' \itemize{
#' \item \code{gaussian}
#' \deqn{
#' k(x, x') = \exp(-\sigma \| x- x'\|^2)
#' }
#' \item \code{exponential}
#' \deqn{
#' k(x, x') = \exp(-\sigma \| x- x'\|)
#' }
#' \item \code{sobolev}, the kernel associated to the norm in the \eqn{H^1} space
#' \deqn{
#' \| f \|^2 = \int_{D} f(t)^2 dt + \frac{1}{\sigma} \int_{D} f'(t)^2 dt
#' }
#' where \eqn{D} is the one-dimensional \code{domain} and \eqn{f'} is the first derivative of the function.
#' }
#'
#' @export
#'
#' @import kernlab
#'
#' @examples
#' # definition of the kernel
#' type_kernel <- 'sobolev'
#' param_kernel <- 8
#' T_domain <- seq(0, 1, length = 50)
#' kernel_here <- generation_kernel ( type = type_kernel,
#' parameter = param_kernel,
#' domain = T_domain,
#' thres = 0.99,
#' return.derivatives = TRUE)
#'
#' eigenvalues <- kernel_here$eigenval
#' eigenvectors <- kernel_here$eigenvect
#' der <- kernel_here$derivatives
#'
generation_kernel <- function(type = 'sobolev', parameter = NULL, domain, thres = 0.99,
                              return.derivatives = FALSE)
{
  ## Grid density used to turn matrix eigenpairs into (approximate)
  ## eigenpairs of the integral operator int K(t,s) v(s) ds = lambda v(t):
  ## discretising the integral scales matrix eigenvalues by M_integ and
  ## eigenvectors by 1/sqrt(M_integ).
  ## NOTE(review): with m grid points there are m - 1 intervals, so
  ## (length(domain) - 1)/diff(range(domain)) may be the intended density;
  ## kept as-is to preserve behaviour.
  M_integ <- length(domain)/diff(range(domain))

  if (!(type %in% c('sobolev', 'exponential', 'gaussian')))
  {
    stop ("error: not defined kernel, please define the set
          of eigenfunctions and eigenvectors manually")
  }
  if (length(parameter) != 1 )
  {
    stop ("please provide the parameters of the kernel. See the help page
          of the function for the parameter definition")
  }

  ## Definition of the eigenfunctions and eigenvalues for the three kernels.
  if (type == 'sobolev')
  {
    kernel_def <- sobolev_kernel_generation(a = domain[1], b = domain[length(domain)],
                                            m = length(domain), sigma = parameter,
                                            plot.eigen = FALSE)
    kernel_def$values <- kernel_def$values/M_integ
    kernel_def$vectors <- kernel_def$vectors*sqrt(M_integ)
  }
  else
  {
    ## The exponential and Gaussian kernels share the same pipeline; only
    ## the kernlab kernel constructor differs (laplacedot vs rbfdot).
    kern <- if (type == 'exponential') laplacedot(sigma = parameter)
            else rbfdot(sigma = parameter)
    mat <- kernelMatrix(kern, domain)
    ## Compute the eigendecomposition once: it was previously evaluated
    ## twice per branch, doubling the O(m^3) cost.
    eig <- eigen(mat)
    kernel_def <- list(vectors = eig$vectors*sqrt(M_integ),
                       values = eig$values/M_integ)
  }

  ## Keep the smallest leading set of eigenvalues whose cumulative share of
  ## the total exceeds `thres`.
  ## NOTE(review): uses strict ">" although the documentation states ">=".
  num_eigen <- which((cumsum(kernel_def$values)/sum(kernel_def$values)) > thres)[1]
  eigen_chosen <- 1:num_eigen

  # definition of the retained eigenvalues and eigenvectors of the kernel
  autoval <- kernel_def$values[eigen_chosen]
  autovett <- kernel_def$vectors[, eigen_chosen]

  if (return.derivatives == TRUE)
  {
    ## Forward differences approximate the eigenfunction derivatives on
    ## the grid, yielding length(domain) - 1 rows.
    ## NOTE(review): divides by the first grid spacing only; a non-uniform
    ## grid would need elementwise division by diff(domain).
    diff_autovett <- apply(autovett, 2, diff)/(domain[2]-domain[1])
    return(list(eigenvect = autovett, eigenval = autoval,
                derivatives = diff_autovett))
  } else {
    return(list(eigenvect = autovett, eigenval = autoval))
  }
}
|
library(rtracklayer)
# Snakemake script: read the input GFF annotation keeping only gene-level
# feature records (CDS, ncRNA, tRNA, rRNA, tmRNA) -- this drops the
# contig/region lines -- then write the filtered annotation out as GFF.
# The `snakemake` S4 object is injected by Snakemake's `script:` directive.
# import gff annotation files and filter to remove contig regions, then rewrite in GFF format
gff <- readGFF(snakemake@input[['gff']], filter = list(type=c("CDS", "ncRNA", "tRNA", "rRNA", "tmRNA")))
export(gff, snakemake@output[['gff']])
| /scripts/filter_gff.R | permissive | greenelab/2022-microberna | R | false | false | 260 | r | library(rtracklayer)
# Snakemake script (requires rtracklayer): read the input GFF annotation
# keeping only gene-level feature records (CDS, ncRNA, tRNA, rRNA, tmRNA) --
# this drops the contig/region lines -- then write it back out as GFF.
# The `snakemake` S4 object is injected by Snakemake's `script:` directive.
# import gff annotation files and filter to remove contig regions, then rewrite in GFF format
gff <- readGFF(snakemake@input[['gff']], filter = list(type=c("CDS", "ncRNA", "tRNA", "rRNA", "tmRNA")))
export(gff, snakemake@output[['gff']])
|
# Random Forest Classifier
# Train and evaluate a random forest on Data.csv.  The target column
# `Class` is recoded as a factor with levels 2 / 4 (presumably the
# benign/malignant coding of the Wisconsin breast-cancer data -- confirm).
# Importing the dataset
dataset = read.csv('Data.csv')
# keep columns 2..11: nine feature columns plus Class (column 1 is
# presumably an ID -- confirm against the data source)
dataset = dataset[2:11]
dataset$Class = factor(dataset$Class , levels=c(2,4))
# Splitting the dataset into the Training set and Test set (75% / 25%,
# stratified on Class by sample.split)
# install.packages('caTools')
library(caTools)
set.seed(123)
split = sample.split(dataset$Class, SplitRatio = 0.75)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
# Feature Scaling: build scaling parameters on the training set only, then
# apply them to both sets (avoids test-set leakage)
library(dataPreparation)
scales <- build_scales(training_set, cols = "auto",
verbose = TRUE)
training_set[,1:9] = fast_scale(training_set[,1:9], scales = scales,
verbose = TRUE)
test_set[,1:9] = fast_scale(test_set[,1:9], scales = scales, verbose = TRUE)
# Fitting Logistic Regression to the Training Set (kept for reference)
# classifier = glm(formula = Purchased ~ .,
# family = binomial,
# data = training_set)
#
# summary(classifier)
# Fit the random forest (note: ntree = 5 is unusually small; typical
# defaults are in the hundreds)
library(randomForest)
classifier = randomForest(Class ~.,
data = training_set,
ntree = 5)
# Predicting the test set results (column 10 is Class, excluded from newdata)
y_pred = predict(classifier, newdata = test_set[-10], type = "class")
# confusion matrix: actual class (rows) vs predicted class (columns)
cm = table(test_set[,10],y_pred)
| /Classification_R/RandomForestClassifier.R | no_license | Akash0811/MLComparison | R | false | false | 1,247 | r | # Random Forest Classifier
# Random-forest classifier on Data.csv.  The target column `Class` is
# recoded as a factor with levels 2 / 4 (presumably the benign/malignant
# coding of the Wisconsin breast-cancer data -- confirm).
# Importing the dataset
dataset = read.csv('Data.csv')
# keep columns 2..11: nine feature columns plus Class (column 1 is
# presumably an ID -- confirm against the data source)
dataset = dataset[2:11]
dataset$Class = factor(dataset$Class , levels=c(2,4))
# Splitting the dataset into the Training set and Test set (75% / 25%,
# stratified on Class by sample.split)
# install.packages('caTools')
library(caTools)
set.seed(123)
split = sample.split(dataset$Class, SplitRatio = 0.75)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
# Feature Scaling: build scaling parameters on the training set only, then
# apply them to both sets (avoids test-set leakage)
library(dataPreparation)
scales <- build_scales(training_set, cols = "auto",
verbose = TRUE)
training_set[,1:9] = fast_scale(training_set[,1:9], scales = scales,
verbose = TRUE)
test_set[,1:9] = fast_scale(test_set[,1:9], scales = scales, verbose = TRUE)
# Fitting Logistic Regression to the Training Set (kept for reference)
# classifier = glm(formula = Purchased ~ .,
# family = binomial,
# data = training_set)
#
# summary(classifier)
# Fit the random forest (note: ntree = 5 is unusually small; typical
# defaults are in the hundreds)
library(randomForest)
classifier = randomForest(Class ~.,
data = training_set,
ntree = 5)
# Predicting the test set results (column 10 is Class, excluded from newdata)
y_pred = predict(classifier, newdata = test_set[-10], type = "class")
# confusion matrix: actual class (rows) vs predicted class (columns)
cm = table(test_set[,10],y_pred)
|
# Fetch up to 200 recent English tweets tagged #ghoe (requires an
# authenticated twitteR session established beforehand).
tweets = searchTwitter("#ghoe", n=200, lang='en')
Tweets.text = lapply(tweets,function(t)t$getText())
# strip characters that cannot be represented in ASCII (emoji etc.)
Tweets.text = sapply(Tweets.text, function(row) iconv(row, "latin1", "ASCII", sub=""))
# opinion lexicons: one word per line, ';' starts a comment line
pos = scan('/Users/jweathe9/Documents/twitter-sentiment-analysis-master/twitter-sentiment-analysis-master/wordbanks/positive-words.txt', what='character', comment.char=';')
neg = scan('/Users/jweathe9/Documents/twitter-sentiment-analysis-master/twitter-sentiment-analysis-master/wordbanks/negative-words.txt', what='character', comment.char=';')
# Score the sentiment of each sentence as
#   (# positive lexicon words) - (# negative lexicon words)
# after lower-casing and stripping punctuation, control characters and
# digits.
#
# Args:
#   sentences: character vector of texts to score.
#   pos.words, neg.words: character vectors of positive / negative terms.
# Returns:
#   data.frame with columns `text` (the input sentences) and `score`
#   (an atomic numeric vector, one integer-valued score per sentence).
score.sentiment = function(sentences, pos.words, neg.words){
  # FIX: the score column used to be a *list* (lapply output) stored in the
  # data frame; vapply yields an atomic numeric vector instead, so `score`
  # is a proper column (as.numeric(df$score) keeps working for old callers).
  scores = vapply(sentences, function(sentence) {
    # clean up sentence: remove punctuation, control characters and digits
    sentence = gsub('[[:punct:]]', '', sentence)
    sentence = gsub('[[:cntrl:]]', '', sentence)
    sentence = gsub('\\d+', '', sentence)
    sentence = tolower(sentence)
    # split into words on runs of whitespace
    words = unlist(strsplit(sentence, '\\s+'))
    # match() returns the position of the matched term or NA;
    # convert to TRUE/FALSE hit indicators
    pos.hits = !is.na(match(words, pos.words))
    neg.hits = !is.na(match(words, neg.words))
    # TRUE/FALSE are counted as 1/0 by sum()
    sum(pos.hits) - sum(neg.hits)
  }, numeric(1), USE.NAMES = FALSE)
  # table of each sentence with its sentiment score
  df = data.frame(
    text = sentences
  )
  df$score = scores
  return(df)
}
analysis = score.sentiment(Tweets.text, pos, neg)
mean(as.numeric(analysis$score)) | /update for nccu.R | no_license | weathejs/Social-Media-Sentiment-Analysis | R | false | false | 1,942 | r | tweets = searchTwitter("#ghoe", n=200, lang='en')
# Extract the plain text of each status object (`tweets` comes from the
# searchTwitter() call at the top of this script).
Tweets.text = lapply(tweets,function(t)t$getText())
# Strip non-ASCII characters so the regex cleaning downstream cannot choke.
Tweets.text = sapply(Tweets.text, function(row) iconv(row, "latin1", "ASCII", sub=""))
# Opinion-lexicon word lists (presumably the Hu & Liu lexicon — confirm);
# ';' marks comment lines inside those files.
pos = scan('/Users/jweathe9/Documents/twitter-sentiment-analysis-master/twitter-sentiment-analysis-master/wordbanks/positive-words.txt', what='character', comment.char=';')
neg = scan('/Users/jweathe9/Documents/twitter-sentiment-analysis-master/twitter-sentiment-analysis-master/wordbanks/negative-words.txt', what='character', comment.char=';')
# Count-based sentiment scorer: each sentence is scored as
# (# positive-lexicon hits) - (# negative-lexicon hits).
# Returns a data.frame with the original texts in `text` and the
# per-sentence scores in `score` (a list column, as before).
score.sentiment = function(sentences, pos.words, neg.words){
  # Score one sentence against the two word lists.
  score_one = function(sentence, pos.words, neg.words) {
    # normalise: drop punctuation, control characters and digits, lowercase
    cleaned = gsub('[[:punct:]]', '', sentence)
    cleaned = gsub('[[:cntrl:]]', '', cleaned)
    cleaned = gsub('\\d+', '', cleaned)
    cleaned = tolower(cleaned)
    # whitespace tokenisation; unlist flattens the one-element list
    tokens = unlist(strsplit(cleaned, '\\s+'))
    # match() yields the lexicon position or NA; !is.na() flags the hits
    hits.pos = !is.na(match(tokens, pos.words))
    hits.neg = !is.na(match(tokens, neg.words))
    # logical hits sum as 1/0
    return(sum(hits.pos) - sum(hits.neg))
  }
  scores = lapply(sentences, score_one, pos.words, neg.words)
  # table of tweets alongside their sentiment scores
  df = data.frame(
    text = sentences
  )
  df$score = scores
  return(df)
}
analysis = score.sentiment(Tweets.text, pos, neg)
mean(as.numeric(analysis$score)) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dasl.R
\docType{data}
\name{dasl.housing_prices}
\alias{dasl.housing_prices}
\title{Housing prices}
\format{1057 observations}
\source{
DASL – The Data And Story Library: \href{https://dasl.datadescription.com/datafile/housing-prices/?sf_paged=20}{Housing prices}
}
\description{
House prices and properties in New York. What properties of a house can predict its price? Can we use such a model to identify houses that are extraordinarily expensive or inexpensive?
}
\details{
\url{https://github.com/sigbertklinke/wwwdata/tree/master/wwwdata/dasl}
}
\references{
random sample of 1057 houses taken from full Saratoga Housing Data (De Veaux)
}
\concept{Correlation}
\concept{Multiple Regression}
\concept{Regression}
\concept{Scatterplot}
| /man/dasl.housing_prices.Rd | no_license | sigbertklinke/mmstat.data | R | false | true | 819 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dasl.R
\docType{data}
\name{dasl.housing_prices}
\alias{dasl.housing_prices}
\title{Housing prices}
\format{1057 observations}
\source{
DASL – The Data And Story Library: \href{https://dasl.datadescription.com/datafile/housing-prices/?sf_paged=20}{Housing prices}
}
\description{
House prices and properties in New York. What properties of a house can predict its price? Can we use such a model to identify houses that are extraordinarily expensive or inexpensive?
}
\details{
\url{https://github.com/sigbertklinke/wwwdata/tree/master/wwwdata/dasl}
}
\references{
random sample of 1057 houses taken from full Saratoga Housing Data (De Veaux)
}
\concept{Correlation}
\concept{Multiple Regression}
\concept{Regression}
\concept{Scatterplot}
|
## Generate copulas and normal, t, gamma distributions for the data
## with a Gaussian copula, build knockoff copies, and tabulate the power /
## FDR of knockoff-filtered variable selection.
## Relies on helpers defined elsewhere in the package: gaussianCopula,
## *DistributionGeneration, LassoGenerationModel, copulaKnockoff, lassoFDR.
p = 50    # number of variables
n = 200   # number of observations
cov_type = c("diag", "toeplitz", "general")
freq_list = list(freq_p = c(0.4, 0.6, 0.8), name_p = c("04", "06", "08"))
out_table = c()
for(k in seq_along(freq_list$freq_p)){
  for(j in seq_along(cov_type)){
    freq_p = freq_list$freq_p[k]
    # one copula per covariance structure, shared by all three margins
    copula = gaussianCopula(p = p, n = n, type = cov_type[j])
    gaussian.list = gaussianDistributionGeneration(copula, p, n)
    t.list = tDistributionGeneration(copula, p, n)
    gamma.list = gammaDistributionGeneration(copula, p, n)
    set.seed(1001)
    ## Generate the response data for the lasso regression
    gaussian.lasso = LassoGenerationModel(gaussian.list$data.normal, freq_p)
    t.lasso = LassoGenerationModel(t.list$data.t, freq_p)
    gamma.lasso = LassoGenerationModel(gamma.list$data, freq_p)
    set.seed(1001)
    ## generate knockoff copies
    gaussian.knockoff = copulaKnockoff(gaussian.list$data)
    t.knockoff = copulaKnockoff(t.list$data)
    gamma.knockoff = copulaKnockoff(gamma.list$data)
    set.seed(1001)
    ## Fit lasso model with knockoff variables for gaussian, t and gamma
    lasso.gaussian.FDR = lassoFDR(gaussian.list$data, gaussian.knockoff$x.knockoff.copy, gaussian.lasso$y, gaussian.lasso$mask, family = "gaussian")
    lasso.t.FDR = lassoFDR(t.list$data, t.knockoff$x.knockoff.copy, t.lasso$y, t.lasso$mask, family = "gaussian")
    lasso.gamma.FDR = lassoFDR(gamma.list$data, gamma.knockoff$x.knockoff.copy, gamma.lasso$y, gamma.lasso$mask, family = "gaussian")
    ## FIXME: the "Ridge" results below call lassoFDR() with identical
    ## arguments, so they merely duplicate the lasso rows under a different
    ## label.  Substitute a ridge-specific FDR routine when one is available.
    ridge.gaussian.FDR = lassoFDR(gaussian.list$data, gaussian.knockoff$x.knockoff.copy, gaussian.lasso$y, gaussian.lasso$mask, family = "gaussian")
    ridge.t.FDR = lassoFDR(t.list$data, t.knockoff$x.knockoff.copy, t.lasso$y, t.lasso$mask, family = "gaussian")
    ridge.gamma.FDR = lassoFDR(gamma.list$data, gamma.knockoff$x.knockoff.copy, gamma.lasso$y, gamma.lasso$mask, family = "gaussian")
    ## Accumulate one row per (covariance, sparsity, margin, model) cell
    out_table = rbind(out_table, c(cov_type[j], freq_list$freq_p[k], "Gaussian", lasso.gaussian.FDR$power, lasso.gaussian.FDR$FDR, "Lasso"))
    out_table = rbind(out_table, c(cov_type[j], freq_list$freq_p[k], "t", lasso.t.FDR$power, lasso.t.FDR$FDR, "Lasso"))
    out_table = rbind(out_table, c(cov_type[j], freq_list$freq_p[k], "Gamma", lasso.gamma.FDR$power, lasso.gamma.FDR$FDR, "Lasso"))
    out_table = rbind(out_table, c(cov_type[j], freq_list$freq_p[k], "Gaussian", ridge.gaussian.FDR$power, ridge.gaussian.FDR$FDR, "Ridge"))
    out_table = rbind(out_table, c(cov_type[j], freq_list$freq_p[k], "t", ridge.t.FDR$power, ridge.t.FDR$FDR, "Ridge"))
    out_table = rbind(out_table, c(cov_type[j], freq_list$freq_p[k], "Gamma", ridge.gamma.FDR$power, ridge.gamma.FDR$FDR, "Ridge"))
  }
}
## BUG FIX: previously `out_table = print(as.data.frame(out_table))`, which
## printed an unlabelled intermediate table (print() just returns its input).
## Convert first, label the columns, then print once.
out_table = as.data.frame(out_table)
colnames(out_table) = c("Covariance", "Sparsity", "Marginal Family", "Power", "FDR", "Model")
print(out_table)
## cov_type freq_list margin power FDR model
| /examples/marginSimulation.R | no_license | CHuanSite/copulaKnockoff | R | false | false | 3,155 | r | ## Generate copulas and normal, t, gamma distributions for the data
## With Gaussian Copula to estimate the FDR
p = 50
n = 200
cov_type = c("diag", "toeplitz", "general")
freq_list = list(freq_p = c(0.4, 0.6, 0.8), name_p = c("04", "06", "08"))
out_table = c()
for(k in 1 : 3){
for(j in 1 : 3){
freq_p = freq_list$freq_p[k]
copula = gaussianCopula(p = p, n = n, type = cov_type[j])
gaussian.list = gaussianDistributionGeneration(copula, p, n)
t.list = tDistributionGeneration(copula, p, n)
gamma.list = gammaDistributionGeneration(copula, p, n)
set.seed(1001)
## Generate the data for lasso regression
gaussian.lasso = LassoGenerationModel(gaussian.list$data.normal, freq_p)
t.lasso = LassoGenerationModel(t.list$data.t, freq_p)
gamma.lasso = LassoGenerationModel(gamma.list$data, freq_p)
set.seed(1001)
## generate knockoff copies
gaussian.knockoff = copulaKnockoff(gaussian.list$data)
t.knockoff = copulaKnockoff(t.list$data)
gamma.knockoff = copulaKnockoff(gamma.list$data)
set.seed(1001)
## Fit lasso model with knockoff variables for gaussian, t and gamma
lasso.gaussian.FDR = lassoFDR(gaussian.list$data, gaussian.knockoff$x.knockoff.copy, gaussian.lasso$y, gaussian.lasso$mask, family = "gaussian")
lasso.t.FDR = lassoFDR(t.list$data, t.knockoff$x.knockoff.copy, t.lasso$y, t.lasso$mask, family = "gaussian")
lasso.gamma.FDR = lassoFDR(gamma.list$data, gamma.knockoff$x.knockoff.copy, gamma.lasso$y, gamma.lasso$mask, family = "gaussian")
## Fit ridge model with knockoff variables for gaussian, t and gamma
ridge.gaussian.FDR = lassoFDR(gaussian.list$data, gaussian.knockoff$x.knockoff.copy, gaussian.lasso$y, gaussian.lasso$mask, family = "gaussian")
ridge.t.FDR = lassoFDR(t.list$data, t.knockoff$x.knockoff.copy, t.lasso$y, t.lasso$mask, family = "gaussian")
ridge.gamma.FDR = lassoFDR(gamma.list$data, gamma.knockoff$x.knockoff.copy, gamma.lasso$y, gamma.lasso$mask, family = "gaussian")
## Output the table
out_table = rbind(out_table, c(cov_type[j], freq_list$freq_p[k], "Gaussian", lasso.gaussian.FDR$power, lasso.gaussian.FDR$FDR, "Lasso"))
out_table = rbind(out_table, c(cov_type[j], freq_list$freq_p[k], "t", lasso.t.FDR$power, lasso.t.FDR$FDR, "Lasso"))
out_table = rbind(out_table, c(cov_type[j], freq_list$freq_p[k], "Gamma", lasso.gamma.FDR$power, lasso.gamma.FDR$FDR, "Lasso"))
out_table = rbind(out_table, c(cov_type[j], freq_list$freq_p[k], "Gaussian", ridge.gaussian.FDR$power, ridge.gaussian.FDR$FDR, "Ridge"))
out_table = rbind(out_table, c(cov_type[j], freq_list$freq_p[k], "t", ridge.t.FDR$power, ridge.t.FDR$FDR, "Ridge"))
out_table = rbind(out_table, c(cov_type[j], freq_list$freq_p[k], "Gamma", ridge.gamma.FDR$power, ridge.gamma.FDR$FDR, "Ridge"))
}
}
out_table = print(as.data.frame(out_table))
colnames(out_table) = c("Covariance", "Sparsity", "Marginal Family", "Power", "FDR", "Model")
print(out_table)
## cov_type freq_list margin power FDR model
|
library(data.table)

## Stratified repeated hold-out splitting for the Pump-It-Up data: for every
## seed in `seedValues`, draw a class-stratified sample of `trainFraction`
## of each class into the training set and write the four resulting CSV
## files under data/.

seedValues <- 25        # one split is produced per seed
trainFraction <- 0.8    # fraction of each class assigned to the training set

# Read in as character to make sure that no re-formatting happens
dataValues <- fread("data/train_values.csv", colClasses = "character", strip.white = FALSE)
dataLabels <- fread("data/train_labels.csv", colClasses = "character", strip.white = FALSE)
stopifnot(all(dataValues$id == dataLabels$id)) # make sure rows are aligned

for (seedValue in seedValues) {
  set.seed(seedValue)
  classLabels <- factor(dataLabels$status_group)
  # row indices per class, so the draw below is stratified by status_group
  classIdx <- sapply(levels(classLabels), function(x) which(classLabels == x))
  # BUG FIX: use trainFraction instead of the hard-coded 0.8 so the declared
  # parameter above actually controls the split size
  trainIdx <- sapply(classIdx, function(x) sample(x, size = round(trainFraction * length(x)), replace = FALSE))
  trainIdx <- sort(unlist(trainIdx))
  trainValues <- dataValues[trainIdx, ]
  trainLabels <- dataLabels[trainIdx, ]
  testValues <- dataValues[-trainIdx, ]
  testLabels <- dataLabels[-trainIdx, ]
  fwrite(trainValues, file = paste0("data/split_train_values_", seedValue, ".csv"),
         quote = FALSE, eol = "\n")
  fwrite(trainLabels, file = paste0("data/split_train_labels_", seedValue, ".csv"),
         quote = FALSE, eol = "\n")
  fwrite(testValues, file = paste0("data/split_test_values_", seedValue, ".csv"),
         quote = FALSE, eol = "\n")
  fwrite(testLabels, file = paste0("data/split_test_labels_", seedValue, ".csv"),
         quote = FALSE, eol = "\n")
}
| /Task_1_PumpItUp/SplitForStudentsRepeatedHoldout.R | permissive | Jakob-Bach/AGD-Lab-2020 | R | false | false | 1,346 | r | library(data.table)
seedValues <- 25
trainFraction <- 0.8
# Read in as character to make sure that no re-formatting happens
dataValues <- fread("data/train_values.csv", colClasses = "character", strip.white = FALSE)
dataLabels <- fread("data/train_labels.csv", colClasses = "character", strip.white = FALSE)
stopifnot(all(dataValues$id == dataLabels$id)) # make sure rows are aligned
for (seedValue in seedValues) {
set.seed(seedValue)
classLabels <- factor(dataLabels$status_group)
classIdx <- sapply(levels(classLabels), function(x) which(classLabels == x))
trainIdx <- sapply(classIdx, function(x) sample(x, size = round(0.8 * length(x)), replace = FALSE))
trainIdx <- sort(unlist(trainIdx))
trainValues <- dataValues[trainIdx, ]
trainLabels <- dataLabels[trainIdx, ]
testValues <- dataValues[-trainIdx, ]
testLabels <- dataLabels[-trainIdx, ]
fwrite(trainValues, file = paste0("data/split_train_values_", seedValue, ".csv"),
quote = FALSE, eol = "\n")
fwrite(trainLabels, file = paste0("data/split_train_labels_", seedValue, ".csv"),
quote = FALSE, eol = "\n")
fwrite(testValues, file = paste0("data/split_test_values_", seedValue, ".csv"),
quote = FALSE, eol = "\n")
fwrite(testLabels, file = paste0("data/split_test_labels_", seedValue, ".csv"),
quote = FALSE, eol = "\n")
}
|
library(shinydashboard)
library(shiny)
library(shinycssloaders)
library(stringr)
library(DT)
library(fBasics)
source("DataInput.R")
source("QCplots.R")
source("Preprocessing.R")
source("testingDE.R")
source("DimenReduce.R")
source("UnsupervisedLearning.R")
source("annotationtool.R")
source("EquivalenceTest.R")
source("PeptideCalculate.R")
source("batcheffect.R")
source("IDV.R")
options(shiny.maxRequestSize=20*1024^2)
# Define UI ----
# Shiny dashboard UI for the proteomics analysis pipeline.  One sidebar entry
# per analysis stage; each tabItem below holds the input widgets and output
# placeholders that the server function fills in via the shared input/output
# IDs.
ui <- dashboardPage(
  dashboardHeader(title="Proteomics Data Analysis Pipeline", titleWidth = 350),
  dashboardSidebar(width = 275,sidebarMenu(
    menuItem("Input and annotation", tabName = "IA"),
    # NOTE(review): 'tabname' (lowercase n) is not menuItem's 'tabName'
    # argument; harmless for a parent entry that only expands sub-items,
    # but worth confirming/correcting.
    menuItem("Quality Control" , tabname = "QC", icon = icon("bars"),
             menuSubItem("Standard based QC", tabName = "StandardQC"),
             menuSubItem("Sample based QC", tabName = "SampleQC")),
    menuItem("Pre-processing", tabName = "PP"),
    menuItem("Statistical Inference", tabName = "StatInf"),
    menuItem("Dimensionality reduction", tabName = "DR"),
    menuItem("Clustering", tabName = "Clustering"),
    # NOTE(review): same 'tabname' vs 'tabName' typo as above.
    menuItem("Individual Protein Visualization",tabname="IV",icon=icon("bars"),
             menuSubItem("Boxplot", tabName = "IDV1"),
             menuSubItem("Correlation plot", tabName = "IDV2"),
             menuSubItem("Domain structure & annotated PTMs ", tabName = "IDV3"),
             menuSubItem("Circosplot", tabName = "IDV4")),
    menuItem("Generate a report", tabName="GenerateReport"))),
  dashboardBody(
    tabItems(
      # Input and annotation ----
      # Uploads for meta data plus up to four quantitative datasets
      # (protein / termini / peptide / PTM), each gated by a checkbox.
      tabItem(tabName="IA",
              h4("File input"),
              fluidRow(
                box(h4("Meta file input"),
                    fileInput("meta_data", "please upload .csv/.xlsx file of meta data with only sample names matched with raw data, and corresponding covariates of interest:",
                              multiple = FALSE,
                              accept = c("text/csv",
                                         "text/comma-separated-values,text/plain",
                                         ".csv", ".xlsx")),
                    checkboxInput("whether_replica", "Is there a column indicating replica in the meta data file?", value=FALSE),
                    conditionalPanel('input.whether_replica==1',
                                     uiOutput("replicacol")),
                    checkboxInput("whether_protein_file", "Upload Protein file", value=FALSE),
                    conditionalPanel('input.whether_protein_file==1',
                                     fileInput("protein_data", "please upload .csv/.xlsx file of raw data with quantitative intensities of proteins:",
                                               multiple = FALSE,
                                               accept = c("text/csv",
                                                          "text/comma-separated-values,text/plain",
                                                          ".csv", ".xlsx")),
                                     numericInput("protein_col_id","Which column contains unique identifiers of this dataset?",1)),
                    checkboxInput("whether_termini_file", "Upload Termini file", value=FALSE),
                    conditionalPanel('input.whether_termini_file==1',
                                     fileInput("termini_data", "please upload .csv/.xlsx file of raw termini data with quantitative intensities of terminis:",
                                               multiple = FALSE,
                                               accept = c("text/csv",
                                                          "text/comma-separated-values,text/plain",
                                                          ".csv", ".xlsx")),
                                     numericInput("termini_col_id","Which column contains unique identifiers of this dataset?",1)),
                    checkboxInput("whether_peptide_file","Upload Peptide file", value=FALSE),
                    conditionalPanel('input.whether_peptide_file==1',
                                     fileInput("peptide_data", "please upload .csv/.xlsx file of raw peptide data with quantitative intensities of peptides:",
                                               multiple = FALSE,
                                               accept = c("text/csv",
                                                          "text/comma-separated-values,text/plain",
                                                          ".csv", ".xlsx")),
                                     numericInput("peptide_col_id","Which column contains unique identifiers of this dataset?",1)),
                    checkboxInput("whether_PTM_file","Upload PTM file", value=FALSE),
                    conditionalPanel('input.whether_PTM_file==1',
                                     fileInput("PTM_data", "please upload .csv/.xlsx file of raw PTM file",
                                               multiple = FALSE,
                                               accept = c("text/csv",
                                                          "text/comma-separated-values,text/plain",
                                                          ".csv", ".xlsx")),
                                     numericInput("PTM_col_id","Which column contains unique identifiers of this dataset?",1))),
                # Sequence-window annotation for termini / PTM datasets.
                box(h4("Annotation tools"),
                    conditionalPanel('input.whether_termini_file==1 || input.whether_PTM_file==1',
                                     uiOutput("select_dataset_for_anno"),
                                     uiOutput("anno_idf"),
                                     uiOutput("anno_seq"),
                                     numericInput("lseqwindow",
                                                  h5("Length of sequence window:"),
                                                  value = 1),
                                     actionButton("annoButton", "Get annotation"),
                                     div(dataTableOutput("annotationpreview")%>% withSpinner(color="#0dc5c1"), style = "font-size:60%"),
                                     downloadButton("downloadAnnotation", "Download full annotation"))),
                # Roll peptide intensities up to protein level.
                box(h4("Protein intensity calculator based on peptide"),
                    conditionalPanel('input.whether_peptide_file==1',
                                     numericInput("peptide_col_ACC",
                                                  "Please input the column of protein accession:",
                                                  value=1),
                                     selectInput("sumform", "Choose method of sum:",choices=c("sum","weighted","top3")),
                                     actionButton("pepcalButton", "Calculate"),
                                     div(dataTableOutput("peptidepreview")%>% withSpinner(color="#0dc5c1"), style = "font-size:60%"),
                                     checkboxInput("whether_replace_protein", "Use the dataset calculated from peptides to replace the protein data?",
                                                   value=FALSE)))
              )),
      # Standard QC ----
      # QC on standard samples: batch-effect inspection and correction.
      tabItem(tabName = "StandardQC",
              box(fileInput("standardQC_meta", "please upload .csv/.xlsx file of meta data for standard samples:",
                            multiple = FALSE,
                            accept = c("text/csv",
                                       "text/comma-separated-values,text/plain",
                                       ".csv", ".xlsx")),
                  fileInput("standardQC_data", "please upload .csv/.xlsx file of raw data with quantitative intensities of standard samples:",
                            multiple = FALSE,
                            accept = c("text/csv",
                                       "text/comma-separated-values,text/plain",
                                       ".csv", ".xlsx"))
              ),
              fluidRow(
                box(uiOutput("batch_col"),
                    uiOutput("order_col"),
                    uiOutput("factor_cols"),
                    selectizeInput("correctmethod",label="Batch effect correction method",
                                   choices=c("MedianCentering","MeanCentering")),
                    actionButton("standar_batcheffectdButton", "Confirm"),
                    downloadButton("DownloadCorrectedQCData","Download corrected QC data")),
                box(plotOutput("standard_batcheffectplots")%>% withSpinner(color="#0dc5c1")),
                box(plotOutput("standard_batcheffect_clustering")%>% withSpinner(color="#0dc5c1")),
                box(plotOutput("standard_batcheffect_heatmap")%>% withSpinner(color="#0dc5c1"))
              )),
      # Sample QC ----
      # Sample-level QC plots (CV, identification counts, overlap,
      # completeness, correlation) for a chosen dataset.
      tabItem(tabName = "SampleQC",
              fluidRow(
                box(h4("Show the QC graphs of"),
                    uiOutput("selectdata_QC"),
                    br(),
                    checkboxInput("whethercorrectbatch","Correct the batch effect with the same setting in Standard QC?"),
                    checkboxInput("whether_average_replica",
                                  "Average the intensities across replica?",
                                  value=FALSE)),
                tabBox(title = "QC plots",
                       tabPanel("%CV plots for QC samples",
                                plotOutput("graph1")),
                       tabPanel("Distribution of identified proteins",
                                uiOutput("group"),
                                plotOutput("graph3")),
                       tabPanel("Overlap of identified proteins",
                                uiOutput("group2"),
                                plotOutput("graph4")),
                       tabPanel("Data Completeness",
                                plotOutput("graph5")),
                       tabPanel("Correlation Plot",
                                plotOutput("graph6")))
                #conditionalPanel("input.whethercorrectbatch==1",
                #                 plotOutput("sample_batch_plot"))
              )),
      # Pre-processing ----
      # Normalization / imputation controls are rendered server-side.
      tabItem(tabName = "PP",
              h4("2. Pre-processing"),
              # Normalization, imputation, etc.
              fluidRow(
                box(uiOutput("preprocessing_panel"))
              )),
      #plotOutput("viewviolin")%>% withSpinner(color="#0dc5c1"),
      #downloadButton("DownloadProcessedData", "Download"),
      # Statistical Inference ----
      # Differential-expression testing; the equivalence-test UI below is
      # currently disabled (commented out).
      tabItem(tabName="StatInf",
              # Equivalence test
              #h4("3.1 Inference on Equivalence"),
              uiOutput("selectdata_SI"),
              # box(
              #   selectInput(inputId = "dd",
              #               label="Specify the way of testing...",
              #               choices = c("By column, test all proteins at the same time",
              #                           "By row, test each protein respectively")),
              #   numericInput("lowerbound", "Lower bound:", -0.5),
              #   numericInput("upperbound", "Upper bound:", 0.5),
              #   uiOutput("eqFoI"),
              #   uiOutput("eqFoIlevels"),
              #   conditionalPanel('input.dd=="By row, test each protein respectively"',
              #                    plotOutput("eqtestres1")%>% withSpinner(color="#0dc5c1")),
              #   conditionalPanel('input.dd=="By column, test all proteins at the same time"',
              #                    plotOutput("eqtestres2"))
              # ),
              # DE tests
              #h4("3.2 Inference on Differential Expression"),
              fluidRow(
                box(width=12,
                    checkboxInput(inputId = "whetherstandardDE",
                                  "Standardization?",
                                  value=FALSE),
                    #conditionalPanel('input.whetherstandardDE==1',
                    #                 selectInput(inputId = "standardmethodDE",
                    #                             label="Standardization by",
                    #                             choices = c("z-score",
                    #                                         "log2FC over median"))),
                    checkboxInput(inputId = "whetherblock",
                                  "Blocking", value = FALSE),
                    conditionalPanel('input.whetherblock==1',
                                     uiOutput("blockfactor")),
                    checkboxInput(inputId = "whetherweighting",
                                  "Weighting?",
                                  value=FALSE),
                    conditionalPanel('input.whetherweighting==1',
                                     numericInput("NAweights", "Weight",value=10e-5)),
                    selectInput(inputId = "DEtest",
                                label="Statistical testing for DE",
                                choices = c("limma",
                                            "t-test"),
                                selected = NULL, width="33%"),
                    uiOutput("FoI"),
                    uiOutput("FoIlevels"),
                    h5("Volcano plot of DE testing results"),
                    plotOutput("volcanoplot")%>% withSpinner(color="#0dc5c1"),
                    downloadButton("DownloadDEresults","Download Differential Expression test results"))
              )),
      # Dimensionality Reduction ----
      tabItem(tabName = "DR",
              h4("4. Dimensionality reduction"),
              uiOutput("selectdata_DR"),
              fluidRow(
                box(
                  selectInput(inputId = "DRrows",
                              label="use the rows of...",
                              choices=c("all","significant")),
                  uiOutput("colorfactor")),
                box(
                  selectInput(inputId = "DRmethod",
                              label="use the method of...",
                              choices=c("PCA","t-SNE", "UMAP")),
                  conditionalPanel('input.DRmethod=="t-SNE"',
                                   numericInput("tSNEper","Specify the perplexity for t-SNE", 0)))),
              box(width=12,plotOutput("dimenreduction"))),
      # clustering ----
      tabItem(tabName="Clustering",
              h4("5. Clustering"),
              uiOutput("selectdata_cluster"),
              fluidRow(
                box(
                  selectInput(inputId = "Cmethod",
                              label="use the method of...",
                              choices=c("Hierarchical Clustering",
                                        "k-means",
                                        "Fuzzy Clustering")),
                  selectInput(inputId = "rows",
                              label="use the rows of...",
                              choices=c("significant","all")),
                  conditionalPanel('input.Cmethod == "Hierarchical Clustering"',
                                   checkboxInput("whetherlabel",
                                                 h5("include row labels"),
                                                 value = FALSE)),
                  conditionalPanel('input.Cmethod != "Hierarchical Clustering"',
                                   numericInput("clusternum",
                                                h5("the number of centers: "),
                                                value = 4)))),
              box(width=12,plotOutput("cluster"))),
      # Individual Protein Visualization ----
      # Per-protein boxplots for a DE-significant or user-specified group.
      tabItem(tabName="IDV1",
              uiOutput("selectdata_IDV"),
              selectInput("proteingroup","Select the group of...",
                          choices=c("significant in differential expression",
                                    "specify below...")),
              uiOutput("proteingroupID"),
              uiOutput("facet_factor"),
              fluidRow(
                box(width=12,
                    column(width=6,
                           DT::dataTableOutput("IDV_table"),style = "height:500px; overflow-y: scroll;overflow-x: scroll;"),
                    column(width=6,
                           plotOutput("IDV_boxplot"))))
      ),
      # Protein-protein correlation plot with hclust/test ordering.
      tabItem(tabName="IDV2",
              selectInput("corrorder","Please specify the type of correlation plot",
                          choices=c("hclust","test")),
              conditionalPanel('input.corrorder=="test"',
                               numericInput("p_threshold", "Please specify p-value threshold",value=0.05)),
              conditionalPanel('input.corrorder=="hclust"',
                               numericInput("ncluster","Please specify the number of clusters",value=3)),
              selectInput("colorscheme","Please specify the color scheme",
                          choices=c("red-white-blue","heat","cm")),
              fluidRow(
                box(width=12,
                    column(width=6,DT::dataTableOutput("IDV_corrtable"),style = "height:500px; overflow-y: scroll;overflow-x: scroll;"),
                    column(width=6,plotOutput("IDV_corrplot"))))
      ),
      # Domain structure / annotated PTMs; panel rendered server-side.
      tabItem(tabName="IDV3",
              uiOutput("proteinACC"),
              uiOutput("col_position"),
              uiOutput("modificationType"),
              fluidRow(
                box(uiOutput("IDVseperate_panel")))
      ),
      # Circos plot linking termini / peptide / PTM evidence per protein.
      tabItem(tabName="IDV4",
              uiOutput("selectdata_circos"),
              uiOutput("selectcol_proteinACC_termini"),
              uiOutput("selectcol_proteinACC_peptide"),
              uiOutput("selectcol_proteinACC_PTM"),
              fluidRow(box(plotOutput("circosplot")))
      ),
      # generate report ----
      tabItem(tabName="GenerateReport",
              textInput("title","Customize a title of report...",value="Analysis"),
              textInput("description", "Customize a description of the report...", value=""),
              downloadButton("report", "Generate report"))
    )
  )
)
# Define server logic ----
server <- function(input, output) {
report_vals <- reactiveValues(
QCeffectPlot=NULL,
cvviolin=NULL,
distprotein=NULL,
upset=NULL,
datacompleteness=NULL,
corplot=NULL,
available_sets=NULL,
"viewviolin_for_protein data"=NULL,
"viewsummary_for_protein data"=NULL,
"viewviolin_for_termini data"=NULL,
"viewsummary_for_termini data"=NULL,
"viewviolin_for_peptide data"=NULL,
"viewsummary_for_peptide data"=NULL,
"viewviolin_for_PTM data"=NULL,
"viewsummary_for_PTM data"=NULL,
volcanoplot=NULL,
dmr=NULL,
clustering=NULL,
IDV_boxplot=NULL,
IDV_corrplot=NULL,
IDV_corrtable=NULL,
circosplot=NULL)
# Data input ----
protein_data<-reactive({
req(input$protein_data)
if(grepl(".xlsx",input$protein_data$name)){
df <- read.xlsx(input$protein_data$datapath)}
if (grepl(".csv",input$protein_data$name)){
df <- data.frame(read.csv(input$protein_data$datapath, header=TRUE, check.names=FALSE))
}
df
})
termini_data<-reactive({
req(input$termini_data)
if(grepl(".xlsx",input$termini_data$name)){
df <- read.xlsx(input$termini_data$datapath)}
if (grepl(".csv",input$termini_data$name)){
df <- read.csv(input$termini_data$datapath, header=TRUE, check.names=FALSE)
}
data.frame(df)
})
PTM_data<-reactive({
req(input$PTM_data)
if(grepl(".xlsx",input$PTM_data$name)){
df <- read.xlsx(input$PTM_data$datapath)}
if (grepl(".csv",input$PTM_data$name)){
df <- read.csv(input$PTM_data$datapath, header=TRUE, check.names=FALSE)
}
data.frame(df)
})
peptide_data<-reactive({
req(input$peptide_data)
if(grepl(".xlsx",input$peptide_data$name)){
df <- read.xlsx(input$peptide_data$datapath)}
if (grepl(".csv",input$peptide_data$name)){
df <- read.csv(input$peptide_data$datapath, header=TRUE, check.names=FALSE)
}
data.frame(df)
})
# data integration ----
DoE0<-reactive({
req(input$meta_data)
if(grepl(".xlsx",input$meta_data$name)){
df <- read.xlsx(input$meta_data$datapath)}
if (grepl(".csv",input$meta_data$name)){
df <- read.csv(input$meta_data$datapath, header=TRUE, check.names=FALSE)
}
dt<-setdoe(data.frame(df))
dt
})
output$replicacol<-renderUI({
selectizeInput(inputId = "replicacol",
label="Select the column indicates replica",
choices = colnames(DoE0())[-1])
})
replica_list<-reactive({
validate(need(input$replicacol, ""))
df<-cbind(DoE0()[,1],DoE0()[,input$replicacol])
})
DoE<-reactive({
dt<-DoE0()
if(input$whether_average_replica==TRUE){
dt[,1]<-dt[,input$replicacol]
dt[,input$replicacol]<-NULL
dt<-unique(dt)
}
dt
})
data_collection0<-reactive({
ls<-list()
if(input$whether_protein_file==TRUE){ls[["protein data"]]<-inputraw(protein_data(),DoE0(),input$protein_col_id)}
if(input$whether_termini_file==TRUE){ls[["termini data"]]<-inputraw(termini_data(),DoE0(),input$termini_col_id)}
if(input$whether_peptide_file==TRUE){ls[["peptide data"]]<-inputraw(peptide_data(),DoE0(),input$peptide_col_id)}
if(input$whether_PTM_file==TRUE){ls[["PTM data"]]<-inputraw(PTM_data(),DoE0(),input$PTM_col_id)}
if(input$whether_replace_protein==TRUE){ls[["protein data"]]$data<-peptide_based_fixed_data()}
ls
})
data_collection<-reactive({
ls<-data_collection0()
if(input$whethercorrectbatch==TRUE){
ls[["protein data"]]$data<-QCeffectPlot(data_collection0()[["protein data"]]$data,DoE0(),input$batch_col,
input$order_col, input$factor_cols, input$correctmethod)[["batchcorrectedmatrix"]]
ls[["termini data"]]$data<-QCeffectPlot(data_collection0()[["termini data"]]$data,DoE0(),input$batch_col,
input$order_col, input$factor_cols, input$correctmethod)[["batchcorrectedmatrix"]]
ls[["peptide data"]]$data<-QCeffectPlot(data_collection0()[["peptide data"]]$data,DoE0(),input$batch_col,
input$order_col, input$factor_cols, input$correctmethod)[["batchcorrectedmatrix"]]
ls[["PTM data"]]$data<-QCeffectPlot(data_collection0()[["PTM data"]]$data,DoE0(),input$batch_col,
input$order_col, input$factor_cols, input$correctmethod)[["batchcorrectedmatrix"]]
}
if(input$whether_average_replica==TRUE){
ls[["protein data"]]$data<-averagereplica(ls[["protein data"]]$data, replica_list())
ls[["termini data"]]$data<-averagereplica(ls[["termini data"]]$data, replica_list())
ls[["peptide data"]]$data<-averagereplica(ls[["peptide data"]]$data, replica_list())
ls[["PTM data"]]$data<-averagereplica(ls[["PTM data"]]$data, replica_list())
}
ls
})
# annotation tool ----
output$select_dataset_for_anno<-renderUI({
cc<-c("termini data", "PTM data")[c(input$whether_termini_file, input$whether_PTM_file)]
selectInput("select_dataset_for_anno",
label="Choose the dataset for annotation",
choices=cc,
selected=NULL)
})
dataforanno<-reactive({
validate(need(!is.null(input$select_dataset_for_anno),""))
if (input$select_dataset_for_anno=="termini data"){df<-termini_data()}
if (input$select_dataset_for_anno=="PTM data"){df<-PTM_data()}
df
})
output$anno_idf<-renderUI({
validate(need(!is.null(dataforanno()),""))
coln<-colnames(dataforanno())
selectizeInput(inputId = "anno_idf",
label="Select the column of identifier for data",
choices = coln)
})
output$anno_seq<-renderUI({
validate(need(!is.null(dataforanno()),""))
coln<-colnames(dataforanno())
selectizeInput(inputId = "anno_seq",
label="Select the column of stripped sequence for data",
choices = coln)
})
annotation_res<-eventReactive(input$annoButton,{
annotationtool(dataforanno()[,input$anno_idf], dataforanno()[,input$anno_seq], input$lseqwindow)}
)
output$downloadAnnotation <- downloadHandler(
filename = function() {
paste("AnnotationResults", ".csv", sep = "")
},
content = function(file) {
write.csv(annotation_res(), file, row.names = FALSE)
}
)
output$annotationpreview<-renderDataTable({
annotation_res()[c(1:20),c(1:4,9:10)]
})
# peptide calculator ----
peptide_based_fixed_data<-eventReactive(
input$pepcalButton,{
calIntPep(peptide_data(),input$sumform, DoE0(),input$peptide_col_ACC)
})
output$peptidepreview<-renderDataTable({
peptide_based_fixed_data()[c(1:20),c(1:6)]
})
# QC plots ----
standardQC_meta<-reactive({
req(input$standardQC_meta)
if(grepl(".xlsx",input$standardQC_meta$name)){
df <- read.xlsx(input$standardQC_meta$datapath)}
if (grepl(".csv",input$standardQC_meta$name)){
df <- read.csv(input$standardQC_meta$datapath, header=TRUE, check.names=FALSE)
}
setdoe(data.frame(df))
})
standardQC_data<-reactive({
req(input$standardQC_data)
if(grepl(".xlsx",input$standardQC_data$name)){
df <- read.xlsx(input$standardQC_data$datapath)}
if (grepl(".csv",input$standardQC_data$name)){
df <- read.csv(input$standardQC_data$datapath, header=TRUE, check.names=FALSE)
}
aa<-inputraw(data.frame(df),standardQC_meta(),1)
aa[["data"]]
})
# UI: meta column that identifies the acquisition batch of the standard
# (QC) samples; the first meta column (sample names) is excluded.
output$batch_col<-renderUI({
validate(need(standardQC_data(),""),need(standardQC_meta(),""))
selectInput(inputId = "batch_col",
label="select the column indicates batch of samples:",
choices=colnames(standardQC_meta())[-1])
})
# UI: meta column holding the MS running order of the QC samples.
output$order_col<-renderUI({
validate(need(standardQC_data(),""),need(standardQC_meta(),""))
selectInput(inputId = "order_col",
label="select the column indicates running order of samples:",
choices=colnames(standardQC_meta())[-1])
})
# UI: optional meta columns carried along as annotation (multi-select).
output$factor_cols<-renderUI({
validate(need(standardQC_data(),""),need(standardQC_meta(),""))
selectInput(inputId = "factor_cols",
label="select the factors you would like to include in annotation",
choices = colnames(standardQC_meta())[-1],
selected = NULL, multiple=TRUE)
})
# Batch-effect assessment/correction runs only when the user presses the
# confirm button; the full result list is cached in report_vals so the
# downloadable report can reuse it.
standardQCoutput<-eventReactive(
input$standar_batcheffectdButton,{
report_vals$QCeffectPlot<-QCeffectPlot(standardQC_data(),standardQC_meta(),input$batch_col,
input$order_col, input$factor_cols, input$correctmethod)
report_vals$QCeffectPlot
})
# "plot" is a list of grobs produced by QCeffectPlot(); arrange on a grid.
output$standard_batcheffectplots <- renderPlot({
grid.arrange(grobs=standardQCoutput()[["plot"]])
})
output$standard_batcheffect_clustering <- renderPlot({
standardQCoutput()[["clustering"]]
})
output$standard_batcheffect_heatmap <- renderPlot({
standardQCoutput()[["heatmap"]]
})
# CSV download of the batch-corrected QC intensity matrix.
output$DownloadCorrectedQCData <- downloadHandler(
filename = function() {
paste("CorrectedQCData", ".csv", sep = "")
},
content = function(file) {
write.csv(standardQCoutput()[["batchcorrectedmatrix"]], file, row.names = TRUE)
}
)
# UI: dataset picker for the sample-based QC plots; only datasets whose
# upload checkbox is ticked are offered.
output$selectdata_QC<-renderUI({
dataset<-c("protein data", "termini data", "peptide data", "PTM data")[
c(input$whether_protein_file,input$whether_termini_file,input$whether_peptide_file, input$whether_PTM_file)]
selectInput(inputId = "selectdata_QC",
label="Select data for QC plots:",
choices=dataset)
})
# UI: grouping column for the identified-protein distribution plot.
output$group<-renderUI({
DoE<-DoE()
selectizeInput(inputId = "group",
label="Select group column for grouped graphs",
choices = colnames(DoE)[-1])
})
# UI: grouping column for the overlap (upset) plot.
output$group2<-renderUI({
DoE<-DoE()
selectizeInput(inputId = "group2",
label="Select group column for grouped graphs",
choices = colnames(DoE)[-1])
})
# QC plots; each rendered object is also stored in report_vals so it can
# be embedded in the generated report.
output$graph1 <- renderPlot({
validate(need(input$selectdata_QC,""))
report_vals$cvviolin<-cvplots(data_collection()[[input$selectdata_QC]]$data,DoE())
report_vals$cvviolin})
output$graph3 <- renderPlot({
validate(need(input$selectdata_QC,""))
report_vals$distprotein<-distIndProtein(data_collection()[[input$selectdata_QC]]$data,DoE(),input$group)
report_vals$distprotein})
output$graph4 <- renderPlot({
validate(need(input$selectdata_QC,""))
report_vals$upset<-upsetplot(data_collection()[[input$selectdata_QC]]$data,DoE(),input$group2)
report_vals$upset})
output$graph5 <- renderPlot({
validate(need(input$selectdata_QC,""))
report_vals$datacompleteness<-datacompleteness(data_collection()[[input$selectdata_QC]]$data,DoE())
report_vals$datacompleteness})
output$graph6 <- renderPlot({
validate(need(input$selectdata_QC,""))
report_vals$corplot<-corplot(data_collection()[[input$selectdata_QC]]$data)
report_vals$corplot})
# pre process ----
# Per-dataset processed matrices and NA indices; filled as a side effect
# of rendering the per-dataset violin plots below.
react_data_collection<-reactiveValues()
react_na_index<-reactiveValues()
# Builds one tab per uploaded dataset, each with its own filter /
# normalization / imputation controls, preview plot, summary and download.
output$preprocessing_panel<-renderUI({
available_sets<-names(data_collection())
report_vals$available_sets<-available_sets
cc<-c("No normalization",
"median randomization",
"normalization over same protein and samples under same condition",
"normalization over samples under same condition")
# Row-completeness filter level (%) per dataset.
lapply(available_sets,function(x){
output[[paste0("filter_level_for_",x)]]<-
renderUI(
numericInput(inputId = paste0("filter_level_for_",x),
label=paste0("select the row filter level (%) for ", x),
value=0)
)
})
# Normalization method; the "same protein" option (3rd entry of cc) is
# only offered for termini and PTM data.
lapply(available_sets,function(x){
output[[paste0("normalization_method_for_",x)]]<-
renderUI(
selectInput(inputId = paste0("normalization_method_for_",x),
label=paste0("select the normalization method for ", x),
choices=cc[c(TRUE,TRUE,(x %in% c("termini data","PTM data")), TRUE)])
)
})
# Condition column for condition-wise normalization (empty otherwise).
lapply(available_sets, function(x){
output[[paste0("norm_condition_for_",x)]]<-renderUI({
validate(need(input[[paste0("normalization_method_for_",x)]],""))
if (input[[paste0("normalization_method_for_",x)]] %in% c("normalization over same protein and samples under same condition",
"normalization over samples under same condition")){
cc<-colnames(DoE())[-1]
}else{cc<-c(NULL)}
selectInput(inputId = paste0("norm_condition_for_",x),
label="select the condition",
choices=cc)
})
})
# Protein-group annotation column, only needed for "same protein"
# normalization.
lapply(available_sets,function(x){
output[[paste0("protein_anno_for_",x)]]<-
renderUI({
validate(need(input[[paste0("normalization_method_for_",x)]]=="normalization over same protein and samples under same condition",""))
selectInput(inputId = paste0("protein_anno_for_",x),
label=paste0("select the column cotains protein groups for ", x),
choices=colnames(data_collection()[[x]]$other_annotation))
})
})
# Imputation method per dataset.
lapply(available_sets,function(x){
output[[paste0("imputation_method_for_",x)]]<-
renderUI(
selectInput(inputId = paste0("imputation_method_for_",x),
label="Imputation Method",
choices = c( "No imputation",
"Down-shifted Normal samples",
"MinProb",
"knn",
"min"),
selected = NULL)
)
})
# Condition column for down-shifted-normal imputation (empty otherwise).
lapply(available_sets, function(x){
output[[paste0("impute_condition_for_",x)]]<-renderUI({
validate(need(input[[paste0("imputation_method_for_",x)]],""))
if (input[[paste0("imputation_method_for_",x)]]== "Down-shifted Normal samples"){
cc<-colnames(DoE())[-1]
}else{cc<-c(NULL)}
selectInput(inputId = paste0("impute_condition_for_",x),
label="select the condition",
choices=cc)
})
})
# NOTE(review): preprocessing() runs as a side effect of rendering this
# violin plot and is what populates react_data_collection /
# react_na_index; downstream tabs (DE tests, clustering, ...) read those
# values, so they are only valid after this plot has rendered.
lapply(available_sets,function(x){
output[[paste0("viewviolin_for_",x)]]<-
renderPlot({
validate(need(data_collection(),""))
ctitle<-paste0(c("filtered for ",input[[paste0("filter_level_for_",x)]],
"% completeness, normalized by ", input[[paste0("normalization_method_for_",x)]],"\n",
"imputation by ", input[[paste0("imputation_method_for_",x)]]), collapse="")
aa<-preprocessing(x,data_collection(),DoE(),
input[[paste0("filter_level_for_",x)]],
input[[paste0("normalization_method_for_",x)]],
input[[paste0("imputation_method_for_",x)]],
input[[paste0("norm_condition_for_",x)]],
input[[paste0("impute_condition_for_",x)]],
input[[paste0("protein_anno_for_",x)]])
react_data_collection[[x]]<-aa[["data"]]
react_na_index[[x]]<-aa[["na.index"]]
report_vals[[paste0("viewviolin_for_",x)]]<-plotviolin(aa[["data"]],ctitle)
report_vals[[paste0("viewviolin_for_",x)]]
})
})
# Descriptive statistics of the processed matrix (fBasics::basicStats).
lapply(available_sets,function(x){
output[[paste0("viewsummary_for_",x)]]<-
renderPrint({
report_vals[[paste0("viewsummary_for_",x)]]<-basicStats(react_data_collection[[x]])
report_vals[[paste0("viewsummary_for_",x)]]
})
})
# CSV download of each processed matrix.
lapply(available_sets, function(x){
output[[paste0("DownloadProcessedData_for",x)]]<- downloadHandler(
filename = function() {
paste("ProcessedData_", x, ".csv", sep = "")
},
content = function(file) {
write.csv(react_data_collection[[x]], file, row.names = TRUE)
}
)
})
# Assemble one tab per dataset and return the tabset as the panel UI.
myTabs = lapply(available_sets, function(x){
tabPanel(title=x,
uiOutput(paste0("filter_level_for_",x)),
uiOutput(paste0("normalization_method_for_",x)),
uiOutput(paste0("norm_condition_for_",x)),
uiOutput(paste0("protein_anno_for_",x)),
uiOutput(paste0("imputation_method_for_",x)),
uiOutput(paste0("impute_condition_for_",x)),
plotOutput(paste0("viewviolin_for_",x)),
box(width=12,
column(width=12,verbatimTextOutput(paste0("viewsummary_for_",x)),style = "height:500px; overflow-y: scroll;overflow-x: scroll;")),
downloadButton(paste0("DownloadProcessedData_for",x),"Download processed data")
)
})
do.call(tabsetPanel, myTabs)
})
# statistical inference ----
# UI: dataset picker for DE testing; only uploaded datasets are offered.
output$selectdata_SI<-renderUI({
dataset<-c("protein data", "termini data", "peptide data", "PTM data")[
c(input$whether_protein_file,input$whether_termini_file,input$whether_peptide_file, input$whether_PTM_file)]
selectInput(inputId = "selectdata_SI",
label="Select data for statistical inference:",
choices=dataset)
})
# equivalence test FoI and FoI levels
# UI: blocking factor for the DE test (meta columns minus sample names).
output$blockfactor<-renderUI({
DoE<-DoE()
selectInput(inputId = "blockfactor",
label="Blocking on...(supports 2-level factors only)",
choices = c(colnames(DoE)[-1]),
selected=NULL)
})
# UI: factor of interest to test.
output$FoI<-renderUI({
DoE<-DoE()
selectInput(inputId = "FoI",
label="test the factor of...",
choices = colnames(DoE)[-1],
selected = NULL)
})
# UI: factor used to color points in the dimensionality-reduction plot.
output$colorfactor<-renderUI({
DoE<-DoE()
selectInput(inputId = "colorfactor",
label="color by...",
choices = c(colnames(DoE)[-1]),
selected = NULL)
})
# output$eqFoI<-renderUI({
# DoE<-DoE()
# selectInput(inputId = "eqFoI",
# label="test the factor of...",
# choices = c(colnames(DoE)[-1]),
# selected = NULL)
# })
# output$eqFoIlevels<-renderUI({
# DoE<-DoE()
# selectInput(inputId = "eqFoIlevels",
# label="test the level of...",
# choices = unique(DoE[,input$eqFoI]),
# selected = NULL, multiple=TRUE)
# })
# output$eqtestres1<- renderPlot({
# validate(need(input$eqFoIlevels[2], "Please select two levels to conduct this test."))
# report_vals$eqtest1<-eqtest.row(react_fixed_data(), DoE(), input$eqFoI,
# input$eqFoIlevels[1], input$eqFoIlevels[2],
# input$lowerbound, input$upperbound)
# report_vals$eqtest1
# })
# output$eqtestres2<-renderPlot({
# validate(need(input$eqFoIlevels[2], "Please select two levels to conduct this test."))
# report_vals$eqtest2<-eqtest.all(react_fixed_data(), DoE(), input$eqFoI,
# input$eqFoIlevels[1], input$eqFoIlevels[2],
# input$lowerbound, input$upperbound)
# report_vals$eqtest2
# })
# UI: the two levels of the chosen factor to contrast.
output$FoIlevels<-renderUI({
DoE<-DoE()
selectInput(inputId = "FoIlevels",
label="test the level of...",
choices = unique(DoE[,input$FoI]),
selected = NULL, multiple=TRUE)
})
# DE test results per dataset, keyed by dataset name; filled when the
# volcano plot renders and read by the download handler and later tabs.
listoutput<-reactiveValues()
output$volcanoplot <- renderPlot({
validate(need(input$FoIlevels[2], "Please select two levels to conduct this test."))
listoutput[[input$selectdata_SI]]<-testingDE(react_data_collection[[input$selectdata_SI]], DoE(),input$DEtest,
input$FoI, input$FoIlevels, input$whetherblock,input$blockfactor,
input$whetherweighting, input$NAweights, react_na_index[[input$selectdata_SI]], input$whetherstandardDE)
report_vals$volcanoplot<-listoutput[[input$selectdata_SI]][["graph"]]
report_vals$volcanoplot
})
# CSV download of the full DE result table ("alldf") for the selected set.
output$DownloadDEresults <- downloadHandler(
filename = function() {
paste("StatisticalTestResultsfor_", input$selectdata_SI, ".csv", sep = "")
},
content = function(file) {
write.csv(listoutput[[input$selectdata_SI]][["alldf"]], file, row.names = TRUE)
}
)
# dimensionality reduction ----
output$selectdata_DR<-renderUI({
dataset<-c("protein data", "termini data", "peptide data", "PTM data")[
c(input$whether_protein_file,input$whether_termini_file,input$whether_peptide_file, input$whether_PTM_file)]
selectInput(inputId = "selectdata_DR",
label="Select data for dimensionality reduction:",
choices=dataset)
})
# PCA / t-SNE / UMAP on either all rows or only the DE-significant ones
# (names taken from the DE result "DEdf" of the selected dataset).
output$dimenreduction <- renderPlot({
validate(need(input$selectdata_DR,""))
if (input$DRrows=="all"){includedrows=c(1:nrow(react_data_collection[[input$selectdata_DR]]))}else{
includedrows=listoutput[[input$selectdata_DR]][["DEdf"]]$name}
report_vals$dmr<-dimen.reduce(react_data_collection[[input$selectdata_DR]], DoE(), input$DRmethod, input$colorfactor, input$tSNEper, includedrows)
report_vals$dmr
})
# clustering ----
output$selectdata_cluster<-renderUI({
dataset<-c("protein data", "termini data", "peptide data", "PTM data")[
c(input$whether_protein_file,input$whether_termini_file,input$whether_peptide_file, input$whether_PTM_file)]
selectInput(inputId = "selectdata_cluster",
label="Select data for clustering:",
choices=dataset)
})
# Hierarchical clustering supports row labels; k-means / fuzzy take a
# cluster count instead (fcluster is a project helper).
output$cluster <- renderPlot({
validate(need(input$selectdata_cluster,""))
if (input$rows=="all"){includedrows=c(1:nrow(react_data_collection[[input$selectdata_cluster]]))}else{
includedrows=listoutput[[input$selectdata_cluster]][["DEdf"]]$name}
if (input$Cmethod=="Hierarchical Clustering"){
report_vals$clustering<-fcluster(react_data_collection[[input$selectdata_cluster]], input$Cmethod, includedrows, input$whetherlabel, 0)
}else{
report_vals$clustering<-fcluster(react_data_collection[[input$selectdata_cluster]], input$Cmethod, includedrows, FALSE, input$clusternum)
}
report_vals$clustering
})
# Individual Protein Visualization ----
output$selectdata_IDV<-renderUI({
dataset<-c("protein data", "termini data", "peptide data", "PTM data")[
c(input$whether_protein_file,input$whether_termini_file,input$whether_peptide_file, input$whether_PTM_file)]
selectInput(inputId = "selectdata_IDV",
label="Select dataset for individual protein visualization (valid for boxplot and corrplot):",
choices=dataset)
})
# Free-text protein list, only shown when the user chose to specify IDs.
output$proteingroupID<-renderUI({
validate(need(input$proteingroup=="specify below...",""))
textInput("proteingroupID", "Which to visualize? (separate by comma)", value = "")
})
# Row names to visualize: either the DE-significant proteins or the
# comma-separated IDs typed by the user.
rows_include<-reactive({
validate(need(input$selectdata_IDV,""))
if(input$proteingroup=="significant in differential expression"){
r<-listoutput[[input$selectdata_IDV]][["DEdf"]]$name}
if(input$proteingroup=="specify below..."){
r<-unlist(strsplit(input$proteingroupID,","))}
r
})
# Table of the processed matrix restricted to the selected proteins.
# The original assigned `options = list(paging=FALSE)` to a dead local
# variable inside the expression; pass it to renderDataTable so paging is
# actually disabled in the widget.
output$IDV_table<-DT::renderDataTable({
validate(need(rows_include(),""))
react_data_collection[[input$selectdata_IDV]][rows_include(),]
}, options = list(paging = FALSE))
# UI: faceting variable for the per-protein boxplot (full meta table,
# before any replica averaging, hence DoE0).
output$facet_factor<-renderUI({
selectizeInput(inputId = "facet_factor",
label="Select the faceting variable in the boxplot:",
choices = colnames(DoE0())[-1])
})
# Faceted boxplot of the selected proteins (IDV_plot is a project helper).
output$IDV_boxplot<-renderPlot({
validate(need(rows_include(),""))
report_vals$IDV_boxplot<-IDV_plot(react_data_collection[[input$selectdata_IDV]][rows_include(),],input$facet_factor,DoE())
report_vals$IDV_boxplot
})
# Protein-protein correlation plot with user-chosen ordering, p-value
# threshold, cluster count and color scheme.
output$IDV_corrplot<-renderPlot({
validate(need(rows_include(),""))
report_vals$IDV_corrplot<-do.call(corrplot_customize,list(data=react_data_collection[[input$selectdata_IDV]][rows_include(),],
order=input$corrorder,
p_threshold=input$p_threshold,
ncluster=input$ncluster,
colorscheme=input$colorscheme))
report_vals$IDV_corrplot
})
# Pairwise-correlation table of the selected proteins, with rows/columns
# that have fewer than 2 non-NA correlations pruned.
# Fixes vs. original: (1) `options` was a dead local assignment, now
# passed to renderDataTable; (2) subsetting used default drop=TRUE, so a
# matrix shrunk to 1x1 silently dropped to a vector and the next
# two-index subscript raised "incorrect number of dimensions".
output$IDV_corrtable<-DT::renderDataTable({
validate(need(rows_include(),""))
data<-react_data_collection[[input$selectdata_IDV]][rows_include(),]
t_data<-t(data)
M<-cor(t_data,use="pairwise.complete.obs")
for (i in rownames(M)){
# Skip names already pruned in an earlier iteration.
if (!(i %in% rownames(M))) next
if (sum(!is.na(M[,i]))<2){
keep <- rownames(M) != i
M <- M[keep, keep, drop = FALSE]  # drop=FALSE keeps matrix structure
}
}
report_vals$IDV_corrtable<-M
M
}, options = list(paging = FALSE))
# UI for the lollipop view: these three selectors only appear when the
# protein dataset is selected AND PTM data is available.
output$proteinACC<-renderUI({
validate(need((input$selectdata_IDV=="protein data")&(!is.null(data_collection()[["PTM data"]])),""))
selectInput("proteinACC","Select the column specifying protein accession in the PTM data:",
choices=c(colnames(data_collection()[["PTM data"]]$other_annotation)[-1]))
})
output$col_position<-renderUI({
validate(need((input$selectdata_IDV=="protein data")&(!is.null(data_collection()[["PTM data"]])),""))
selectInput("col_position","Select the column specifying modification position in the PTM data:",
choices=c(colnames(data_collection()[["PTM data"]]$other_annotation)[-1]))
})
output$modificationType<-renderUI({
validate(need((input$selectdata_IDV=="protein data")&(!is.null(data_collection()[["PTM data"]])),""))
selectInput("modificationType","Select the column specifying modification type in the PTM data:",
choices=c(colnames(data_collection()[["PTM data"]]$other_annotation)[-1]))
})
# One tab per selected protein, each rendering a combined lollipop plot
# of the PTM sites annotated to that protein.
output$IDVseperate_panel<-renderUI({
validate(need((input$selectdata_IDV=="protein data")&(!is.null(data_collection()[["PTM data"]])),
"This function is valid only when protein data selected previously and PTM data available."))
available_proteins<-rows_include()
lapply(available_proteins,function(x){
output[[paste0("CLG_for_",x)]]<-renderPlot({
# data_collection()[["PTM data"]][["other_annotation"]][,input$proteinACC]
#if (length(which(data_collection()[["PTM data"]][["other_annotation"]][,input$proteinACC]==x))==0){
# o<-renderText("No PTM protein groups match this protein selected")
#}else{
# Rows of the PTM annotation whose accession matches this protein.
ind<-which(data_collection()[["PTM data"]][["other_annotation"]][,input$proteinACC]==x)
report_vals[[paste0("CLG_for_",x)]]<-combined_lolipop_plot(x, data_collection()[["PTM data"]],ind, input$proteinACC,input$col_position,input$modificationType)
report_vals[[paste0("CLG_for_",x)]]
})
})
myTabs = lapply(available_proteins, function(x){
tabPanel(title=x,
plotOutput(paste0("CLG_for_",x)))
})
do.call(tabsetPanel, myTabs)
})
# Circos plot: datasets to include and, per dataset, the annotation
# column holding the protein accession.
output$selectdata_circos<-renderUI({
dataset<-c("termini data", "peptide data", "PTM data")[c(input$whether_termini_file,input$whether_peptide_file, input$whether_PTM_file)]
checkboxGroupInput("selectdata_circos",
"please select dataset for circosplot:",
choices = dataset)
})
output$selectcol_proteinACC_termini<-renderUI({
validate(need("termini data" %in% input$selectdata_circos,""))
selectInput("selectcol_proteinACCs_termini",
"please select column of protein groups:",
choices = colnames(data_collection()[["termini data"]][["other_annotation"]])[-1])
})
output$selectcol_proteinACC_peptide<-renderUI({
validate(need("peptide data" %in% input$selectdata_circos,""))
selectInput("selectcol_proteinACCs_peptide",
"please select column of protein groups:",
choices = colnames(data_collection()[["peptide data"]][["other_annotation"]])[-1])
})
output$selectcol_proteinACC_PTM<-renderUI({
validate(need("PTM data" %in% input$selectdata_circos,""))
selectInput("selectcol_proteinACCs_PTM",
"please select column of protein groups:",
choices = colnames(data_collection()[["PTM data"]][["other_annotation"]])[-1])
})
# The accession-column vector is named by dataset so circosplot.fun can
# look up the right column per selected dataset.
output$circosplot<-renderPlot({
validate(need(input$selectdata_circos,""))
proteinACC<-c(input$selectcol_proteinACCs_termini, input$selectcol_proteinACCs_peptide, input$selectcol_proteinACCs_PTM)
names(proteinACC)<-input$selectdata_circos
aa<-circosplot.fun(listoutput,data_collection(),proteinACC,input$selectdata_circos)
report_vals$circosplot<-aa
aa
})
# report generate ----
# Knits report.rmd in a temp dir (deployment may lack write access to the
# working dir), passing everything accumulated in report_vals as params.
output$report <- downloadHandler(
filename = "report.html",
content = function(file) {
# Copy the report file to a temporary directory before processing it, in
# case we don't have write permissions to the current working dir (which
# can happen when deployed).
#tempReport<-file.path(tempdir(), "report.rmd")
#file.copy("report.Rmd", tempReport, overwrite = TRUE)
# Knit the document, passing in the `params` list, and eval it in a
src <- normalizePath("report.rmd")
owd <- setwd(tempdir())
on.exit(setwd(owd))
file.copy(src, "report.rmd", overwrite = TRUE)
report_vals$title<-input$title
report_vals$description<-input$description
params0<-list(imported=report_vals)
# child of the global environment (this isolates the code in the document
# from the code in this app).
out<-rmarkdown::render("report.rmd", output_file = file,params = params0)
file.rename(out, file)
}
)
}
# Run the app ----
shinyApp(ui = ui, server = server)
library(shinydashboard)
library(shiny)
library(shinycssloaders)
library(stringr)
library(DT)
library(fBasics)
source("DataInput.R")
source("QCplots.R")
source("Preprocessing.R")
source("testingDE.R")
source("DimenReduce.R")
source("UnsupervisedLearning.R")
source("annotationtool.R")
source("EquivalenceTest.R")
source("PeptideCalculate.R")
source("batcheffect.R")
source("IDV.R")
options(shiny.maxRequestSize=20*1024^2)
# Define UI ----
# Dashboard layout: header + sidebar menu (one entry per analysis stage)
# + a body with one tabItem per menu entry.
ui <- dashboardPage(
dashboardHeader(title="Proteomics Data Analysis Pipeline", titleWidth = 350),
# NOTE(review): menuItem's argument is spelled `tabName`; the lowercase
# `tabname` below is swallowed by `...` (these parents only hold
# sub-items, so no tab is targeted) — confirm intent.
dashboardSidebar(width = 275,sidebarMenu(
menuItem("Input and annotation", tabName = "IA"),
menuItem("Quality Control" , tabname = "QC", icon = icon("bars"),
menuSubItem("Standard based QC", tabName = "StandardQC"),
menuSubItem("Sample based QC", tabName = "SampleQC")),
menuItem("Pre-processing", tabName = "PP"),
menuItem("Statistical Inference", tabName = "StatInf"),
menuItem("Dimensionality reduction", tabName = "DR"),
menuItem("Clustering", tabName = "Clustering"),
menuItem("Individual Protein Visualization",tabname="IV",icon=icon("bars"),
menuSubItem("Boxplot", tabName = "IDV1"),
menuSubItem("Correlation plot", tabName = "IDV2"),
menuSubItem("Domain structure & annotated PTMs ", tabName = "IDV3"),
menuSubItem("Circosplot", tabName = "IDV4")),
menuItem("Generate a report", tabName="GenerateReport"))),
dashboardBody(
tabItems(
# Input and annotation ----
# Meta file + up to four optional quantitative datasets; each dataset
# checkbox reveals its own file input and identifier-column selector.
tabItem(tabName="IA",
h4("File input"),
fluidRow(
box(h4("Meta file input"),
fileInput("meta_data", "please upload .csv/.xlsx file of meta data with only sample names matched with raw data, and corresponding covariates of interest:",
multiple = FALSE,
accept = c("text/csv",
"text/comma-separated-values,text/plain",
".csv", ".xlsx")),
checkboxInput("whether_replica", "Is there a column indicating replica in the meta data file?", value=FALSE),
conditionalPanel('input.whether_replica==1',
uiOutput("replicacol")),
checkboxInput("whether_protein_file", "Upload Protein file", value=FALSE),
conditionalPanel('input.whether_protein_file==1',
fileInput("protein_data", "please upload .csv/.xlsx file of raw data with quantitative intensities of proteins:",
multiple = FALSE,
accept = c("text/csv",
"text/comma-separated-values,text/plain",
".csv", ".xlsx")),
numericInput("protein_col_id","Which column contains unique identifiers of this dataset?",1)),
checkboxInput("whether_termini_file", "Upload Termini file", value=FALSE),
conditionalPanel('input.whether_termini_file==1',
fileInput("termini_data", "please upload .csv/.xlsx file of raw termini data with quantitative intensities of terminis:",
multiple = FALSE,
accept = c("text/csv",
"text/comma-separated-values,text/plain",
".csv", ".xlsx")),
numericInput("termini_col_id","Which column contains unique identifiers of this dataset?",1)),
checkboxInput("whether_peptide_file","Upload Peptide file", value=FALSE),
conditionalPanel('input.whether_peptide_file==1',
fileInput("peptide_data", "please upload .csv/.xlsx file of raw peptide data with quantitative intensities of peptides:",
multiple = FALSE,
accept = c("text/csv",
"text/comma-separated-values,text/plain",
".csv", ".xlsx")),
numericInput("peptide_col_id","Which column contains unique identifiers of this dataset?",1)),
checkboxInput("whether_PTM_file","Upload PTM file", value=FALSE),
conditionalPanel('input.whether_PTM_file==1',
fileInput("PTM_data", "please upload .csv/.xlsx file of raw PTM file",
multiple = FALSE,
accept = c("text/csv",
"text/comma-separated-values,text/plain",
".csv", ".xlsx")),
numericInput("PTM_col_id","Which column contains unique identifiers of this dataset?",1))),
# Sequence-window annotation for termini/PTM datasets.
box(h4("Annotation tools"),
conditionalPanel('input.whether_termini_file==1 || input.whether_PTM_file==1',
uiOutput("select_dataset_for_anno"),
uiOutput("anno_idf"),
uiOutput("anno_seq"),
numericInput("lseqwindow",
h5("Length of sequence window:"),
value = 1),
actionButton("annoButton", "Get annotation"),
div(dataTableOutput("annotationpreview")%>% withSpinner(color="#0dc5c1"), style = "font-size:60%"),
downloadButton("downloadAnnotation", "Download full annotation"))),
# Roll peptide intensities up to protein level (sum/weighted/top3).
box(h4("Protein intensity calculator based on peptide"),
conditionalPanel('input.whether_peptide_file==1',
numericInput("peptide_col_ACC",
"Please input the column of protein accession:",
value=1),
selectInput("sumform", "Choose method of sum:",choices=c("sum","weighted","top3")),
actionButton("pepcalButton", "Calculate"),
div(dataTableOutput("peptidepreview")%>% withSpinner(color="#0dc5c1"), style = "font-size:60%"),
checkboxInput("whether_replace_protein", "Use the dataset calculated from peptides to replace the protein data?",
value=FALSE)))
)),
# Standard QC ----
# Batch-effect assessment/correction on standard (QC) samples.
tabItem(tabName = "StandardQC",
box(fileInput("standardQC_meta", "please upload .csv/.xlsx file of meta data for standard samples:",
multiple = FALSE,
accept = c("text/csv",
"text/comma-separated-values,text/plain",
".csv", ".xlsx")),
fileInput("standardQC_data", "please upload .csv/.xlsx file of raw data with quantitative intensities of standard samples:",
multiple = FALSE,
accept = c("text/csv",
"text/comma-separated-values,text/plain",
".csv", ".xlsx"))
),
fluidRow(
box(uiOutput("batch_col"),
uiOutput("order_col"),
uiOutput("factor_cols"),
selectizeInput("correctmethod",label="Batch effect correction method",
choices=c("MedianCentering","MeanCentering")),
actionButton("standar_batcheffectdButton", "Confirm"),
downloadButton("DownloadCorrectedQCData","Download corrected QC data")),
box(plotOutput("standard_batcheffectplots")%>% withSpinner(color="#0dc5c1")),
box(plotOutput("standard_batcheffect_clustering")%>% withSpinner(color="#0dc5c1")),
box(plotOutput("standard_batcheffect_heatmap")%>% withSpinner(color="#0dc5c1"))
)),
# Sample QC ----
# QC plot gallery on the biological samples of a chosen dataset.
tabItem(tabName = "SampleQC",
fluidRow(
box(h4("Show the QC graphs of"),
uiOutput("selectdata_QC"),
br(),
checkboxInput("whethercorrectbatch","Correct the batch effect with the same setting in Standard QC?"),
checkboxInput("whether_average_replica",
"Average the intensities across replica?",
value=FALSE)),
tabBox(title = "QC plots",
tabPanel("%CV plots for QC samples",
plotOutput("graph1")),
tabPanel("Distribution of identified proteins",
uiOutput("group"),
plotOutput("graph3")),
tabPanel("Overlap of identified proteins",
uiOutput("group2"),
plotOutput("graph4")),
tabPanel("Data Completeness",
plotOutput("graph5")),
tabPanel("Correlation Plot",
plotOutput("graph6")))
#conditionalPanel("input.whethercorrectbatch==1",
# plotOutput("sample_batch_plot"))
)),
# Pre-processing ----
# The per-dataset controls are generated server-side (preprocessing_panel).
tabItem(tabName = "PP",
h4("2. Pre-processing"),
# Normalization, imputation, etc.
fluidRow(
box(uiOutput("preprocessing_panel"))
)),
#plotOutput("viewviolin")%>% withSpinner(color="#0dc5c1"),
#downloadButton("DownloadProcessedData", "Download")),
# Statistical Inference ----
tabItem(tabName="StatInf",
# Equivalence test
#h4("3.1 Inference on Equivalence"),
uiOutput("selectdata_SI"),
# box(
# selectInput(inputId = "dd",
# label="Specify the way of testing...",
# choices = c("By column, test all proteins at the same time",
# "By row, test each protein respectively")),
# numericInput("lowerbound", "Lower bound:", -0.5),
# numericInput("upperbound", "Upper bound:", 0.5),
# uiOutput("eqFoI"),
# uiOutput("eqFoIlevels"),
# conditionalPanel('input.dd=="By row, test each protein respectively"',
# plotOutput("eqtestres1")%>% withSpinner(color="#0dc5c1")),
# conditionalPanel('input.dd=="By column, test all proteins at the same time"',
# plotOutput("eqtestres2"))
# ),
# DE tests
#h4("3.2 Inference on Differential Expression"),
fluidRow(
box(width=12,
checkboxInput(inputId = "whetherstandardDE",
"Standardization?",
value=FALSE),
#conditionalPanel('input.whetherstandardDE==1',
# selectInput(inputId = "standardmethodDE",
# label="Standardization by",
# choices = c("z-score",
# "log2FC over median"))),
checkboxInput(inputId = "whetherblock",
"Blocking", value = FALSE),
conditionalPanel('input.whetherblock==1',
uiOutput("blockfactor")),
checkboxInput(inputId = "whetherweighting",
"Weighting?",
value=FALSE),
conditionalPanel('input.whetherweighting==1',
numericInput("NAweights", "Weight",value=10e-5)),
selectInput(inputId = "DEtest",
label="Statistical testing for DE",
choices = c("limma",
"t-test"),
selected = NULL, width="33%"),
uiOutput("FoI"),
uiOutput("FoIlevels"),
h5("Volcano plot of DE testing results"),
plotOutput("volcanoplot")%>% withSpinner(color="#0dc5c1"),
downloadButton("DownloadDEresults","Download Differential Expression test results"))
)),
# Dimensionality Reduction ----
tabItem(tabName = "DR",
h4("4. Dimensionality reduction"),
uiOutput("selectdata_DR"),
fluidRow(
box(
selectInput(inputId = "DRrows",
label="use the rows of...",
choices=c("all","significant")),
uiOutput("colorfactor")),
box(
selectInput(inputId = "DRmethod",
label="use the method of...",
choices=c("PCA","t-SNE", "UMAP")),
conditionalPanel('input.DRmethod=="t-SNE"',
numericInput("tSNEper","Specify the perplexity for t-SNE", 0)))),
box(width=12,plotOutput("dimenreduction"))),
# clustering ----
tabItem(tabName="Clustering",
h4("5. Clustering"),
uiOutput("selectdata_cluster"),
fluidRow(
box(
selectInput(inputId = "Cmethod",
label="use the method of...",
choices=c("Hierarchical Clustering",
"k-means",
"Fuzzy Clustering")),
selectInput(inputId = "rows",
label="use the rows of...",
choices=c("significant","all")),
conditionalPanel('input.Cmethod == "Hierarchical Clustering"',
checkboxInput("whetherlabel",
h5("include row labels"),
value = FALSE)),
conditionalPanel('input.Cmethod != "Hierarchical Clustering"',
numericInput("clusternum",
h5("the number of centers: "),
value = 4)))),
box(width=12,plotOutput("cluster"))),
# Individual Protein Visualization ----
tabItem(tabName="IDV1",
uiOutput("selectdata_IDV"),
selectInput("proteingroup","Select the group of...",
choices=c("significant in differential expression",
"specify below...")),
uiOutput("proteingroupID"),
uiOutput("facet_factor"),
fluidRow(
box(width=12,
column(width=6,
DT::dataTableOutput("IDV_table"),style = "height:500px; overflow-y: scroll;overflow-x: scroll;"),
column(width=6,
plotOutput("IDV_boxplot"))))
),
tabItem(tabName="IDV2",
selectInput("corrorder","Please specify the type of correlation plot",
choices=c("hclust","test")),
conditionalPanel('input.corrorder=="test"',
numericInput("p_threshold", "Please specify p-value threshold",value=0.05)),
conditionalPanel('input.corrorder=="hclust"',
numericInput("ncluster","Please specify the number of clusters",value=3)),
selectInput("colorscheme","Please specify the color scheme",
choices=c("red-white-blue","heat","cm")),
fluidRow(
box(width=12,
column(width=6,DT::dataTableOutput("IDV_corrtable"),style = "height:500px; overflow-y: scroll;overflow-x: scroll;"),
column(width=6,plotOutput("IDV_corrplot"))))
),
tabItem(tabName="IDV3",
uiOutput("proteinACC"),
uiOutput("col_position"),
uiOutput("modificationType"),
fluidRow(
box(uiOutput("IDVseperate_panel")))
),
tabItem(tabName="IDV4",
uiOutput("selectdata_circos"),
uiOutput("selectcol_proteinACC_termini"),
uiOutput("selectcol_proteinACC_peptide"),
uiOutput("selectcol_proteinACC_PTM"),
fluidRow(box(plotOutput("circosplot")))
),
# generate report ----
tabItem(tabName="GenerateReport",
textInput("title","Customize a title of report...",value="Analysis"),
textInput("description", "Customize a description of the report...", value=""),
downloadButton("report", "Generate report"))
)
)
)
# Define server logic ----
server <- function(input, output) {
# Accumulator for everything the downloadable report embeds: each tab
# stores its rendered plot/table here as a side effect of rendering.
report_vals <- reactiveValues(
QCeffectPlot=NULL,
cvviolin=NULL,
distprotein=NULL,
upset=NULL,
datacompleteness=NULL,
corplot=NULL,
available_sets=NULL,
"viewviolin_for_protein data"=NULL,
"viewsummary_for_protein data"=NULL,
"viewviolin_for_termini data"=NULL,
"viewsummary_for_termini data"=NULL,
"viewviolin_for_peptide data"=NULL,
"viewsummary_for_peptide data"=NULL,
"viewviolin_for_PTM data"=NULL,
"viewsummary_for_PTM data"=NULL,
volcanoplot=NULL,
dmr=NULL,
clustering=NULL,
IDV_boxplot=NULL,
IDV_corrplot=NULL,
IDV_corrtable=NULL,
circosplot=NULL)
# Data input ----
# Raw protein intensity table from the uploaded file.
# Fix: anchored, escaped extension match — the original regex ".xlsx"
# matches any character before "xlsx" anywhere in the name, so a file
# like "run_xlsx.csv" hit both branches and other extensions left `df`
# undefined.  Coercion to data.frame is done once at the end, matching
# the sibling readers (termini/PTM/peptide).
protein_data<-reactive({
req(input$protein_data)
if (grepl("\\.xlsx$", input$protein_data$name)) {
df <- read.xlsx(input$protein_data$datapath)
} else if (grepl("\\.csv$", input$protein_data$name)) {
df <- read.csv(input$protein_data$datapath, header=TRUE, check.names=FALSE)
} else {
validate(need(FALSE, "Please upload a .csv or .xlsx file."))
}
data.frame(df)
})
# Raw termini / PTM / peptide tables.  Same fix as protein_data: the
# extension is matched anchored at the end of the filename with an
# escaped dot, and an unsupported extension yields a validation message
# instead of an "object 'df' not found" error.
termini_data<-reactive({
req(input$termini_data)
if (grepl("\\.xlsx$", input$termini_data$name)) {
df <- read.xlsx(input$termini_data$datapath)
} else if (grepl("\\.csv$", input$termini_data$name)) {
df <- read.csv(input$termini_data$datapath, header=TRUE, check.names=FALSE)
} else {
validate(need(FALSE, "Please upload a .csv or .xlsx file."))
}
data.frame(df)
})
PTM_data<-reactive({
req(input$PTM_data)
if (grepl("\\.xlsx$", input$PTM_data$name)) {
df <- read.xlsx(input$PTM_data$datapath)
} else if (grepl("\\.csv$", input$PTM_data$name)) {
df <- read.csv(input$PTM_data$datapath, header=TRUE, check.names=FALSE)
} else {
validate(need(FALSE, "Please upload a .csv or .xlsx file."))
}
data.frame(df)
})
peptide_data<-reactive({
req(input$peptide_data)
if (grepl("\\.xlsx$", input$peptide_data$name)) {
df <- read.xlsx(input$peptide_data$datapath)
} else if (grepl("\\.csv$", input$peptide_data$name)) {
df <- read.csv(input$peptide_data$datapath, header=TRUE, check.names=FALSE)
} else {
validate(need(FALSE, "Please upload a .csv or .xlsx file."))
}
data.frame(df)
})
# data integration ----
# Design-of-experiment (meta) table, normalized by setdoe() (project
# helper).  Same extension-matching fix as the raw-data readers.
DoE0<-reactive({
req(input$meta_data)
if (grepl("\\.xlsx$", input$meta_data$name)) {
df <- read.xlsx(input$meta_data$datapath)
} else if (grepl("\\.csv$", input$meta_data$name)) {
df <- read.csv(input$meta_data$datapath, header=TRUE, check.names=FALSE)
} else {
validate(need(FALSE, "Please upload a .csv or .xlsx meta file."))
}
dt<-setdoe(data.frame(df))
dt
})
# UI: meta column that labels technical replicas.
output$replicacol<-renderUI({
selectizeInput(inputId = "replicacol",
label="Select the column indicates replica",
choices = colnames(DoE0())[-1])
})
# Two-column mapping: sample name -> replica label.  Fix: return the
# value directly instead of leaking it through a dead `df<-` assignment
# on the last line.
replica_list<-reactive({
validate(need(input$replicacol, ""))
cbind(DoE0()[,1], DoE0()[,input$replicacol])
})
# Effective design table: when replica averaging is on, the replica
# label becomes the sample identifier and duplicate rows collapse.
DoE<-reactive({
dt<-DoE0()
if (input$whether_average_replica==TRUE) {
dt[,1]<-dt[,input$replicacol]
dt[,input$replicacol]<-NULL
dt<-unique(dt)
}
dt
})
# Assembles the uploaded datasets into one named list; each entry is the
# result of inputraw() (project helper) keyed by dataset name.  When the
# user opted in, the protein matrix is replaced by the peptide-derived
# one (peptide_based_fixed_data is defined elsewhere in this file).
data_collection0<-reactive({
ls<-list()
if(input$whether_protein_file==TRUE){ls[["protein data"]]<-inputraw(protein_data(),DoE0(),input$protein_col_id)}
if(input$whether_termini_file==TRUE){ls[["termini data"]]<-inputraw(termini_data(),DoE0(),input$termini_col_id)}
if(input$whether_peptide_file==TRUE){ls[["peptide data"]]<-inputraw(peptide_data(),DoE0(),input$peptide_col_id)}
if(input$whether_PTM_file==TRUE){ls[["PTM data"]]<-inputraw(PTM_data(),DoE0(),input$PTM_col_id)}
if(input$whether_replace_protein==TRUE){ls[["protein data"]]$data<-peptide_based_fixed_data()}
ls
})
# Optionally batch-corrects and/or replica-averages every uploaded
# dataset.  Fix: iterate over the datasets that actually exist — the
# original indexed all four names unconditionally, which passed NULL
# into QCeffectPlot()/averagereplica() (and created stub list entries)
# whenever a file type was not uploaded.
data_collection<-reactive({
ls<-data_collection0()
if (input$whethercorrectbatch==TRUE) {
for (nm in names(ls)) {
ls[[nm]]$data<-QCeffectPlot(ls[[nm]]$data, DoE0(), input$batch_col,
input$order_col, input$factor_cols,
input$correctmethod)[["batchcorrectedmatrix"]]
}
}
if (input$whether_average_replica==TRUE) {
for (nm in names(ls)) {
ls[[nm]]$data<-averagereplica(ls[[nm]]$data, replica_list())
}
}
ls
})
# annotation tool ----
output$select_dataset_for_anno<-renderUI({
cc<-c("termini data", "PTM data")[c(input$whether_termini_file, input$whether_PTM_file)]
selectInput("select_dataset_for_anno",
label="Choose the dataset for annotation",
choices=cc,
selected=NULL)
})
dataforanno<-reactive({
validate(need(!is.null(input$select_dataset_for_anno),""))
if (input$select_dataset_for_anno=="termini data"){df<-termini_data()}
if (input$select_dataset_for_anno=="PTM data"){df<-PTM_data()}
df
})
output$anno_idf<-renderUI({
validate(need(!is.null(dataforanno()),""))
coln<-colnames(dataforanno())
selectizeInput(inputId = "anno_idf",
label="Select the column of identifier for data",
choices = coln)
})
output$anno_seq<-renderUI({
validate(need(!is.null(dataforanno()),""))
coln<-colnames(dataforanno())
selectizeInput(inputId = "anno_seq",
label="Select the column of stripped sequence for data",
choices = coln)
})
annotation_res<-eventReactive(input$annoButton,{
annotationtool(dataforanno()[,input$anno_idf], dataforanno()[,input$anno_seq], input$lseqwindow)}
)
output$downloadAnnotation <- downloadHandler(
filename = function() {
paste("AnnotationResults", ".csv", sep = "")
},
content = function(file) {
write.csv(annotation_res(), file, row.names = FALSE)
}
)
output$annotationpreview<-renderDataTable({
annotation_res()[c(1:20),c(1:4,9:10)]
})
# peptide calculator ----
peptide_based_fixed_data<-eventReactive(
input$pepcalButton,{
calIntPep(peptide_data(),input$sumform, DoE0(),input$peptide_col_ACC)
})
output$peptidepreview<-renderDataTable({
peptide_based_fixed_data()[c(1:20),c(1:6)]
})
# QC plots ----
standardQC_meta<-reactive({
req(input$standardQC_meta)
if(grepl(".xlsx",input$standardQC_meta$name)){
df <- read.xlsx(input$standardQC_meta$datapath)}
if (grepl(".csv",input$standardQC_meta$name)){
df <- read.csv(input$standardQC_meta$datapath, header=TRUE, check.names=FALSE)
}
setdoe(data.frame(df))
})
standardQC_data<-reactive({
req(input$standardQC_data)
if(grepl(".xlsx",input$standardQC_data$name)){
df <- read.xlsx(input$standardQC_data$datapath)}
if (grepl(".csv",input$standardQC_data$name)){
df <- read.csv(input$standardQC_data$datapath, header=TRUE, check.names=FALSE)
}
aa<-inputraw(data.frame(df),standardQC_meta(),1)
aa[["data"]]
})
output$batch_col<-renderUI({
validate(need(standardQC_data(),""),need(standardQC_meta(),""))
selectInput(inputId = "batch_col",
label="select the column indicates batch of samples:",
choices=colnames(standardQC_meta())[-1])
})
output$order_col<-renderUI({
validate(need(standardQC_data(),""),need(standardQC_meta(),""))
selectInput(inputId = "order_col",
label="select the column indicates running order of samples:",
choices=colnames(standardQC_meta())[-1])
})
output$factor_cols<-renderUI({
validate(need(standardQC_data(),""),need(standardQC_meta(),""))
selectInput(inputId = "factor_cols",
label="select the factors you would like to include in annotation",
choices = colnames(standardQC_meta())[-1],
selected = NULL, multiple=TRUE)
})
standardQCoutput<-eventReactive(
input$standar_batcheffectdButton,{
report_vals$QCeffectPlot<-QCeffectPlot(standardQC_data(),standardQC_meta(),input$batch_col,
input$order_col, input$factor_cols, input$correctmethod)
report_vals$QCeffectPlot
})
output$standard_batcheffectplots <- renderPlot({
grid.arrange(grobs=standardQCoutput()[["plot"]])
})
output$standard_batcheffect_clustering <- renderPlot({
standardQCoutput()[["clustering"]]
})
output$standard_batcheffect_heatmap <- renderPlot({
standardQCoutput()[["heatmap"]]
})
output$DownloadCorrectedQCData <- downloadHandler(
filename = function() {
paste("CorrectedQCData", ".csv", sep = "")
},
content = function(file) {
write.csv(standardQCoutput()[["batchcorrectedmatrix"]], file, row.names = TRUE)
}
)
output$selectdata_QC<-renderUI({
dataset<-c("protein data", "termini data", "peptide data", "PTM data")[
c(input$whether_protein_file,input$whether_termini_file,input$whether_peptide_file, input$whether_PTM_file)]
selectInput(inputId = "selectdata_QC",
label="Select data for QC plots:",
choices=dataset)
})
output$group<-renderUI({
DoE<-DoE()
selectizeInput(inputId = "group",
label="Select group column for grouped graphs",
choices = colnames(DoE)[-1])
})
output$group2<-renderUI({
DoE<-DoE()
selectizeInput(inputId = "group2",
label="Select group column for grouped graphs",
choices = colnames(DoE)[-1])
})
output$graph1 <- renderPlot({
validate(need(input$selectdata_QC,""))
report_vals$cvviolin<-cvplots(data_collection()[[input$selectdata_QC]]$data,DoE())
report_vals$cvviolin})
output$graph3 <- renderPlot({
validate(need(input$selectdata_QC,""))
report_vals$distprotein<-distIndProtein(data_collection()[[input$selectdata_QC]]$data,DoE(),input$group)
report_vals$distprotein})
output$graph4 <- renderPlot({
validate(need(input$selectdata_QC,""))
report_vals$upset<-upsetplot(data_collection()[[input$selectdata_QC]]$data,DoE(),input$group2)
report_vals$upset})
output$graph5 <- renderPlot({
validate(need(input$selectdata_QC,""))
report_vals$datacompleteness<-datacompleteness(data_collection()[[input$selectdata_QC]]$data,DoE())
report_vals$datacompleteness})
output$graph6 <- renderPlot({
validate(need(input$selectdata_QC,""))
report_vals$corplot<-corplot(data_collection()[[input$selectdata_QC]]$data)
report_vals$corplot})
# pre process ----
react_data_collection<-reactiveValues()
react_na_index<-reactiveValues()
output$preprocessing_panel<-renderUI({
available_sets<-names(data_collection())
report_vals$available_sets<-available_sets
cc<-c("No normalization",
"median randomization",
"normalization over same protein and samples under same condition",
"normalization over samples under same condition")
lapply(available_sets,function(x){
output[[paste0("filter_level_for_",x)]]<-
renderUI(
numericInput(inputId = paste0("filter_level_for_",x),
label=paste0("select the row filter level (%) for ", x),
value=0)
)
})
lapply(available_sets,function(x){
output[[paste0("normalization_method_for_",x)]]<-
renderUI(
selectInput(inputId = paste0("normalization_method_for_",x),
label=paste0("select the normalization method for ", x),
choices=cc[c(TRUE,TRUE,(x %in% c("termini data","PTM data")), TRUE)])
)
})
lapply(available_sets, function(x){
output[[paste0("norm_condition_for_",x)]]<-renderUI({
validate(need(input[[paste0("normalization_method_for_",x)]],""))
if (input[[paste0("normalization_method_for_",x)]] %in% c("normalization over same protein and samples under same condition",
"normalization over samples under same condition")){
cc<-colnames(DoE())[-1]
}else{cc<-c(NULL)}
selectInput(inputId = paste0("norm_condition_for_",x),
label="select the condition",
choices=cc)
})
})
lapply(available_sets,function(x){
output[[paste0("protein_anno_for_",x)]]<-
renderUI({
validate(need(input[[paste0("normalization_method_for_",x)]]=="normalization over same protein and samples under same condition",""))
selectInput(inputId = paste0("protein_anno_for_",x),
label=paste0("select the column cotains protein groups for ", x),
choices=colnames(data_collection()[[x]]$other_annotation))
})
})
lapply(available_sets,function(x){
output[[paste0("imputation_method_for_",x)]]<-
renderUI(
selectInput(inputId = paste0("imputation_method_for_",x),
label="Imputation Method",
choices = c( "No imputation",
"Down-shifted Normal samples",
"MinProb",
"knn",
"min"),
selected = NULL)
)
})
lapply(available_sets, function(x){
output[[paste0("impute_condition_for_",x)]]<-renderUI({
validate(need(input[[paste0("imputation_method_for_",x)]],""))
if (input[[paste0("imputation_method_for_",x)]]== "Down-shifted Normal samples"){
cc<-colnames(DoE())[-1]
}else{cc<-c(NULL)}
selectInput(inputId = paste0("impute_condition_for_",x),
label="select the condition",
choices=cc)
})
})
lapply(available_sets,function(x){
output[[paste0("viewviolin_for_",x)]]<-
renderPlot({
validate(need(data_collection(),""))
ctitle<-paste0(c("filtered for ",input[[paste0("filter_level_for_",x)]],
"% completeness, normalized by ", input[[paste0("normalization_method_for_",x)]],"\n",
"imputation by ", input[[paste0("imputation_method_for_",x)]]), collapse="")
aa<-preprocessing(x,data_collection(),DoE(),
input[[paste0("filter_level_for_",x)]],
input[[paste0("normalization_method_for_",x)]],
input[[paste0("imputation_method_for_",x)]],
input[[paste0("norm_condition_for_",x)]],
input[[paste0("impute_condition_for_",x)]],
input[[paste0("protein_anno_for_",x)]])
react_data_collection[[x]]<-aa[["data"]]
react_na_index[[x]]<-aa[["na.index"]]
report_vals[[paste0("viewviolin_for_",x)]]<-plotviolin(aa[["data"]],ctitle)
report_vals[[paste0("viewviolin_for_",x)]]
})
})
lapply(available_sets,function(x){
output[[paste0("viewsummary_for_",x)]]<-
renderPrint({
report_vals[[paste0("viewsummary_for_",x)]]<-basicStats(react_data_collection[[x]])
report_vals[[paste0("viewsummary_for_",x)]]
})
})
lapply(available_sets, function(x){
output[[paste0("DownloadProcessedData_for",x)]]<- downloadHandler(
filename = function() {
paste("ProcessedData_", x, ".csv", sep = "")
},
content = function(file) {
write.csv(react_data_collection[[x]], file, row.names = TRUE)
}
)
})
myTabs = lapply(available_sets, function(x){
tabPanel(title=x,
uiOutput(paste0("filter_level_for_",x)),
uiOutput(paste0("normalization_method_for_",x)),
uiOutput(paste0("norm_condition_for_",x)),
uiOutput(paste0("protein_anno_for_",x)),
uiOutput(paste0("imputation_method_for_",x)),
uiOutput(paste0("impute_condition_for_",x)),
plotOutput(paste0("viewviolin_for_",x)),
box(width=12,
column(width=12,verbatimTextOutput(paste0("viewsummary_for_",x)),style = "height:500px; overflow-y: scroll;overflow-x: scroll;")),
downloadButton(paste0("DownloadProcessedData_for",x),"Download processed data")
)
})
do.call(tabsetPanel, myTabs)
})
# statistical inference ----
output$selectdata_SI<-renderUI({
dataset<-c("protein data", "termini data", "peptide data", "PTM data")[
c(input$whether_protein_file,input$whether_termini_file,input$whether_peptide_file, input$whether_PTM_file)]
selectInput(inputId = "selectdata_SI",
label="Select data for statistical inference:",
choices=dataset)
})
# equivalence test FoI and FoI levels
output$blockfactor<-renderUI({
DoE<-DoE()
selectInput(inputId = "blockfactor",
label="Blocking on...(supports 2-level factors only)",
choices = c(colnames(DoE)[-1]),
selected=NULL)
})
output$FoI<-renderUI({
DoE<-DoE()
selectInput(inputId = "FoI",
label="test the factor of...",
choices = colnames(DoE)[-1],
selected = NULL)
})
output$colorfactor<-renderUI({
DoE<-DoE()
selectInput(inputId = "colorfactor",
label="color by...",
choices = c(colnames(DoE)[-1]),
selected = NULL)
})
# output$eqFoI<-renderUI({
# DoE<-DoE()
# selectInput(inputId = "eqFoI",
# label="test the factor of...",
# choices = c(colnames(DoE)[-1]),
# selected = NULL)
# })
# output$eqFoIlevels<-renderUI({
# DoE<-DoE()
# selectInput(inputId = "eqFoIlevels",
# label="test the level of...",
# choices = unique(DoE[,input$eqFoI]),
# selected = NULL, multiple=TRUE)
# })
# output$eqtestres1<- renderPlot({
# validate(need(input$eqFoIlevels[2], "Please select two levels to conduct this test."))
# report_vals$eqtest1<-eqtest.row(react_fixed_data(), DoE(), input$eqFoI,
# input$eqFoIlevels[1], input$eqFoIlevels[2],
# input$lowerbound, input$upperbound)
# report_vals$eqtest1
# })
# output$eqtestres2<-renderPlot({
# validate(need(input$eqFoIlevels[2], "Please select two levels to conduct this test."))
# report_vals$eqtest2<-eqtest.all(react_fixed_data(), DoE(), input$eqFoI,
# input$eqFoIlevels[1], input$eqFoIlevels[2],
# input$lowerbound, input$upperbound)
# report_vals$eqtest2
# })
output$FoIlevels<-renderUI({
DoE<-DoE()
selectInput(inputId = "FoIlevels",
label="test the level of...",
choices = unique(DoE[,input$FoI]),
selected = NULL, multiple=TRUE)
})
listoutput<-reactiveValues()
output$volcanoplot <- renderPlot({
validate(need(input$FoIlevels[2], "Please select two levels to conduct this test."))
listoutput[[input$selectdata_SI]]<-testingDE(react_data_collection[[input$selectdata_SI]], DoE(),input$DEtest,
input$FoI, input$FoIlevels, input$whetherblock,input$blockfactor,
input$whetherweighting, input$NAweights, react_na_index[[input$selectdata_SI]], input$whetherstandardDE)
report_vals$volcanoplot<-listoutput[[input$selectdata_SI]][["graph"]]
report_vals$volcanoplot
})
output$DownloadDEresults <- downloadHandler(
filename = function() {
paste("StatisticalTestResultsfor_", input$selectdata_SI, ".csv", sep = "")
},
content = function(file) {
write.csv(listoutput[[input$selectdata_SI]][["alldf"]], file, row.names = TRUE)
}
)
# dimensionality reduction ----
output$selectdata_DR<-renderUI({
dataset<-c("protein data", "termini data", "peptide data", "PTM data")[
c(input$whether_protein_file,input$whether_termini_file,input$whether_peptide_file, input$whether_PTM_file)]
selectInput(inputId = "selectdata_DR",
label="Select data for dimensionality reduction:",
choices=dataset)
})
output$dimenreduction <- renderPlot({
validate(need(input$selectdata_DR,""))
if (input$DRrows=="all"){includedrows=c(1:nrow(react_data_collection[[input$selectdata_DR]]))}else{
includedrows=listoutput[[input$selectdata_DR]][["DEdf"]]$name}
report_vals$dmr<-dimen.reduce(react_data_collection[[input$selectdata_DR]], DoE(), input$DRmethod, input$colorfactor, input$tSNEper, includedrows)
report_vals$dmr
})
# clustering ----
output$selectdata_cluster<-renderUI({
dataset<-c("protein data", "termini data", "peptide data", "PTM data")[
c(input$whether_protein_file,input$whether_termini_file,input$whether_peptide_file, input$whether_PTM_file)]
selectInput(inputId = "selectdata_cluster",
label="Select data for clustering:",
choices=dataset)
})
output$cluster <- renderPlot({
validate(need(input$selectdata_cluster,""))
if (input$rows=="all"){includedrows=c(1:nrow(react_data_collection[[input$selectdata_cluster]]))}else{
includedrows=listoutput[[input$selectdata_cluster]][["DEdf"]]$name}
if (input$Cmethod=="Hierarchical Clustering"){
report_vals$clustering<-fcluster(react_data_collection[[input$selectdata_cluster]], input$Cmethod, includedrows, input$whetherlabel, 0)
}else{
report_vals$clustering<-fcluster(react_data_collection[[input$selectdata_cluster]], input$Cmethod, includedrows, FALSE, input$clusternum)
}
report_vals$clustering
})
# Individual Protein Visualization ----
output$selectdata_IDV<-renderUI({
dataset<-c("protein data", "termini data", "peptide data", "PTM data")[
c(input$whether_protein_file,input$whether_termini_file,input$whether_peptide_file, input$whether_PTM_file)]
selectInput(inputId = "selectdata_IDV",
label="Select dataset for individual protein visualization (valid for boxplot and corrplot):",
choices=dataset)
})
output$proteingroupID<-renderUI({
validate(need(input$proteingroup=="specify below...",""))
textInput("proteingroupID", "Which to visualize? (separate by comma)", value = "")
})
rows_include<-reactive({
validate(need(input$selectdata_IDV,""))
if(input$proteingroup=="significant in differential expression"){
r<-listoutput[[input$selectdata_IDV]][["DEdf"]]$name}
if(input$proteingroup=="specify below..."){
r<-unlist(strsplit(input$proteingroupID,","))}
r
})
output$IDV_table<-DT::renderDataTable({
validate(need(rows_include(),""))
options = list(paging=FALSE)
react_data_collection[[input$selectdata_IDV]][rows_include(),]
})
output$facet_factor<-renderUI({
selectizeInput(inputId = "facet_factor",
label="Select the faceting variable in the boxplot:",
choices = colnames(DoE0())[-1])
})
output$IDV_boxplot<-renderPlot({
validate(need(rows_include(),""))
report_vals$IDV_boxplot<-IDV_plot(react_data_collection[[input$selectdata_IDV]][rows_include(),],input$facet_factor,DoE())
report_vals$IDV_boxplot
})
output$IDV_corrplot<-renderPlot({
validate(need(rows_include(),""))
report_vals$IDV_corrplot<-do.call(corrplot_customize,list(data=react_data_collection[[input$selectdata_IDV]][rows_include(),],
order=input$corrorder,
p_threshold=input$p_threshold,
ncluster=input$ncluster,
colorscheme=input$colorscheme))
report_vals$IDV_corrplot
})
output$IDV_corrtable<-DT::renderDataTable({
validate(need(rows_include(),""))
options = list(paging=FALSE)
data<-react_data_collection[[input$selectdata_IDV]][rows_include(),]
t_data<-t(data)
M<-cor(t_data,use="pairwise.complete.obs")
for (i in rownames(M)){
if (sum(!is.na(M[,i]))<2){
M<-M[-which(rownames(M)==i),-which(rownames(M)==i)]}
}
report_vals$IDV_corrtable<-M
M
})
output$proteinACC<-renderUI({
validate(need((input$selectdata_IDV=="protein data")&(!is.null(data_collection()[["PTM data"]])),""))
selectInput("proteinACC","Select the column specifying protein accession in the PTM data:",
choices=c(colnames(data_collection()[["PTM data"]]$other_annotation)[-1]))
})
output$col_position<-renderUI({
validate(need((input$selectdata_IDV=="protein data")&(!is.null(data_collection()[["PTM data"]])),""))
selectInput("col_position","Select the column specifying modification position in the PTM data:",
choices=c(colnames(data_collection()[["PTM data"]]$other_annotation)[-1]))
})
output$modificationType<-renderUI({
validate(need((input$selectdata_IDV=="protein data")&(!is.null(data_collection()[["PTM data"]])),""))
selectInput("modificationType","Select the column specifying modification type in the PTM data:",
choices=c(colnames(data_collection()[["PTM data"]]$other_annotation)[-1]))
})
output$IDVseperate_panel<-renderUI({
validate(need((input$selectdata_IDV=="protein data")&(!is.null(data_collection()[["PTM data"]])),
"This function is valid only when protein data selected previously and PTM data available."))
available_proteins<-rows_include()
lapply(available_proteins,function(x){
output[[paste0("CLG_for_",x)]]<-renderPlot({
# data_collection()[["PTM data"]][["other_annotation"]][,input$proteinACC]
#if (length(which(data_collection()[["PTM data"]][["other_annotation"]][,input$proteinACC]==x))==0){
# o<-renderText("No PTM protein groups match this protein selected")
#}else{
ind<-which(data_collection()[["PTM data"]][["other_annotation"]][,input$proteinACC]==x)
report_vals[[paste0("CLG_for_",x)]]<-combined_lolipop_plot(x, data_collection()[["PTM data"]],ind, input$proteinACC,input$col_position,input$modificationType)
report_vals[[paste0("CLG_for_",x)]]
})
})
myTabs = lapply(available_proteins, function(x){
tabPanel(title=x,
plotOutput(paste0("CLG_for_",x)))
})
do.call(tabsetPanel, myTabs)
})
output$selectdata_circos<-renderUI({
dataset<-c("termini data", "peptide data", "PTM data")[c(input$whether_termini_file,input$whether_peptide_file, input$whether_PTM_file)]
checkboxGroupInput("selectdata_circos",
"please select dataset for circosplot:",
choices = dataset)
})
output$selectcol_proteinACC_termini<-renderUI({
validate(need("termini data" %in% input$selectdata_circos,""))
selectInput("selectcol_proteinACCs_termini",
"please select column of protein groups:",
choices = colnames(data_collection()[["termini data"]][["other_annotation"]])[-1])
})
output$selectcol_proteinACC_peptide<-renderUI({
validate(need("peptide data" %in% input$selectdata_circos,""))
selectInput("selectcol_proteinACCs_peptide",
"please select column of protein groups:",
choices = colnames(data_collection()[["peptide data"]][["other_annotation"]])[-1])
})
output$selectcol_proteinACC_PTM<-renderUI({
validate(need("PTM data" %in% input$selectdata_circos,""))
selectInput("selectcol_proteinACCs_PTM",
"please select column of protein groups:",
choices = colnames(data_collection()[["PTM data"]][["other_annotation"]])[-1])
})
output$circosplot<-renderPlot({
validate(need(input$selectdata_circos,""))
proteinACC<-c(input$selectcol_proteinACCs_termini, input$selectcol_proteinACCs_peptide, input$selectcol_proteinACCs_PTM)
names(proteinACC)<-input$selectdata_circos
aa<-circosplot.fun(listoutput,data_collection(),proteinACC,input$selectdata_circos)
report_vals$circosplot<-aa
aa
})
# report generate ----
output$report <- downloadHandler(
filename = "report.html",
content = function(file) {
# Copy the report file to a temporary directory before processing it, in
# case we don't have write permissions to the current working dir (which
# can happen when deployed).
#tempReport<-file.path(tempdir(), "report.rmd")
#file.copy("report.Rmd", tempReport, overwrite = TRUE)
# Knit the document, passing in the `params` list, and eval it in a
src <- normalizePath("report.rmd")
owd <- setwd(tempdir())
on.exit(setwd(owd))
file.copy(src, "report.rmd", overwrite = TRUE)
report_vals$title<-input$title
report_vals$description<-input$description
params0<-list(imported=report_vals)
# child of the global environment (this isolates the code in the document
# from the code in this app).
out<-rmarkdown::render("report.rmd", output_file = file,params = params0)
file.rename(out, file)
}
)
}
# Run the app ----
shinyApp(ui = ui, server = server) |
suppressPackageStartupMessages(library("optparse"))
# Make option list
option_list <- list(
make_option(c("-i", "--input"), type="character", default=NULL, help="Input 4D functional image (required)", metavar="file"),
make_option(c("-m", "--mask"), type="character", default=NULL, help="3D brain mask (required)", metavar="file"),
make_option("--min", type="double", default=0.5, help="Percent of neighboring voxels that must be non-zero if voxel is to be used (otherwise will have a value o 0) [default: %default]", metavar="0-1"),
make_option("--nei1", type="integer", default=1, help="voxel distance #1 (more of an internal option) [default: %default]", metavar="positive-number"),
make_option("--nei2", type="integer", default=3, help="voxel distance #2 (more of an internal option) [default: %default]", metavar="positive-number"),
make_option(c("-c", "--forks"), type="integer", default=1, help="Number of computer processors to use in parallel by forking the complete processing stream [default: %default]", metavar="number"),
make_option("--overwrite", action="store_true", default=FALSE, help="Overwrite output if it already exists"),
make_option(c("-v", "--verbose"), action="store_true", default=TRUE, help="Print extra output [default]"),
make_option(c("-q", "--quiet"), action="store_false", dest="verbose", help="Print little output")
)
# Make class/usage
parser <- OptionParser(usage = "%prog [options] outfile",
option_list=option_list,
add_help_option=TRUE)
# Parse
parser_out <- parse_args(parser, positional_arguments = TRUE)
args <- parser_out$args
opts <- parser_out$options
# Check options/arguments
if (length(args) != 1) {
print_help(parser)
quit(save="no", status=1)
}
saved_opts <- list(args=args, opts=opts)
tryCatch({
# load connectir
suppressWarnings(suppressPackageStartupMessages(library("connectir")))
# parallel processing setup
set_parallel_procs(opts$forks, 1, opts$verbose)
# use foreach parallelization and shared memory?
parallel_forks <- ifelse(opts$forks == 1, FALSE, TRUE)
# set output
opts$outfile <- args[1]
###
# Check Inputs
###
vcat(opts$verbose, "Checking options")
if (is.null(opts$input))
stop("Must specify input (-i/--input)")
if (is.null(opts$mask))
stop("Must specify mask (-m/--mask)")
if (!file.exists(opts$input))
vstop("Input '%s' doesn't exist", opts$input)
if (!file.exists(opts$mask))
vstop("Mask '%s' doesn't exist", opts$mask)
if (file.exists(opts$outfile) && !opts$overwrite)
vstop("Output '%s' already exists (consider using --overwrite)", opts$outfile)
###
# Setup
###
vcat(opts$verbose, "Setup inputs")
parallel <- opts$forks > 1
input <- abspath(opts$input)
mask <- abspath(opts$mask)
outfile <- abspath(opts$outfile)
overwrite <- opts$overwrite
verbose <- opts$verbose
nei <- opts$nei1
nei.dist <- opts$nei2
min.nei <- opts$min
###
# Run IT!
###
start.time <- Sys.time()
rs <- wrap_reho(input, mask, outfile,
overwrite=overwrite, verbose=verbose, parallel=parallel,
nei=nei, nei.dist=nei.dist, min.nei=min.nei)
end.time <- Sys.time()
vcat(verbose, "Done! Total computation time: %.1f minutes\n",
as.numeric(end.time-start.time, units="mins"))
}, warning = function(ex) {
cat("\nA warning was detected: \n")
cat(ex$message, "\n\n")
cat("Called by: \n")
print(ex$call)
cat("\nSaving options...\n")
save(saved_opts, file="called_options.rda")
}, error = function(ex) {
cat("\nAn error was detected: \n")
cat(ex$message, "\n\n")
cat("Called by: \n")
print(ex$call)
cat("\nSaving options...\n")
save(saved_opts, file="called_options.rda")
}, interrupt = function(ex) {
cat("\nSaving options...\n")
save(saved_opts, file="called_options.rda")
cat("\nKill signal sent. Trying to clean up...\n")
rm(list=ls())
gc(FALSE)
cat("...success\n")
}, finally = {
cat("\nRemoving everything from memory\n")
rm(list=ls())
gc(FALSE)
cat("...sucesss\n")
})
| /inst/scripts/connectir_reho_worker.R | no_license | onebacha/connectir | R | false | false | 4,184 | r | suppressPackageStartupMessages(library("optparse"))
# Make option list
option_list <- list(
make_option(c("-i", "--input"), type="character", default=NULL, help="Input 4D functional image (required)", metavar="file"),
make_option(c("-m", "--mask"), type="character", default=NULL, help="3D brain mask (required)", metavar="file"),
make_option("--min", type="double", default=0.5, help="Percent of neighboring voxels that must be non-zero if voxel is to be used (otherwise will have a value o 0) [default: %default]", metavar="0-1"),
make_option("--nei1", type="integer", default=1, help="voxel distance #1 (more of an internal option) [default: %default]", metavar="positive-number"),
make_option("--nei2", type="integer", default=3, help="voxel distance #2 (more of an internal option) [default: %default]", metavar="positive-number"),
make_option(c("-c", "--forks"), type="integer", default=1, help="Number of computer processors to use in parallel by forking the complete processing stream [default: %default]", metavar="number"),
make_option("--overwrite", action="store_true", default=FALSE, help="Overwrite output if it already exists"),
make_option(c("-v", "--verbose"), action="store_true", default=TRUE, help="Print extra output [default]"),
make_option(c("-q", "--quiet"), action="store_false", dest="verbose", help="Print little output")
)
# Make class/usage
parser <- OptionParser(usage = "%prog [options] outfile",
option_list=option_list,
add_help_option=TRUE)
# Parse
parser_out <- parse_args(parser, positional_arguments = TRUE)
args <- parser_out$args
opts <- parser_out$options
# Check options/arguments
if (length(args) != 1) {
print_help(parser)
quit(save="no", status=1)
}
saved_opts <- list(args=args, opts=opts)
tryCatch({
# load connectir
suppressWarnings(suppressPackageStartupMessages(library("connectir")))
# parallel processing setup
set_parallel_procs(opts$forks, 1, opts$verbose)
# use foreach parallelization and shared memory?
parallel_forks <- ifelse(opts$forks == 1, FALSE, TRUE)
# set output
opts$outfile <- args[1]
###
# Check Inputs
###
vcat(opts$verbose, "Checking options")
if (is.null(opts$input))
stop("Must specify input (-i/--input)")
if (is.null(opts$mask))
stop("Must specify mask (-m/--mask)")
if (!file.exists(opts$input))
vstop("Input '%s' doesn't exist", opts$input)
if (!file.exists(opts$mask))
vstop("Mask '%s' doesn't exist", opts$mask)
if (file.exists(opts$outfile) && !opts$overwrite)
vstop("Output '%s' already exists (consider using --overwrite)", opts$outfile)
###
# Setup
###
vcat(opts$verbose, "Setup inputs")
parallel <- opts$forks > 1
input <- abspath(opts$input)
mask <- abspath(opts$mask)
outfile <- abspath(opts$outfile)
overwrite <- opts$overwrite
verbose <- opts$verbose
nei <- opts$nei1
nei.dist <- opts$nei2
min.nei <- opts$min
###
# Run IT!
###
start.time <- Sys.time()
rs <- wrap_reho(input, mask, outfile,
overwrite=overwrite, verbose=verbose, parallel=parallel,
nei=nei, nei.dist=nei.dist, min.nei=min.nei)
end.time <- Sys.time()
vcat(verbose, "Done! Total computation time: %.1f minutes\n",
as.numeric(end.time-start.time, units="mins"))
}, warning = function(ex) {
cat("\nA warning was detected: \n")
cat(ex$message, "\n\n")
cat("Called by: \n")
print(ex$call)
cat("\nSaving options...\n")
save(saved_opts, file="called_options.rda")
}, error = function(ex) {
cat("\nAn error was detected: \n")
cat(ex$message, "\n\n")
cat("Called by: \n")
print(ex$call)
cat("\nSaving options...\n")
save(saved_opts, file="called_options.rda")
}, interrupt = function(ex) {
cat("\nSaving options...\n")
save(saved_opts, file="called_options.rda")
cat("\nKill signal sent. Trying to clean up...\n")
rm(list=ls())
gc(FALSE)
cat("...success\n")
}, finally = {
cat("\nRemoving everything from memory\n")
rm(list=ls())
gc(FALSE)
cat("...sucesss\n")
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/keys.R
\name{keysInput}
\alias{keysInput}
\title{Create a keys input control}
\usage{
keysInput(inputId, keys, global = FALSE)
}
\arguments{
\item{inputId}{The input slot that will be used to access the value.}
\item{keys}{A character vector of keys to bind. Examples include, \code{command},
\code{command+shift+a}, \verb{up down left right}, and more.}
\item{global}{Should keys work anywhere? If TRUE, keys are triggered when
inside a textInput.}
}
\description{
Create a key input that can be used to observe keys pressed by
the user.
}
\examples{
\dontrun{
ui <- fluidPage(
keysInput("keys", c(
"1",
"2",
"3",
"command+shift+k",
"up up down down left right left right b a enter"
)),
)
server <- function(input, output, session) {
observeEvent(input$keys, {
print(input$keys)
})
}
shinyApp(ui, server)
}
}
| /man/keysInput.Rd | no_license | cran/keys | R | false | true | 925 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/keys.R
\name{keysInput}
\alias{keysInput}
\title{Create a keys input control}
\usage{
keysInput(inputId, keys, global = FALSE)
}
\arguments{
\item{inputId}{The input slot that will be used to access the value.}
\item{keys}{A character vector of keys to bind. Examples include, \code{command},
\code{command+shift+a}, \verb{up down left right}, and more.}
\item{global}{Should keys work anywhere? If TRUE, keys are triggered when
inside a textInput.}
}
\description{
Create a key input that can be used to observe keys pressed by
the user.
}
\examples{
\dontrun{
ui <- fluidPage(
keysInput("keys", c(
"1",
"2",
"3",
"command+shift+k",
"up up down down left right left right b a enter"
)),
)
server <- function(input, output, session) {
observeEvent(input$keys, {
print(input$keys)
})
}
shinyApp(ui, server)
}
}
|
# This script calculates standardised time series of all climate variables (i.e. division by standard deviation).
project_path = "/Volumes/Seagate/PhD/Programming_Analysis/03_extremes_and_crop_yields/github"
parameter_list = create_stasticial_analysis_parameter_list(
crop = crop,
statistical_method = statistical_method,
type_of_analysis = type_of_analysis,
yield_dataset = "deepak",
crop_calendar = "agmip",
irrigation = "combined",
mask_growing_season_areas = TRUE,
detrending_climate = "ssa",
detrending_yield = "ssa",
grouping_by = "global",
use_time_as_predictor = FALSE,
use_location_as_predictor = FALSE,
use_only_regions_with_frost_days = FALSE,
res = 1.5,
year_min = 1961,
year_max = 2008,
oos_calculation = TRUE,
seed = 100,
n_groups_out_of_bag_sampling = 5,
save_files = TRUE,
source_area_harvested = "deepak",
standardise_variables = TRUE,
verbose = TRUE,
manual_subsetting = TRUE,
sampling_fraction = "max",
include_interaction_terms = FALSE
)
| /code/data_preparation/03_prepare_parameter_list.R | no_license | elisabethvogel/climate_extremes_agriculture | R | false | false | 1,031 | r | # This script calculates standardised time series of all climate variables (i.e. division by standard deviation).
project_path = "/Volumes/Seagate/PhD/Programming_Analysis/03_extremes_and_crop_yields/github"
parameter_list = create_stasticial_analysis_parameter_list(
crop = crop,
statistical_method = statistical_method,
type_of_analysis = type_of_analysis,
yield_dataset = "deepak",
crop_calendar = "agmip",
irrigation = "combined",
mask_growing_season_areas = TRUE,
detrending_climate = "ssa",
detrending_yield = "ssa",
grouping_by = "global",
use_time_as_predictor = FALSE,
use_location_as_predictor = FALSE,
use_only_regions_with_frost_days = FALSE,
res = 1.5,
year_min = 1961,
year_max = 2008,
oos_calculation = TRUE,
seed = 100,
n_groups_out_of_bag_sampling = 5,
save_files = TRUE,
source_area_harvested = "deepak",
standardise_variables = TRUE,
verbose = TRUE,
manual_subsetting = TRUE,
sampling_fraction = "max",
include_interaction_terms = FALSE
)
|
testlist <- list(x = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), y = numeric(0))
result <- do.call(netrankr:::checkPairs,testlist)
str(result) | /netrankr/inst/testfiles/checkPairs/libFuzzer_checkPairs/checkPairs_valgrind_files/1612798583-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 162 | r | testlist <- list(x = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), y = numeric(0))
result <- do.call(netrankr:::checkPairs,testlist)
str(result) |
#################################################################################################################
#*** Mackerel MSE
#*** Plot model fits
#*** based on CCAM package
#################################################################################################################
# NOTE(review): this script assumes a fitted CCAM model object `x` and a run
# identifier `name` already exist in the calling environment -- confirm with
# the driver script before sourcing.
#x <- get(load(file='Rdata/fit/fit.Rdata'))
# Output settings: image format and which diagnostic sections to run.
type <- 'png'
retro <- TRUE
res <- TRUE
procres <- TRUE
# Output directory for this run's figures (created if absent).
.wd <- paste0('img/fit/',name)
dir.create(.wd, showWarnings = FALSE)
### reference points
# Yield-per-recruit reference points based on the 1969-2016 recruitment series.
refBase <- ypr(x,rec.years=1969:2016)
# Year range of the fitted data; drop the first year for the recruitment plot.
yr <- range(x$data$year)
yr[1] <- yr[1]+1
### plots
# Each saveplot() call writes one diagnostic figure into .wd as <name>.<type>.
saveplot(srplot(x,curve=TRUE),name='sr',dim=c(17,10),wd=.wd,type=type)
saveplot(recplot(x),name='rec',dim=c(17,10),wd=.wd,type=type)
saveplot(recplot(x,years=yr[1]:yr[2]),name='rec_1969',dim=c(17,10),wd=.wd,type=type)
saveplot(recplot(x,trans=function(x)x),name='rec_log',dim=c(17,10),wd=.wd,type=type)
saveplot(catchplot(x,fleet = 1,ci=FALSE)+scale_y_continuous(limits=c(0,100000),expand = c(0,0)),name='catch',dim=c(17,10),wd=.wd,type=type)
saveplot(ssbplot(x)+scale_y_continuous(limits=c(0,9e5),expand = c(0,0)),name='ssb',dim=c(17,10),wd=.wd,type=type)
saveplot(fbarplot(x)+scale_y_continuous(limits=c(0,4),expand = c(0,0)),name='F',dim=c(17,10),wd=.wd,type=type)
saveplot(plot(refBase),name='RP',dim=c(14,14),wd=.wd,type=type)
saveplot(selplot(x),name='sel',dim=c(14,14),wd=.wd,type=type)
saveplot(expplot(x),name='exp',dim=c(17,10),wd=.wd,type=type)
saveplot(parplot(x),name='par',dim=c(17,17),wd=.wd,type=type)
saveplot(plot(x),name='plot_all',dim=c(17,20),wd=.wd,type=type)
saveplot(scplot(x),name='ssb_rel',dim=c(20,12),wd=.wd,type=type)
saveplot(prodplot(x),name='prod',dim=c(20,12),wd=.wd,type=type)
saveplot(kobeplot(x),name='kobe',dim=c(14,12),wd=.wd,type=type)
# Thicker default lines, then re-export the relative-SSB plot with them.
update_geom_defaults("line", list(size = 1))
saveplot(scplot(x),name='ssb_rel',dim=c(20,12),wd=.wd,type=type)
# Retrospective analysis: 7 peels; Mohn's rho is written next to the figures.
if(retro){
  r <- retro(x,year=7,parallell=FALSE,silent=TRUE) #maybe make plot with relative change
  save(r, file=paste0('Rdata/retro/',name,'_retro.Rdata'))
  saveplot(plot(r,ci=FALSE),name="retro",dim=c(25,20),wd=.wd,type=type)
  saveplot(plot(r,ci=TRUE),name="retro_CI",dim=c(25,20),wd=.wd,type=type)
  m <- round(mohn(r),2)
  write.table(m,paste0(.wd,"/mohn.txt"))
  #plot(unlist(lapply(lapply(r,ypr),'[','f40ssb')),type='l',ylab='SSBF40%',xlab='peel')
  #plot(unlist(lapply(lapply(r,ypr),'[','f40')),type='l',ylab='F40%',xlab='peel')
}
# Residual diagnostics for fleets 2 and 3.
if(res){
  myres <- residuals(x)
  saveplot(plot(myres,fleet=c(2,3),qq=FALSE),name="res",dim=c(30,10),wd=.wd,type=type)
}
# Process-error residuals.
if(procres){
  myprocres <- procres(x)
  saveplot(plot(myprocres,qq=FALSE),name="pe",dim=c(20,10),wd=.wd,type=type)
}
# residuals the old way (though they are wrong because of autocorrelation due to random effects)
# saveplot(resplot(x,fleets = 3,type=1),name="/res_index_1",dim=c(17,10),wd=.wd,type=type)
# saveplot(resplot(x,fleets = 3,type=2,out=1),name="/res_index_2",dim=c(17,10),wd=.wd,type=type)
# saveplot(resplot(x,fleets = 3,type=3),name="/res_index_3",dim=c(17,10),wd=.wd,type=type)
# saveplot(resplot(x,fleets = 3,type=4),name="/res_index_4",dim=c(17,10),wd=.wd,type=type)
# saveplot(resplot(x,fleets = 3,type=4,trans = exp),name="/res_index_5exp",dim=c(17,10),wd=.wd,type=type)
#
# saveplot(resplot(x,fleets = 2,type=1,low=c('red','orange'),high=c('grey','green','darkgreen')),name="/res_caa_1",dim=c(17,10),wd=.wd,type=type)
# saveplot(resplot(x,fleets = 2,type=2,out=3),name="/res_caa_2",dim=c(17,10),wd=.wd,type=type)
# saveplot(resplot(x,fleets = 2,type=3),name="/res_caa_3",dim=c(17,10),wd=.wd,type=type)
# saveplot(resplot(x,fleets = 2,type=4),name="/res_caa_4",dim=c(25,20),wd=.wd,type=type)
# saveplot(resplot(x,fleets = 2,type=5,std=TRUE),name="/res_caa_5",dim=c(25,20),wd=.wd,type=type)
# saveplot(resplot(x,fleets = 2,type=6),name="/res_caa_6",dim=c(17,10),wd=.wd,type=type)
# saveplot(resplot(x,fleets = 2,type=7),name="/res_caa_7",dim=c(17,10),wd=.wd,type=type)
| /Rscripts/plot_fit.R | no_license | adsmithca/mackerel_assessment | R | false | false | 3,959 | r | #################################################################################################################
#*** Mackerel MSE
#*** Plot model fits
#*** based on CCAM package
#################################################################################################################
# NOTE(review): this script assumes a fitted CCAM model object `x` and a run
# identifier `name` already exist in the calling environment -- confirm with
# the driver script before sourcing.
#x <- get(load(file='Rdata/fit/fit.Rdata'))
# Output settings: image format and which diagnostic sections to run.
type <- 'png'
retro <- TRUE
res <- TRUE
procres <- TRUE
# Output directory for this run's figures (created if absent).
.wd <- paste0('img/fit/',name)
dir.create(.wd, showWarnings = FALSE)
### reference points
# Yield-per-recruit reference points based on the 1969-2016 recruitment series.
refBase <- ypr(x,rec.years=1969:2016)
# Year range of the fitted data; drop the first year for the recruitment plot.
yr <- range(x$data$year)
yr[1] <- yr[1]+1
### plots
# Each saveplot() call writes one diagnostic figure into .wd as <name>.<type>.
saveplot(srplot(x,curve=TRUE),name='sr',dim=c(17,10),wd=.wd,type=type)
saveplot(recplot(x),name='rec',dim=c(17,10),wd=.wd,type=type)
saveplot(recplot(x,years=yr[1]:yr[2]),name='rec_1969',dim=c(17,10),wd=.wd,type=type)
saveplot(recplot(x,trans=function(x)x),name='rec_log',dim=c(17,10),wd=.wd,type=type)
saveplot(catchplot(x,fleet = 1,ci=FALSE)+scale_y_continuous(limits=c(0,100000),expand = c(0,0)),name='catch',dim=c(17,10),wd=.wd,type=type)
saveplot(ssbplot(x)+scale_y_continuous(limits=c(0,9e5),expand = c(0,0)),name='ssb',dim=c(17,10),wd=.wd,type=type)
saveplot(fbarplot(x)+scale_y_continuous(limits=c(0,4),expand = c(0,0)),name='F',dim=c(17,10),wd=.wd,type=type)
saveplot(plot(refBase),name='RP',dim=c(14,14),wd=.wd,type=type)
saveplot(selplot(x),name='sel',dim=c(14,14),wd=.wd,type=type)
saveplot(expplot(x),name='exp',dim=c(17,10),wd=.wd,type=type)
saveplot(parplot(x),name='par',dim=c(17,17),wd=.wd,type=type)
saveplot(plot(x),name='plot_all',dim=c(17,20),wd=.wd,type=type)
saveplot(scplot(x),name='ssb_rel',dim=c(20,12),wd=.wd,type=type)
saveplot(prodplot(x),name='prod',dim=c(20,12),wd=.wd,type=type)
saveplot(kobeplot(x),name='kobe',dim=c(14,12),wd=.wd,type=type)
# Thicker default lines, then re-export the relative-SSB plot with them.
update_geom_defaults("line", list(size = 1))
saveplot(scplot(x),name='ssb_rel',dim=c(20,12),wd=.wd,type=type)
# Retrospective analysis: 7 peels; Mohn's rho is written next to the figures.
if(retro){
  r <- retro(x,year=7,parallell=FALSE,silent=TRUE) #maybe make plot with relative change
  save(r, file=paste0('Rdata/retro/',name,'_retro.Rdata'))
  saveplot(plot(r,ci=FALSE),name="retro",dim=c(25,20),wd=.wd,type=type)
  saveplot(plot(r,ci=TRUE),name="retro_CI",dim=c(25,20),wd=.wd,type=type)
  m <- round(mohn(r),2)
  write.table(m,paste0(.wd,"/mohn.txt"))
  #plot(unlist(lapply(lapply(r,ypr),'[','f40ssb')),type='l',ylab='SSBF40%',xlab='peel')
  #plot(unlist(lapply(lapply(r,ypr),'[','f40')),type='l',ylab='F40%',xlab='peel')
}
# Residual diagnostics for fleets 2 and 3.
if(res){
  myres <- residuals(x)
  saveplot(plot(myres,fleet=c(2,3),qq=FALSE),name="res",dim=c(30,10),wd=.wd,type=type)
}
# Process-error residuals.
if(procres){
  myprocres <- procres(x)
  saveplot(plot(myprocres,qq=FALSE),name="pe",dim=c(20,10),wd=.wd,type=type)
}
# residuals the old way (though they are wrong because of autocorrelation due to random effects)
# saveplot(resplot(x,fleets = 3,type=1),name="/res_index_1",dim=c(17,10),wd=.wd,type=type)
# saveplot(resplot(x,fleets = 3,type=2,out=1),name="/res_index_2",dim=c(17,10),wd=.wd,type=type)
# saveplot(resplot(x,fleets = 3,type=3),name="/res_index_3",dim=c(17,10),wd=.wd,type=type)
# saveplot(resplot(x,fleets = 3,type=4),name="/res_index_4",dim=c(17,10),wd=.wd,type=type)
# saveplot(resplot(x,fleets = 3,type=4,trans = exp),name="/res_index_5exp",dim=c(17,10),wd=.wd,type=type)
#
# saveplot(resplot(x,fleets = 2,type=1,low=c('red','orange'),high=c('grey','green','darkgreen')),name="/res_caa_1",dim=c(17,10),wd=.wd,type=type)
# saveplot(resplot(x,fleets = 2,type=2,out=3),name="/res_caa_2",dim=c(17,10),wd=.wd,type=type)
# saveplot(resplot(x,fleets = 2,type=3),name="/res_caa_3",dim=c(17,10),wd=.wd,type=type)
# saveplot(resplot(x,fleets = 2,type=4),name="/res_caa_4",dim=c(25,20),wd=.wd,type=type)
# saveplot(resplot(x,fleets = 2,type=5,std=TRUE),name="/res_caa_5",dim=c(25,20),wd=.wd,type=type)
# saveplot(resplot(x,fleets = 2,type=6),name="/res_caa_6",dim=c(17,10),wd=.wd,type=type)
# saveplot(resplot(x,fleets = 2,type=7),name="/res_caa_7",dim=c(17,10),wd=.wd,type=type)
|
# THIS SCRIPT PERFORMS THE FOLLOWING STEPS
# 1. Load data from text files                  DONE
# 2. Clean data                                 DONE
# 3. Generate corpus                            DONE
# 4. Clean / transform the corpus               DONE
# 5. Generate n-grams & write to output files   DONE
# The script generateModel.R will continue with the next steps
## necessary libraries
library(xfun)
# 1. Get the data ###################################################################
if(!file.exists("data")){ dir.create("data") }
if(!file.exists("data/Coursera-SwiftKey.zip")){
    url <- "https://d396qusza40orc.cloudfront.net/dsscapstone/dataset/Coursera-SwiftKey.zip"
    download.file(url, destfile = "data/Coursera-SwiftKey.zip", mode="wb")
}
if(!file.exists("data/final")){
    unzip("data/Coursera-SwiftKey.zip", exdir="data")
}
# 2. Read the data ##################################################################
# vector with all three files
files <- c("data/final/en_US/en_US.blogs.txt",
           "data/final/en_US/en_US.news.txt",
           "data/final/en_US/en_US.twitter.txt")
con <- file(files[1],"r")
blogs <- readLines(con, encoding = "UTF-8", skipNul = TRUE)
close(con)
# news cannot be completely read in Windows 10,
# therefore "rb" instead of "r" for read in binary mode
con <- file(files[2],"rb")
news <- readLines(con, encoding = "UTF-8", skipNul = TRUE)
close(con)
con <- file(files[3],"r")
twitter <- readLines(con, encoding = "UTF-8", skipNul = TRUE)
close(con)
rm("con")
# 3. Clean input data iteratively ###################################################
# to lower
blogs <- stringi::stri_trans_tolower(blogs, locale = NULL)
news <- stringi::stri_trans_tolower(news, locale = NULL)
twitter <- stringi::stri_trans_tolower(twitter, locale = NULL)
# write back to the input files so gsub_files() can edit them in place
eol <- ifelse(.Platform$OS.type == "windows", "\r\n", "\n")
stringi::stri_write_lines(blogs, files[1], encoding = "UTF-8", sep = eol)
stringi::stri_write_lines(news, files[2], encoding = "UTF-8", sep = eol)
stringi::stri_write_lines(twitter, files[3], encoding = "UTF-8", sep = eol)
# free up memory space
rm("blogs", "news", "twitter")
# Expand English contractions so n-grams are built on full word forms.
# All patterns below are literal strings, hence fixed = TRUE.  The corpus
# contains both the typographic apostrophe (') and the ASCII apostrophe ('),
# so most contractions are listed twice.  Order matters: longer patterns
# ("'d like ") must run before their shorter prefixes ("'d ").
contractions <- c(
    "’d like " = " would like ",
    "'d like " = " would like ",
    "’d " = " had ",
    "'d " = " had ",
    "’ve " = " have ",
    "'ve " = " have ",
    "haven't " = "have not ",
    "hasn't " = "has not ",
    "hadn't " = "had not ",
    "’m " = " am ",
    "'m " = " am ",
    "’ll " = " will ",
    "'ll " = " will ",
    "’re " = " are ",
    "'re " = " are ",
    "wasn’t " = "was not ",
    "wasn't " = "was not ",
    "weren’t " = "were not ",
    "weren't " = "were not ",
    "isn’t " = "is not ",
    "isn't " = "is not ",
    "aren’t " = "are not ",
    "aren't " = "are not ",
    "won’t " = "will not ",
    "won't " = "will not ",    # was misspelled "wonn't" and never matched
    "don’t " = "do not ",
    "don't " = "do not ",
    "doesn’t " = "does not ",
    "doesn't " = "does not ",
    "didn’t " = "did not ",
    "didn't " = "did not ",
    "can’t " = "can not ",
    "can't " = "can not ",
    "cant " = "can not ",
    "cannot " = "can not ",
    "couldn’t " = "could not ",
    "couldn't " = "could not ",
    " gonna " = " going to ",
    " wanna " = " want to ",   # replacement was missing its leading space
    "’s " = " ",
    "'s " = " "
)
# Iterate by index (not by name lookup) so repeated names could never alias.
for (i in seq_along(contractions)) {
    gsub_files(files, names(contractions)[i], contractions[[i]], fixed = TRUE)
}
# Regex-based cleaning.  These patterns are regular expressions, so fixed must
# stay FALSE (the default).  NOTE: the previous version passed fixed = TRUE
# here, which made gsub() treat the patterns as literal text -- they never
# matched, which is why the regex "did not work".
gsub_files(files, "(f|ht)tp(s?)://(.*)[.][a-z]+", " ")   # crude URL removal
gsub_files(files, "@[^[:space:]]+", " ")                 # twitter handles
gsub_files(files, "(^| )im ", "\\1i am ")                # standalone "im" token
gsub_files(files, "(", "", fixed = TRUE)                 # stray parentheses
gsub_files(files, ")", "", fixed = TRUE)
# Punctuation and digits are stripped later with quanteda (see generateNGram.R),
# so no further passes are needed here.
#data is now clean more or less ;-). The rest is done with quanteda
| /src/private/cleanData.R | no_license | marwahaha/Capstone | R | false | false | 6,234 | r | # THIS SCRIPT PERFORMS THE FOLLOWING STEPS
# 1. Load data from text files                  DONE
# 2. Clean data                                 DONE
# 3. Generate corpus                            DONE
# 4. Clean / transform the corpus               DONE
# 5. Generate n-grams & write to output files   DONE
# The script generateModel.R will continue with the next steps
## necessary libraries
library(xfun)
# 1. Get the data ###################################################################
if(!file.exists("data")){ dir.create("data") }
if(!file.exists("data/Coursera-SwiftKey.zip")){
    url <- "https://d396qusza40orc.cloudfront.net/dsscapstone/dataset/Coursera-SwiftKey.zip"
    download.file(url, destfile = "data/Coursera-SwiftKey.zip", mode="wb")
}
if(!file.exists("data/final")){
    unzip("data/Coursera-SwiftKey.zip", exdir="data")
}
# 2. Read the data ##################################################################
# vector with all three files
files <- c("data/final/en_US/en_US.blogs.txt",
           "data/final/en_US/en_US.news.txt",
           "data/final/en_US/en_US.twitter.txt")
con <- file(files[1],"r")
blogs <- readLines(con, encoding = "UTF-8", skipNul = TRUE)
close(con)
# news cannot be completely read in Windows 10,
# therefore "rb" instead of "r" for read in binary mode
con <- file(files[2],"rb")
news <- readLines(con, encoding = "UTF-8", skipNul = TRUE)
close(con)
con <- file(files[3],"r")
twitter <- readLines(con, encoding = "UTF-8", skipNul = TRUE)
close(con)
rm("con")
# 3. Clean input data iteratively ###################################################
# to lower
blogs <- stringi::stri_trans_tolower(blogs, locale = NULL)
news <- stringi::stri_trans_tolower(news, locale = NULL)
twitter <- stringi::stri_trans_tolower(twitter, locale = NULL)
# write back to the input files so gsub_files() can edit them in place
eol <- ifelse(.Platform$OS.type == "windows", "\r\n", "\n")
stringi::stri_write_lines(blogs, files[1], encoding = "UTF-8", sep = eol)
stringi::stri_write_lines(news, files[2], encoding = "UTF-8", sep = eol)
stringi::stri_write_lines(twitter, files[3], encoding = "UTF-8", sep = eol)
# free up memory space
rm("blogs", "news", "twitter")
# Expand English contractions so n-grams are built on full word forms.
# All patterns below are literal strings, hence fixed = TRUE.  The corpus
# contains both the typographic apostrophe (') and the ASCII apostrophe ('),
# so most contractions are listed twice.  Order matters: longer patterns
# ("'d like ") must run before their shorter prefixes ("'d ").
contractions <- c(
    "’d like " = " would like ",
    "'d like " = " would like ",
    "’d " = " had ",
    "'d " = " had ",
    "’ve " = " have ",
    "'ve " = " have ",
    "haven't " = "have not ",
    "hasn't " = "has not ",
    "hadn't " = "had not ",
    "’m " = " am ",
    "'m " = " am ",
    "’ll " = " will ",
    "'ll " = " will ",
    "’re " = " are ",
    "'re " = " are ",
    "wasn’t " = "was not ",
    "wasn't " = "was not ",
    "weren’t " = "were not ",
    "weren't " = "were not ",
    "isn’t " = "is not ",
    "isn't " = "is not ",
    "aren’t " = "are not ",
    "aren't " = "are not ",
    "won’t " = "will not ",
    "won't " = "will not ",    # was misspelled "wonn't" and never matched
    "don’t " = "do not ",
    "don't " = "do not ",
    "doesn’t " = "does not ",
    "doesn't " = "does not ",
    "didn’t " = "did not ",
    "didn't " = "did not ",
    "can’t " = "can not ",
    "can't " = "can not ",
    "cant " = "can not ",
    "cannot " = "can not ",
    "couldn’t " = "could not ",
    "couldn't " = "could not ",
    " gonna " = " going to ",
    " wanna " = " want to ",   # replacement was missing its leading space
    "’s " = " ",
    "'s " = " "
)
# Iterate by index (not by name lookup) so repeated names could never alias.
for (i in seq_along(contractions)) {
    gsub_files(files, names(contractions)[i], contractions[[i]], fixed = TRUE)
}
# Regex-based cleaning.  These patterns are regular expressions, so fixed must
# stay FALSE (the default).  NOTE: the previous version passed fixed = TRUE
# here, which made gsub() treat the patterns as literal text -- they never
# matched, which is why the regex "did not work".
gsub_files(files, "(f|ht)tp(s?)://(.*)[.][a-z]+", " ")   # crude URL removal
gsub_files(files, "@[^[:space:]]+", " ")                 # twitter handles
gsub_files(files, "(^| )im ", "\\1i am ")                # standalone "im" token
gsub_files(files, "(", "", fixed = TRUE)                 # stray parentheses
gsub_files(files, ")", "", fixed = TRUE)
# Punctuation and digits are stripped later with quanteda (see generateNGram.R),
# so no further passes are needed here.
#data is now clean more or less ;-). The rest is done with quanteda
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cran-interface.R
\name{compile_jar}
\alias{compile_jar}
\title{Compile and package Java code}
\usage{
compile_jar(
path,
nocache = FALSE,
verbose = c("normal", "quiet", "debug"),
with_dependencies = FALSE,
...
)
}
\arguments{
\item{path}{the path to - either a java source code directory containing a
\code{pom.xml} file, the \code{pom.xml} file itself, or a \code{...-src.jar} assembled by
the maven assembly plugin,}
\item{nocache}{normally compilation is only performed if the input has
changed. \code{nocache} forces recompilation}
\item{verbose}{how much output from maven, one of "normal", "quiet", "debug"}
\item{with_dependencies}{compile the Java code to a
'...-jar-with-dependencies.jar' including transitive dependencies which may
be easier to embed into R code as does not need a class path (however may
be large if there are a lot of dependencies)}
\item{...}{passed to \code{execute_maven(...)}, e.g. could include \code{settings}
parameter}
}
\value{
the path to the compiled 'jar' file. If this is a fat jar this can be
passed straight to \code{rJava}, otherwise an additional
\code{resolve_dependencies(...)} call is required
}
\description{
Compilation will package the Java source code into a Jar file for further
use. It will resolve dependencies and optionally package them into a single
\verb{uber jar} (using maven assembly).
}
\examples{
\donttest{
# This code can take quite a while to run as has to
# download a lot of plugins, especially on first run
path = package_jars("rmaven","src")
compile_jar(path,nocache=TRUE)
path2 = system.file("testdata/test-project",package = "rmaven")
compile_jar(path2,nocache=TRUE,with_dependencies=TRUE)
}
}
| /man/compile_jar.Rd | permissive | terminological/rmaven | R | false | true | 1,761 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cran-interface.R
\name{compile_jar}
\alias{compile_jar}
\title{Compile and package Java code}
\usage{
compile_jar(
path,
nocache = FALSE,
verbose = c("normal", "quiet", "debug"),
with_dependencies = FALSE,
...
)
}
\arguments{
\item{path}{the path to - either a java source code directory containing a
\code{pom.xml} file, the \code{pom.xml} file itself, or a \code{...-src.jar} assembled by
the maven assembly plugin,}
\item{nocache}{normally compilation is only performed if the input has
changed. \code{nocache} forces recompilation}
\item{verbose}{how much output from maven, one of "normal", "quiet", "debug"}
\item{with_dependencies}{compile the Java code to a
'...-jar-with-dependencies.jar' including transitive dependencies which may
be easier to embed into R code as does not need a class path (however may
be large if there are a lot of dependencies)}
\item{...}{passed to \code{execute_maven(...)}, e.g. could include \code{settings}
parameter}
}
\value{
the path to the compiled 'jar' file. If this is a fat jar this can be
passed straight to \code{rJava}, otherwise an additional
\code{resolve_dependencies(...)} call is required
}
\description{
Compilation will package the Java source code into a Jar file for further
use. It will resolve dependencies and optionally package them into a single
\verb{uber jar} (using maven assembly).
}
\examples{
\donttest{
# This code can take quite a while to run as has to
# download a lot of plugins, especially on first run
path = package_jars("rmaven","src")
compile_jar(path,nocache=TRUE)
path2 = system.file("testdata/test-project",package = "rmaven")
compile_jar(path2,nocache=TRUE,with_dependencies=TRUE)
}
}
|
library(foreign)
library(tidyverse)
# Read the SPSS file and tidy it: spelled-out TRUE (never the reassignable T)
# and as_tibble() replacing the deprecated as.tibble().
boom <- read.spss("Boom_data.sav", to.data.frame = TRUE) %>%
  as_tibble() %>%
  janitor::clean_names() %>%
  mutate(
    ssid = trimws(as.character(ssid), "both"),
    # ?? What's with this random line that is out of place?
    # vid_game = ifelse(ssid == "WPR052", "Wii Play (WP)", as.character(vid_game)),
    # condition = ifelse(ssid == "WPR052", "TG-RegControl", as.character(condition)),
    vid_game = factor(vid_game, levels = c("Mario Galaxy (MG)",
                                           "Wii Play (WP)",
                                           "Resident Evil (RE)")))
# Keep only rows with no missing values for modelling.
boom_proc <- boom %>% filter(complete.cases(.))
# Orthogonal polynomial contrasts across the 3 game conditions.
contrasts(boom_proc$vid_game) <- contr.poly(3) # number of levels, i.e. 3
# Compare the game-only model against the model with covariates added.
mod1 <- aov(headshot ~ vid_game, data = boom_proc)
mod2 <- aov(headshot ~ vid_game + (sex + gn_own_all + fire_gun + gun_att + vg_shoot + hits), data = boom_proc)
anova(mod1, mod2)
summary(mod2)
summary(mod2)
| /misc/boom_testing.R | no_license | jmbarbone/psy609 | R | false | false | 934 | r | library(foreign)
library(tidyverse)
boom <- read.spss("Boom_data.sav", to.data.frame = T) %>%
as.tibble() %>%
janitor::clean_names() %>%
mutate(
ssid = trimws(as.character(ssid), "both"),
# ?? What's with this random line that is out of place?
# vid_game = ifelse(ssid == "WPR052", "Wii Play (WP)", as.character(vid_game)),
# condition = ifelse(ssid == "WPR052", "TG-RegControl", as.character(condition)),
vid_game = factor(vid_game, levels = c("Mario Galaxy (MG)",
"Wii Play (WP)",
"Resident Evil (RE)")))
boom_proc <- boom %>% filter(complete.cases(.))
contrasts(boom_proc$vid_game) <- contr.poly(3) # of levels, i.e. 3
mod1 <- aov(headshot ~ vid_game, data = boom_proc)
mod2 <- aov(headshot ~ vid_game + (sex + gn_own_all + fire_gun + gun_att + vg_shoot + hits), data = boom_proc)
anova(mod1, mod2)
summary(mod2)
|
#' Inject javascript needed by the sui_shinymod modules
#'
#' @return A script tag
#'
#' @seealso \code{\link{sui_shinymod_ui}}
#'
#' @export
sui_js <- function() {
    js_path <- system.file("extdata/js/i18n.js", package = "sui18n")
    js_code <- paste(readLines(js_path), collapse = "\n")
    tags$head(tags$script(HTML(js_code)))
}
#' A Shiny module for internationalization
#'
#' @param id string: the id of the shiny element
#' @param input : see \code{\link[shiny]{callModule}}
#' @param output : see \code{\link[shiny]{callModule}}
#' @param session : see \code{\link[shiny]{callModule}}
#' @param to string: as for \code{\link{sui_translator}}
#' @param csv_path string: as for \code{\link{sui_translator}}
#'
#' @examples
#' \dontrun{
#' library(shiny)
#' app <- shinyApp(
#'   ui = fluidPage(
#'     sui_js(),
#'     sui_shinymod_ui(id = "lang"),
#'     tags$p(lang_key = "hello"), ## simple tags can be done like this
#'     uiOutput("another")
#'   ),
#'   server = function(input, output) {
#'     ms <- callModule(sui_shinymod_server, id = "lang")
#'     ## more complex UI elements can be done like this
#'     output$another <- renderUI({
#'       ms$i18n_lang()
#'       tags$p(ms$i18n$t("Please wait"))
#'     })
#'   }
#' )
#' runApp(app)
#' }
#' @rdname sui_shinymod
#' @export
sui_shinymod_ui <- function(id) {
    ## Namespace the input id so multiple module instances can coexist.
    ns <- NS(id)
    shinyWidgets::pickerInput(ns("select_lang"), label = NULL, choices = NULL, width = "95px")
}
#' @rdname sui_shinymod
#' @export
sui_shinymod_server <- function(input, output, session, csv_path = NULL, to = NULL) {
    ## Build the translator: package default table unless a custom CSV is given.
    this_i18n <- if (is.null(csv_path)) sui_translator(to) else sui_translator(to, csv_path = csv_path)
    ## Reactive copy of the current target language, returned to the caller.
    this_i18n_lang <- reactiveVal(this_i18n$target())
    ## Serve the bundled flag SVGs under the "sui_flags" URL prefix.
    shiny::addResourcePath("sui_flags", system.file("extdata/flags", package = "sui18n"))
    this_update_selection <- reactiveVal(NULL) ## manually force the selection to change
    ## update choices
    observe({
        ## Available languages are the translation-table columns minus "key".
        langs <- tryCatch(setdiff(this_i18n$languages(), "key"), error = function(e) NULL)
        if (!is.null(langs)) {
            if (!is.null(this_update_selection())) {
                sel <- this_update_selection()
            } else {
                ## Keep the user's current choice when still valid; isolate()
                ## so re-reading the input does not add a reactive dependency.
                isolate(sel <- input$select_lang)
                if (is.null(sel) || !sel %in% langs) sel <- langs[1]
            }
            ## Map language codes to flag images ("en" is shown with the GB flag).
            flgs <- langs
            flgs[flgs == "en"] <- "GB"
            flgs <- setNames(lapply(seq_along(flgs), function(fi) paste0("<img src=\"sui_flags/", toupper(flgs[fi]), ".svg\" />", toupper(langs[fi]))), langs)
            shinyWidgets::updatePickerInput(session = session, inputId = "select_lang", choices = langs, choicesOpt = list(content = unlist(flgs)), selected = sel)
        }
    })
    observeEvent(input$select_lang, {
        if (!is.null(input$select_lang) && input$select_lang %in% this_i18n$languages()) {
            this_i18n$set_target(input$select_lang)
            this_i18n_lang(input$select_lang)
            ## construct js-side translator for this language
            dict <- this_i18n$get_table()
            idx <- is.na(dict[[input$select_lang]])
            ## Optionally mark untranslated keys with a red border in the UI.
            if (!is.null(this_i18n$warn_unmatched) && this_i18n$warn_unmatched()) dict[[input$select_lang]][idx] <- paste0('<span style="border:1px solid red;">', dict$key[idx], '</span>')
            myscr <- paste0('mytr = i18n.create({ values : ',
                            jsonlite::toJSON(setNames(as.list(dict[[input$select_lang]]), dict$en), auto_unbox = TRUE),
                            '});')
            evaljs(myscr)
            ## run it
            do_translate()
        }
    })
    ## Trigger a client-side re-translation of all tagged elements.
    do_translate <- function() evaljs("translate_all()")
    ## Handles so the caller can read/set language state and force updates.
    list(i18n = this_i18n, i18n_lang = this_i18n_lang, update_selection = this_update_selection, do_translate = do_translate)
}
}
# Send an expression to the browser for evaluation via the "evaljs" handler.
evaljs <- function(expr) {
    domain <- shiny::getDefaultReactiveDomain()
    domain$sendCustomMessage("evaljs", expr)
}
| /R/shiny.R | permissive | scienceuntangled/sui18n | R | false | false | 3,950 | r | #' Inject javascript needed by the sui_shinymod modules
#'
#' @return A script tag
#'
#' @seealso \code{\link{sui_shinymod_ui}}
#'
#' @export
sui_js <- function() {
    js_path <- system.file("extdata/js/i18n.js", package = "sui18n")
    js_code <- paste(readLines(js_path), collapse = "\n")
    tags$head(tags$script(HTML(js_code)))
}
#' A Shiny module for internationalization
#'
#' @param id string: the id of the shiny element
#' @param input : see \code{\link[shiny]{callModule}}
#' @param output : see \code{\link[shiny]{callModule}}
#' @param session : see \code{\link[shiny]{callModule}}
#' @param to string: as for \code{\link{sui_translator}}
#' @param csv_path string: as for \code{\link{sui_translator}}
#'
#' @examples
#' \dontrun{
#' library(shiny)
#' app <- shinyApp(
#'   ui = fluidPage(
#'     sui_js(),
#'     sui_shinymod_ui(id = "lang"),
#'     tags$p(lang_key = "hello"), ## simple tags can be done like this
#'     uiOutput("another")
#'   ),
#'   server = function(input, output) {
#'     ms <- callModule(sui_shinymod_server, id = "lang")
#'     ## more complex UI elements can be done like this
#'     output$another <- renderUI({
#'       ms$i18n_lang()
#'       tags$p(ms$i18n$t("Please wait"))
#'     })
#'   }
#' )
#' runApp(app)
#' }
#' @rdname sui_shinymod
#' @export
sui_shinymod_ui <- function(id) {
    ## Namespace the input id so multiple module instances can coexist.
    ns <- NS(id)
    shinyWidgets::pickerInput(ns("select_lang"), label = NULL, choices = NULL, width = "95px")
}
#' @rdname sui_shinymod
#' @export
sui_shinymod_server <- function(input, output, session, csv_path = NULL, to = NULL) {
    ## Build the translator: package default table unless a custom CSV is given.
    this_i18n <- if (is.null(csv_path)) sui_translator(to) else sui_translator(to, csv_path = csv_path)
    ## Reactive copy of the current target language, returned to the caller.
    this_i18n_lang <- reactiveVal(this_i18n$target())
    ## Serve the bundled flag SVGs under the "sui_flags" URL prefix.
    shiny::addResourcePath("sui_flags", system.file("extdata/flags", package = "sui18n"))
    this_update_selection <- reactiveVal(NULL) ## manually force the selection to change
    ## update choices
    observe({
        ## Available languages are the translation-table columns minus "key".
        langs <- tryCatch(setdiff(this_i18n$languages(), "key"), error = function(e) NULL)
        if (!is.null(langs)) {
            if (!is.null(this_update_selection())) {
                sel <- this_update_selection()
            } else {
                ## Keep the user's current choice when still valid; isolate()
                ## so re-reading the input does not add a reactive dependency.
                isolate(sel <- input$select_lang)
                if (is.null(sel) || !sel %in% langs) sel <- langs[1]
            }
            ## Map language codes to flag images ("en" is shown with the GB flag).
            flgs <- langs
            flgs[flgs == "en"] <- "GB"
            flgs <- setNames(lapply(seq_along(flgs), function(fi) paste0("<img src=\"sui_flags/", toupper(flgs[fi]), ".svg\" />", toupper(langs[fi]))), langs)
            shinyWidgets::updatePickerInput(session = session, inputId = "select_lang", choices = langs, choicesOpt = list(content = unlist(flgs)), selected = sel)
        }
    })
    observeEvent(input$select_lang, {
        if (!is.null(input$select_lang) && input$select_lang %in% this_i18n$languages()) {
            this_i18n$set_target(input$select_lang)
            this_i18n_lang(input$select_lang)
            ## construct js-side translator for this language
            dict <- this_i18n$get_table()
            idx <- is.na(dict[[input$select_lang]])
            ## Optionally mark untranslated keys with a red border in the UI.
            if (!is.null(this_i18n$warn_unmatched) && this_i18n$warn_unmatched()) dict[[input$select_lang]][idx] <- paste0('<span style="border:1px solid red;">', dict$key[idx], '</span>')
            myscr <- paste0('mytr = i18n.create({ values : ',
                            jsonlite::toJSON(setNames(as.list(dict[[input$select_lang]]), dict$en), auto_unbox = TRUE),
                            '});')
            evaljs(myscr)
            ## run it
            do_translate()
        }
    })
    ## Trigger a client-side re-translation of all tagged elements.
    do_translate <- function() evaljs("translate_all()")
    ## Handles so the caller can read/set language state and force updates.
    list(i18n = this_i18n, i18n_lang = this_i18n_lang, update_selection = this_update_selection, do_translate = do_translate)
}
}
# Send an expression to the browser for evaluation via the "evaljs" handler.
evaljs <- function(expr) {
    domain <- shiny::getDefaultReactiveDomain()
    domain$sendCustomMessage("evaljs", expr)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/OSTSC.R
\name{OSTSC}
\alias{OSTSC}
\title{Over Sampling for Time Series Classification}
\usage{
OSTSC(sample, label, class, ratio = 1, per = 0.8, r = 1, k = 5,
m = 15, parallel = TRUE, progBar = TRUE)
}
\arguments{
\item{sample}{Univariate sequence data samples}
\item{label}{Labels corresponding to samples}
\item{class}{The number of classes to be oversampled, starting
from the class with the fewest observations; by default the algorithm
progresses to as many classes as possible.}
\item{ratio}{The oversampling ratio
number (>=1) (default = 1)}
\item{per}{Ratio of weighting between ESPO and ADASYN (default = 0.8)}
\item{r}{A scalar ratio specifying which level (towards the boundary) we shall
push the synthetic data in ESPO (default = 1)}
\item{k}{Number of nearest neighbours in k-NN (for ADASYN) algorithm (default = 5)}
\item{m}{Seeds from the positive class in m-NN (for ADASYN) algorithm (default = 15)}
\item{parallel}{Whether to execute in parallel mode (default = TRUE).
(Recommended for datasets with over 30,000 records.)}
\item{progBar}{Whether to include progress bars (default = TRUE).
For the ESPO approach, the bar character is |--------|100\%.
For the ADASYN approach, the bar character is |========|100\%.}
}
\value{
sample: the time series sequences data oversampled
label: the label corresponding to each row of records
}
\description{
Oversample a univariate, multi-modal time series sequence of imbalanced classified data.
}
\details{
This function balances univariate imbalanced time series data based on
structure-preserving oversampling.
}
\examples{
# This is a simple example to show the usage of OSTSC. See the vignette for a tutorial
# demonstrating more complex examples.
# Example one
# loading data
data(Dataset_Synthetic_Control)
# get split feature and label data
train.label <- Dataset_Synthetic_Control$train.y
train.sample <- Dataset_Synthetic_Control$train.x
# the first dimension of the feature set and labels must be the same
# the second dimension of the feature set is the sequence length
dim(train.sample)
dim(train.label)
# check the imbalance ratio of the data
table(train.label)
# oversample class 1 to the same number of observations as class 0
MyData <- OSTSC(train.sample, train.label, parallel = FALSE)
# store the feature data after oversampling
x <- MyData$sample
# store the label data after oversampling
y <- MyData$label
# check the imbalance of the data
table(y)
# Example two
# loading data
ecg <- Dataset_ECG()
# get split feature and label data
train.label <- ecg$train.y
train.sample <- ecg$train.x
# the first dimension of the feature set and labels must be the same
# the second dimension of the feature set is the sequence length
dim(train.sample)
dim(train.label)
# check the imbalance ratio of the data
table(train.label)
# oversample class 3, 4, 5 to the same number of observations as class 1
MyData <- OSTSC(train.sample, train.label, parallel = FALSE)
# store the feature data after oversampling
x <- MyData$sample
# store the label data after oversampling
y <- MyData$label
# check the imbalance of the data
table(y)
}
\references{
H. Cao, X.-L. Li, Y.-K. Woon and S.-K. Ng,
"Integrated Oversampling for Imbalanced Time Series Classification"
IEEE Trans. on Knowledge and Data Engineering (TKDE),
vol. 25(12), pp. 2809-2822, 2013
H. Cao, V. Y. F. Tan and J. Z. F. Pang,
"A Parsimonious Mixture of Gaussian Trees Model for Oversampling in Imbalanced and Multi-Modal Time-Series Classification"
IEEE Trans. on Neural Network and Learning System (TNNLS),
vol. 25(12), pp. 2226-2239, 2014
H. Cao, X. L. Li, Y. K. Woon and S. K. Ng,
"SPO: Structure Preserving Oversampling for Imbalanced Time Series Classification"
Proc. IEEE Int. Conf. on Data Mining ICDM,
pp. 1008-1013, 2011
}
| /man/OSTSC.Rd | no_license | mfrdixon/OSTSC | R | false | true | 4,126 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/OSTSC.R
\name{OSTSC}
\alias{OSTSC}
\title{Over Sampling for Time Series Classification}
\usage{
OSTSC(sample, label, class, ratio = 1, per = 0.8, r = 1, k = 5,
m = 15, parallel = TRUE, progBar = TRUE)
}
\arguments{
\item{sample}{Univariate sequence data samples}
\item{label}{Labels corresponding to samples}
\item{class}{The number of classes to be oversampled, starting
from the class with the fewest observations; the default setting
progresses to as many classes as possible.}
\item{ratio}{The oversampling ratio
number (>=1) (default = 1)}
\item{per}{Ratio of weighting between ESPO and ADASYN (default = 0.8)}
\item{r}{A scalar ratio specifying which level (towards the boundary) we shall
push the synthetic data in ESPO (default = 1)}
\item{k}{Number of nearest neighbours in k-NN (for ADASYN) algorithm (default = 5)}
\item{m}{Seeds from the positive class in m-NN (for ADASYN) algorithm (default = 15)}
\item{parallel}{Whether to execute in parallel mode (default = TRUE).
(Recommended for datasets with over 30,000 records.)}
\item{progBar}{Whether to include progress bars (default = TRUE).
For the ESPO approach, the bar character is |--------|100\%.
For the ADASYN approach, the bar character is |========|100\%.}
}
\value{
sample: the time series sequences data oversampled
label: the label corresponding to each row of records
}
\description{
Oversample a univariate, multi-modal time series sequence of imbalanced classified data.
}
\details{
This function balances univariate imbalanced time series data based on
structure-preserving oversampling.
}
\examples{
# This is a simple example to show the usage of OSTSC. See the vignette for a tutorial
# demonstrating more complex examples.
# Example one
# loading data
data(Dataset_Synthetic_Control)
# get split feature and label data
train.label <- Dataset_Synthetic_Control$train.y
train.sample <- Dataset_Synthetic_Control$train.x
# the first dimension of the feature set and labels must be the same
# the second dimension of the feature set is the sequence length
dim(train.sample)
dim(train.label)
# check the imbalance ratio of the data
table(train.label)
# oversample class 1 to the same number of observations as class 0
MyData <- OSTSC(train.sample, train.label, parallel = FALSE)
# store the feature data after oversampling
x <- MyData$sample
# store the label data after oversampling
y <- MyData$label
# check the imbalance of the data
table(y)
# Example two
# loading data
ecg <- Dataset_ECG()
# get split feature and label data
train.label <- ecg$train.y
train.sample <- ecg$train.x
# the first dimension of the feature set and labels must be the same
# the second dimension of the feature set is the sequence length
dim(train.sample)
dim(train.label)
# check the imbalance ratio of the data
table(train.label)
# oversample class 3, 4, 5 to the same number of observations as class 1
MyData <- OSTSC(train.sample, train.label, parallel = FALSE)
# store the feature data after oversampling
x <- MyData$sample
# store the label data after oversampling
y <- MyData$label
# check the imbalance of the data
table(y)
}
\references{
H. Cao, X.-L. Li, Y.-K. Woon and S.-K. Ng,
"Integrated Oversampling for Imbalanced Time Series Classification"
IEEE Trans. on Knowledge and Data Engineering (TKDE),
vol. 25(12), pp. 2809-2822, 2013
H. Cao, V. Y. F. Tan and J. Z. F. Pang,
"A Parsimonious Mixture of Gaussian Trees Model for Oversampling in Imbalanced and Multi-Modal Time-Series Classification"
IEEE Trans. on Neural Network and Learning System (TNNLS),
vol. 25(12), pp. 2226-2239, 2014
H. Cao, X. L. Li, Y. K. Woon and S. K. Ng,
"SPO: Structure Preserving Oversampling for Imbalanced Time Series Classification"
Proc. IEEE Int. Conf. on Data Mining ICDM,
pp. 1008-1013, 2011
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.