blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7a5f229c5dfe7f9923700cba85eb523054158c42
|
6c7976f09cbb750dd665e51682bc814a70d101e6
|
/PA2_CacheMatrix/test_cachematrix.R
|
7dd7565f1d79b88bb5f5d4df20e4b4d206b19c0b
|
[] |
no_license
|
nan-wang/Coursera_RProgramming
|
ca05433a210bf4044c204c0e399944d75b0a95b4
|
e26bd4804186706f3e7313d29c47b52b3b3a38d1
|
refs/heads/master
| 2021-01-01T19:43:05.820801
| 2015-04-07T10:33:07
| 2015-04-07T10:33:07
| 33,536,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,287
|
r
|
test_cachematrix.R
|
## Test script for the cache-matrix implementation in `cachematrix.R`.
##
## The cached matrix is initialised to
##      [,1] [,2]
## [1,]    1    3
## [2,]    2    4
## Test 1 exercises the accessors returned by `makeCacheMatrix`
## (get / set / get_inverse / set_inverse) directly.
## Test 2 then resets the cached matrix to its own inverse and computes
## inverses via `cacheSolve`, checking that the second call is answered
## from the cache ("getting cached data").
##
## NOTE(review): the original script called setwd('.') here; that sets the
## working directory to itself (a no-op side effect) and has been removed.
source('cachematrix.R')

## test fixture: a 2x2 invertible matrix
foo_mat <- matrix(1:4, nrow = 2, ncol = 2)

# test 1 makeCacheMatrix
cached_mat <- makeCacheMatrix(foo_mat)

## test 1.1: get returns the stored matrix
cached_mat$get()
### expected result
# [,1] [,2]
# [1,] 1 3
# [2,] 2 4

## test 1.2: set_inverse stores an externally computed inverse
new_inv <- solve(foo_mat)
cached_mat$set_inverse(new_inv)

## test 1.3: get_inverse returns the cached inverse
cached_mat$get_inverse()
### expected result
# [,1] [,2]
# [1,] -2 1.5
# [2,] 1 -0.5

## test 1.4: set replaces the stored matrix and should clear the cache
foo_mat <- cached_mat$get_inverse()
cached_mat$set(foo_mat)
cached_mat$get()
### expected result
# [,1] [,2]
# [1,] -2 1.5
# [2,] 1 -0.5
cached_mat$get_inverse()
### expected result (cache invalidated by set)
# NULL

# test 2 cacheSolve
cacheSolve(cached_mat)
### expected result (computed fresh, cache was empty)
# [,1] [,2]
# [1,] 1 3
# [2,] 2 4
cacheSolve(cached_mat)
### expected result (second call served from the cache)
# getting cached data
# [,1] [,2]
# [1,] 1 3
# [2,] 2 4
|
991d1f00eb5fe9bd3e0a0a5ede6b28bb4bb22966
|
e0a4d33951863115ed7bd190e1330683c011b9ba
|
/Miniscreen05/MaGeCK_stats/S8B_c0_t1.R
|
b171bf49544e1dcbc9e28bd4df239e1a0b8a9fd6
|
[] |
no_license
|
davidchen0420/Miniscreen
|
83c04943e7a449f20e2392f236e117aeb0dd3c87
|
9269589cdf91435a9f5672382d6fad51d5bc619b
|
refs/heads/main
| 2023-03-19T12:17:31.376911
| 2021-03-16T23:00:27
| 2021-03-16T23:00:27
| 348,514,927
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 36,769
|
r
|
S8B_c0_t1.R
|
# Auto-generated plotting script (vocabulary — gene_summary, RRA score,
# sgRNAs — suggests MAGeCK output; the "Do not modify" parameter blocks
# below are machine-written).
# Open the output PDF device: every plot below is written into it.
pdf(file='S8B_c0_t1.pdf',width=4.5,height=4.5);
# Gene-level summary table for this comparison (one row per gene).
gstable=read.table('S8B_c0_t1.gene_summary.txt',header=T)
#
#
# parameters
# Do not modify the variables beginning with "__"
# gstablename='__GENE_SUMMARY_FILE__'
# Column of gstable holding the score for this section; samplelabel below
# ('1_vs_0 neg.') suggests the negative-selection columns — TODO confirm
# against the gene_summary header.
startindex=3
# outputfile='__OUTPUT_FILE__'
# Control/target genes highlighted in the ranked plots below.
targetgenelist=c("SOX9","LRIG1","ROSA26","KRT20","mKate2","AAVS1","CCR5","PROM1","CTRL","EGFP")
# samplelabel=sub('.\\w+.\\w+$','',colnames(gstable)[startindex]);
samplelabel='1_vs_0 neg.'
# You need to write some codes in front of this code:
# gstable=read.table(gstablename,header=T)
# pdf(file=outputfile,width=6,height=6)
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
# Hard-coded 28-color palette used by all plotting below.
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
######
# function definition
# Draw a ranked-value plot: `val` (a named numeric vector, names are gene
# ids, assumed already sorted) is drawn as a line on a log-scaled y axis
# with the limits inverted (ylim = c(max, min)) so the smallest values sit
# at the top.  Each gene in `tglist` is then highlighted with a colored
# point, and the highlighted genes are listed in a legend.
#
# val    : named numeric vector of scores/p-values
# tglist : character vector of gene names to highlight
# ...    : passed through to plot() (xlab / ylab / main)
#
# Relies on the global `colors` palette defined earlier in this file.
plotrankedvalues<-function(val, tglist, ...){
plot(val,log='y',ylim=c(max(val),min(val)),type='l',lwd=2, ...)
if(length(tglist)>0){
for(i in 1:length(tglist)){
targetgene=tglist[i];
tx=which(names(val)==targetgene);ty=val[targetgene];
# Fix: cycle the palette with ((i-1) %% n) + 1, which always lands in
# 1..n.  The original 'i %% n' evaluates to 0 whenever i is a multiple
# of length(colors), selecting no color at all; for all other i both
# forms pick the same color, so existing plots are unchanged.
points(tx,ty,col=colors[((i - 1) %% length(colors)) + 1],cex=2,pch=20)
# text(tx+50,ty,targetgene,col=colors[i])
}
legend('topright',tglist,pch=20,pt.cex = 2,cex=1,col=colors)
}
}
# Scatter-plot `val` in a random order (grey points, log-scaled inverted
# y axis) and label the target genes.  Tries up to 20 random permutations
# and keeps the one whose target genes are most evenly spread out, so the
# labels drawn next to them are less likely to overlap.
#
# val            : named numeric vector (names are gene ids)
# targetgenelist : character vector of gene names to highlight/label
# ...            : passed through to plot()
#
# NOTE(review): uses sample() without a fixed seed (the set.seed call in
# this file is commented out), so the layout differs between runs.
plotrandvalues<-function(val,targetgenelist, ...){
# choose the one with the best distance distribution
mindiffvalue=0;
# fallback: keep the original order if no permutation beats mindiffvalue
randval=val;
for(i in 1:20){
randval0=sample(val)
# positions of the target genes within this permutation
vindex=sort(which(names(randval0) %in% targetgenelist))
# reject layouts where a target gene lands in the last 10% of the x
# range (its label, drawn at tx+50, would run off the right edge)
if(max(vindex)>0.9*length(val)){
# print('pass...')
next;
}
# smallest gap between neighbouring target genes; larger is better
mindiffind=min(diff(vindex));
if (mindiffind > mindiffvalue){
mindiffvalue=mindiffind;
randval=randval0;
# print(paste('Diff: ',mindiffvalue))
}
}
plot(randval,log='y',ylim=c(max(randval),min(randval)),pch=20,col='grey', ...)
if(length(targetgenelist)>0){
for(i in 1:length(targetgenelist)){
targetgene=targetgenelist[i];
tx=which(names(randval)==targetgene);ty=randval[targetgene];
# NOTE(review): colors[(i %% length(colors))] indexes 0 when i is a
# multiple of 28, and text() below uses colors[i] unguarded; harmless
# for the 10-gene lists used in this file.
points(tx,ty,col=colors[(i %% length(colors)) ],cex=2,pch=20)
text(tx+50,ty,targetgene,col=colors[i])
}
}
}
# set.seed(1235)
# Ranked plots for this section: column `startindex` (RRA score) and
# column `startindex+1` (p value) of gstable, labelled with samplelabel.
pvec=gstable[,startindex]
names(pvec)=gstable[,'id']
pvec=sort(pvec);
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \\n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \\n',samplelabel))
pvec=gstable[,startindex+1]
names(pvec)=gstable[,'id']
pvec=sort(pvec);
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \\n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \\n',samplelabel))
# you need to write after this code:
# dev.off()
# parameters
# Do not modify the variables beginning with "__"
# Per-sgRNA read counts for SOX9: one c(sample1, sample2) pair per sgRNA
# (non-integer values — presumably normalized counts; TODO confirm units).
targetmat=list(c(1438.0302471412763,661.5066344993968),c(657.659166359277,372.032167269803),c(586.7163408336407,502.6614394853237),c(458.2523054223534,415.9236027342179),c(751.6104758391738,537.1475673502212),c(2135.954260420509,1964.6642541214314))
targetgene="SOX9"
collabel=c("S61_MNSC508_F5","S62_MNSC508_F6")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
# y-range over all counts; the +1 pseudo-count keeps zeros on the log axis
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# First sgRNA sets up the log-y plot and sample axis; the rest add lines.
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
# Per-sgRNA read counts for LRIG1: one c(sample1, sample2) pair per sgRNA
# (non-integer values — presumably normalized counts; TODO confirm units).
targetmat=list(c(2477.2467724087055,2292.8049859268194),c(1522.394688306898,1338.688781664656),c(1665.239026189598,1494.3988741455569),c(3101.351899668019,3074.490550864495),c(4407.0833640722985,4238.658624849216),c(1462.9561047583918,1411.8411741053478))
targetgene="LRIG1"
collabel=c("S61_MNSC508_F5","S62_MNSC508_F6")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
# y-range over all counts; the +1 pseudo-count keeps zeros on the log axis
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# First sgRNA sets up the log-y plot and sample axis; the rest add lines.
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
# Per-sgRNA read counts for ROSA26: one c(sample1, sample2) pair per sgRNA
# (non-integer values — presumably normalized counts; TODO confirm units).
targetmat=list(c(6882.412762818149,7288.068355448331),c(2905.7797860568057,3112.1117812625653),c(3424.4293618590928,3491.4591877764374),c(2446.5687938030246,2530.027744270205),c(1987.357801549244,2009.6007237635704),c(3213.5182589450387,3022.238841978287),c(2194.4341571375876,2024.2312022517087),c(1862.7285134636666,1796.4137515078407),c(771.7428992991516,823.4869320466426),c(2474.370711914423,2676.332529151588))
targetgene="ROSA26"
collabel=c("S61_MNSC508_F5","S62_MNSC508_F6")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
# y-range over all counts; the +1 pseudo-count keeps zeros on the log axis
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# First sgRNA sets up the log-y plot and sample axis; the rest add lines.
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
# Per-sgRNA read counts for KRT20: one c(sample1, sample2) pair per sgRNA
# (non-integer values — presumably normalized counts; TODO confirm units).
targetmat=list(c(1853.1416451493915,1764.0176919983917),c(2573.115455551457,2479.866103739445),c(2113.904463297676,2117.2392440691597),c(1044.9686462559941,1096.2408524326497),c(3260.493913684987,3350.379573783675),c(1630.7263002582074,1489.173703256936))
targetgene="KRT20"
collabel=c("S61_MNSC508_F5","S62_MNSC508_F6")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
# y-range over all counts; the +1 pseudo-count keeps zeros on the log axis
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# First sgRNA sets up the log-y plot and sample axis; the rest add lines.
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
# Per-sgRNA read counts for mKate2: one c(sample1, sample2) pair per sgRNA
# (non-integer values — presumably normalized counts; TODO confirm units).
targetmat=list(c(88.19918849133161,108.68355448331323),c(100.66211729988935,176.610776035384),c(97.78605680560679,159.89022919179735),c(919.3806713389894,949.9360675512665),c(265.55625230542233,238.26779252110975),c(258.8454444854297,313.5102533172497))
targetgene="mKate2"
collabel=c("S61_MNSC508_F5","S62_MNSC508_F6")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
# y-range over all counts; the +1 pseudo-count keeps zeros on the log axis
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# First sgRNA sets up the log-y plot and sample axis; the rest add lines.
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
# Per-sgRNA read counts for AAVS1: one c(sample1, sample2) pair per sgRNA
# (non-integer values — presumably normalized counts; TODO confirm units).
targetmat=list(c(2627.7606049428255,2600.045034177724),c(2324.81556621173,2386.8580619219942),c(1495.5514570269274,1552.92078809811),c(1009.4972334931759,1095.1958182549256),c(1796.5791220951678,1782.8283071974265),c(870.4876429361859,866.3333333333333),c(1425.5673183327185,1310.4728588661037),c(2719.7945407598672,2633.4861278648973),c(845.5617853190705,822.4418978689183),c(1114.9527849502028,1154.7627663852031))
targetgene="AAVS1"
collabel=c("S61_MNSC508_F5","S62_MNSC508_F6")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
# y-range over all counts; the +1 pseudo-count keeps zeros on the log axis
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# First sgRNA sets up the log-y plot and sample axis; the rest add lines.
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
# Per-sgRNA read counts for CCR5: one c(sample1, sample2) pair per sgRNA
# (non-integer values — presumably normalized counts; TODO confirm units).
targetmat=list(c(1284.6403541128734,1284.3470044229996),c(2342.0719291774253,2373.2726176115802),c(1402.5588343784582,1351.2291917973462),c(1080.4400590188122,1047.124246079614),c(1676.7432681667283,1660.559308403699),c(1621.139431943932,1517.3896260554884),c(3141.616746587975,3250.056292722155),c(1940.3821468092956,2058.7173301166063),c(1269.3013648100332,1255.086047446723),c(1772.61195130948,1753.5673502211498))
targetgene="CCR5"
collabel=c("S61_MNSC508_F5","S62_MNSC508_F6")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
# y-range over all counts; the +1 pseudo-count keeps zeros on the log axis
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# First sgRNA sets up the log-y plot and sample axis; the rest add lines.
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
# Per-sgRNA read counts for PROM1: one c(sample1, sample2) pair per sgRNA
# (non-integer values — presumably normalized counts; TODO confirm units).
targetmat=list(c(2048.713758760605,2075.437876960193),c(3214.4769457764664,3241.696019300362),c(2178.1364810033197,2255.1837555287493),c(3640.133898930284,3661.7997587454765),c(1924.0844706750277,2164.265782066747),c(5027.353744005902,5004.668677121029))
targetgene="PROM1"
collabel=c("S61_MNSC508_F5","S62_MNSC508_F6")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
# y-range over all counts; the +1 pseudo-count keeps zeros on the log axis
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# First sgRNA sets up the log-y plot and sample axis; the rest add lines.
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
# Per-sgRNA read counts for CTRL: one c(sample1, sample2) pair per sgRNA
# (non-integer values — presumably normalized counts; TODO confirm units).
targetmat=list(c(1863.687200295094,1971.9794933655005),c(1857.935079306529,1922.8628870124649),c(5973.577646624862,6162.566546039405),c(1056.4728882331244,1013.6831523924407),c(1947.0929546292882,2024.2312022517087),c(3351.569162670601,3641.9441093687174),c(2282.6333456289194,2233.238037796542),c(2183.888601991885,2251.003618817853),c(1937.506086315013,2011.6907921190189),c(3300.758760604943,3279.3172496984316))
targetgene="CTRL"
collabel=c("S61_MNSC508_F5","S62_MNSC508_F6")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
# y-range over all counts; the +1 pseudo-count keeps zeros on the log axis
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# First sgRNA sets up the log-y plot and sample axis; the rest add lines.
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
# Per-sgRNA read counts for EGFP: one c(sample1, sample2) pair per sgRNA
# (non-integer values — presumably normalized counts; TODO confirm units).
targetmat=list(c(2505.0486905201033,2649.16164053076),c(3249.9483585392845,3291.8576598311215),c(1645.1066027296201,1702.3606755126657),c(1731.3884175580968,1801.6389223964616),c(2904.821099225378,3045.2295938882185),c(3318.9738104020657,3454.8829915560914))
targetgene="EGFP"
collabel=c("S61_MNSC508_F5","S62_MNSC508_F6")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
# y-range over all counts; the +1 pseudo-count keeps zeros on the log axis
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# First sgRNA sets up the log-y plot and sample axis; the rest add lines.
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
#
#
# parameters
# Do not modify the variables beginning with "__"
# gstablename='__GENE_SUMMARY_FILE__'
# Second section of the generated script: samplelabel below ('1_vs_0 pos.')
# suggests the positive-selection columns of gstable — TODO confirm against
# the gene_summary header.
startindex=9
# outputfile='__OUTPUT_FILE__'
# Same genes as the first section, highlighted in a different order.
targetgenelist=c("mKate2","ROSA26","EGFP","PROM1","CTRL","AAVS1","CCR5","KRT20","LRIG1","SOX9")
# samplelabel=sub('.\\w+.\\w+$','',colnames(gstable)[startindex]);
samplelabel='1_vs_0 pos.'
# You need to write some codes in front of this code:
# gstable=read.table(gstablename,header=T)
# pdf(file=outputfile,width=6,height=6)
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
# Hard-coded 28-color palette (re-assigned with identical values).
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
######
# function definition
# Draw a ranked-value plot: `val` (a named numeric vector, names are gene
# ids, assumed already sorted) is drawn as a line on a log-scaled y axis
# with the limits inverted (ylim = c(max, min)) so the smallest values sit
# at the top.  Each gene in `tglist` is then highlighted with a colored
# point, and the highlighted genes are listed in a legend.
# (Generated re-definition; byte-identical in behavior to the earlier copy.)
#
# val    : named numeric vector of scores/p-values
# tglist : character vector of gene names to highlight
# ...    : passed through to plot() (xlab / ylab / main)
#
# Relies on the global `colors` palette defined earlier in this file.
plotrankedvalues<-function(val, tglist, ...){
plot(val,log='y',ylim=c(max(val),min(val)),type='l',lwd=2, ...)
if(length(tglist)>0){
for(i in 1:length(tglist)){
targetgene=tglist[i];
tx=which(names(val)==targetgene);ty=val[targetgene];
# Fix: cycle the palette with ((i-1) %% n) + 1, which always lands in
# 1..n.  The original 'i %% n' evaluates to 0 whenever i is a multiple
# of length(colors), selecting no color at all; for all other i both
# forms pick the same color, so existing plots are unchanged.
points(tx,ty,col=colors[((i - 1) %% length(colors)) + 1],cex=2,pch=20)
# text(tx+50,ty,targetgene,col=colors[i])
}
legend('topright',tglist,pch=20,pt.cex = 2,cex=1,col=colors)
}
}
# Scatter-plot `val` in a random order (grey points, log-scaled inverted
# y axis) and label the target genes.  Tries up to 20 random permutations
# and keeps the one whose target genes are most evenly spread out, so the
# labels drawn next to them are less likely to overlap.
# (Generated re-definition; identical to the earlier copy in this file.)
#
# val            : named numeric vector (names are gene ids)
# targetgenelist : character vector of gene names to highlight/label
# ...            : passed through to plot()
#
# NOTE(review): uses sample() without a fixed seed (the set.seed call in
# this file is commented out), so the layout differs between runs.
plotrandvalues<-function(val,targetgenelist, ...){
# choose the one with the best distance distribution
mindiffvalue=0;
# fallback: keep the original order if no permutation beats mindiffvalue
randval=val;
for(i in 1:20){
randval0=sample(val)
# positions of the target genes within this permutation
vindex=sort(which(names(randval0) %in% targetgenelist))
# reject layouts where a target gene lands in the last 10% of the x
# range (its label, drawn at tx+50, would run off the right edge)
if(max(vindex)>0.9*length(val)){
# print('pass...')
next;
}
# smallest gap between neighbouring target genes; larger is better
mindiffind=min(diff(vindex));
if (mindiffind > mindiffvalue){
mindiffvalue=mindiffind;
randval=randval0;
# print(paste('Diff: ',mindiffvalue))
}
}
plot(randval,log='y',ylim=c(max(randval),min(randval)),pch=20,col='grey', ...)
if(length(targetgenelist)>0){
for(i in 1:length(targetgenelist)){
targetgene=targetgenelist[i];
tx=which(names(randval)==targetgene);ty=randval[targetgene];
# NOTE(review): colors[(i %% length(colors))] indexes 0 when i is a
# multiple of 28, and text() below uses colors[i] unguarded; harmless
# for the 10-gene lists used in this file.
points(tx,ty,col=colors[(i %% length(colors)) ],cex=2,pch=20)
text(tx+50,ty,targetgene,col=colors[i])
}
}
}
# set.seed(1235)
# Ranked plots for this section: column `startindex` (RRA score) and
# column `startindex+1` (p value) of gstable, labelled with samplelabel.
pvec=gstable[,startindex]
names(pvec)=gstable[,'id']
pvec=sort(pvec);
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \\n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \\n',samplelabel))
pvec=gstable[,startindex+1]
names(pvec)=gstable[,'id']
pvec=sort(pvec);
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \\n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \\n',samplelabel))
# you need to write after this code:
# dev.off()
# parameters
# Do not modify the variables beginning with "__"
# Per-sgRNA read counts for mKate2: one c(sample1, sample2) pair per sgRNA
# (non-integer values — presumably normalized counts; TODO confirm units).
targetmat=list(c(88.19918849133161,108.68355448331323),c(100.66211729988935,176.610776035384),c(97.78605680560679,159.89022919179735),c(919.3806713389894,949.9360675512665),c(265.55625230542233,238.26779252110975),c(258.8454444854297,313.5102533172497))
targetgene="mKate2"
collabel=c("S61_MNSC508_F5","S62_MNSC508_F6")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
# y-range over all counts; the +1 pseudo-count keeps zeros on the log axis
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# First sgRNA sets up the log-y plot and sample axis; the rest add lines.
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
# Per-sgRNA read counts for ROSA26: one c(sample1, sample2) pair per sgRNA
# (non-integer values — presumably normalized counts; TODO confirm units).
targetmat=list(c(6882.412762818149,7288.068355448331),c(2905.7797860568057,3112.1117812625653),c(3424.4293618590928,3491.4591877764374),c(2446.5687938030246,2530.027744270205),c(1987.357801549244,2009.6007237635704),c(3213.5182589450387,3022.238841978287),c(2194.4341571375876,2024.2312022517087),c(1862.7285134636666,1796.4137515078407),c(771.7428992991516,823.4869320466426),c(2474.370711914423,2676.332529151588))
targetgene="ROSA26"
collabel=c("S61_MNSC508_F5","S62_MNSC508_F6")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
# y-range over all counts; the +1 pseudo-count keeps zeros on the log axis
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# First sgRNA sets up the log-y plot and sample axis; the rest add lines.
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
# Per-sgRNA read counts for EGFP: one c(sample1, sample2) pair per sgRNA
# (non-integer values — presumably normalized counts; TODO confirm units).
targetmat=list(c(2505.0486905201033,2649.16164053076),c(3249.9483585392845,3291.8576598311215),c(1645.1066027296201,1702.3606755126657),c(1731.3884175580968,1801.6389223964616),c(2904.821099225378,3045.2295938882185),c(3318.9738104020657,3454.8829915560914))
targetgene="EGFP"
collabel=c("S61_MNSC508_F5","S62_MNSC508_F6")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
# y-range over all counts; the +1 pseudo-count keeps zeros on the log axis
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# First sgRNA sets up the log-y plot and sample axis; the rest add lines.
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
# Per-sgRNA read counts for PROM1: one c(sample1, sample2) pair per sgRNA
# (non-integer values — presumably normalized counts; TODO confirm units).
targetmat=list(c(2048.713758760605,2075.437876960193),c(3214.4769457764664,3241.696019300362),c(2178.1364810033197,2255.1837555287493),c(3640.133898930284,3661.7997587454765),c(1924.0844706750277,2164.265782066747),c(5027.353744005902,5004.668677121029))
targetgene="PROM1"
collabel=c("S61_MNSC508_F5","S62_MNSC508_F6")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
# y-range over all counts; the +1 pseudo-count keeps zeros on the log axis
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# First sgRNA sets up the log-y plot and sample axis; the rest add lines.
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
# Per-sgRNA read counts for CTRL: one c(sample1, sample2) pair per sgRNA
# (non-integer values — presumably normalized counts; TODO confirm units).
targetmat=list(c(1863.687200295094,1971.9794933655005),c(1857.935079306529,1922.8628870124649),c(5973.577646624862,6162.566546039405),c(1056.4728882331244,1013.6831523924407),c(1947.0929546292882,2024.2312022517087),c(3351.569162670601,3641.9441093687174),c(2282.6333456289194,2233.238037796542),c(2183.888601991885,2251.003618817853),c(1937.506086315013,2011.6907921190189),c(3300.758760604943,3279.3172496984316))
targetgene="CTRL"
collabel=c("S61_MNSC508_F5","S62_MNSC508_F6")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
# y-range over all counts; the +1 pseudo-count keeps zeros on the log axis
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# First sgRNA sets up the log-y plot and sample axis; the rest add lines.
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
# Per-sgRNA read counts for AAVS1: one c(sample1, sample2) pair per sgRNA
# (non-integer values — presumably normalized counts; TODO confirm units).
targetmat=list(c(2627.7606049428255,2600.045034177724),c(2324.81556621173,2386.8580619219942),c(1495.5514570269274,1552.92078809811),c(1009.4972334931759,1095.1958182549256),c(1796.5791220951678,1782.8283071974265),c(870.4876429361859,866.3333333333333),c(1425.5673183327185,1310.4728588661037),c(2719.7945407598672,2633.4861278648973),c(845.5617853190705,822.4418978689183),c(1114.9527849502028,1154.7627663852031))
targetgene="AAVS1"
collabel=c("S61_MNSC508_F5","S62_MNSC508_F6")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
# y-range over all counts; the +1 pseudo-count keeps zeros on the log axis
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# First sgRNA sets up the log-y plot and sample axis; the rest add lines.
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(1284.6403541128734,1284.3470044229996),c(2342.0719291774253,2373.2726176115802),c(1402.5588343784582,1351.2291917973462),c(1080.4400590188122,1047.124246079614),c(1676.7432681667283,1660.559308403699),c(1621.139431943932,1517.3896260554884),c(3141.616746587975,3250.056292722155),c(1940.3821468092956,2058.7173301166063),c(1269.3013648100332,1255.086047446723),c(1772.61195130948,1753.5673502211498))
targetgene="CCR5"
collabel=c("S61_MNSC508_F5","S62_MNSC508_F6")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(1853.1416451493915,1764.0176919983917),c(2573.115455551457,2479.866103739445),c(2113.904463297676,2117.2392440691597),c(1044.9686462559941,1096.2408524326497),c(3260.493913684987,3350.379573783675),c(1630.7263002582074,1489.173703256936))
targetgene="KRT20"
collabel=c("S61_MNSC508_F5","S62_MNSC508_F6")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(2477.2467724087055,2292.8049859268194),c(1522.394688306898,1338.688781664656),c(1665.239026189598,1494.3988741455569),c(3101.351899668019,3074.490550864495),c(4407.0833640722985,4238.658624849216),c(1462.9561047583918,1411.8411741053478))
targetgene="LRIG1"
collabel=c("S61_MNSC508_F5","S62_MNSC508_F6")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(1438.0302471412763,661.5066344993968),c(657.659166359277,372.032167269803),c(586.7163408336407,502.6614394853237),c(458.2523054223534,415.9236027342179),c(751.6104758391738,537.1475673502212),c(2135.954260420509,1964.6642541214314))
targetgene="SOX9"
collabel=c("S61_MNSC508_F5","S62_MNSC508_F6")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in 1:length(targetmat)){
vali=targetmat[[i]]+1;
if(i==1){
plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[(i %% length(colors))],xaxt='n',log='y')
axis(1,at=1:length(vali),labels=(collabel),las=2)
# lines(0:100,rep(1,101),col='black');
}else{
lines(1:length(vali),vali,type='b',pch=20,col=colors[(i %% length(colors))])
}
}
dev.off()
Sweave("S8B_c0_t1_summary.Rnw");
library(tools);
texi2dvi("S8B_c0_t1_summary.tex",pdf=TRUE);
|
85543ac3ee8fbd6563f735bc399a3a0afaafd8c8
|
d14bcd4679f0ffa43df5267a82544f098095f1d1
|
/inst/apps/figure2_1/ui.R
|
686e7a58211723f4c8d52561106fc3c3661da381
|
[] |
no_license
|
anhnguyendepocen/SMRD
|
9e52aa72a5abe5274f9a8546475639d11f058c0d
|
c54fa017afca7f20255291c6363194673bc2435a
|
refs/heads/master
| 2022-12-15T12:29:11.165234
| 2020-09-10T13:23:59
| 2020-09-10T13:23:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 733
|
r
|
ui.R
|
# Shiny UI for the figure 2.1 app: an Ace code editor (left) pre-loaded with a
# SMRD::distribution.plot() example, an "Evaluate" button, and the rendered
# plot (right). Input ids read by the matching server: "fig1plot" (editor
# contents) and "evalfig1" (button); output id "plotfig1" is filled by the
# server.
# NOTE(review): theme/CSS come from the SMRD package; the effect of
# getShinyOption("theme") depends on how the surrounding app sets that option
# — confirm against the server file. `wordWrap = T` uses the reassignable
# alias T; prefer TRUE (left unchanged here).
ui = fluidPage(theme = SMRD::add_theme(getShinyOption("theme")),
               SMRD::add_css(),
   sidebarLayout(
     sidebarPanel(width = 5,
        shinyAce::aceEditor(fontSize = 16,
                            wordWrap = T,
                            outputId = "fig1plot",
                            mode = "r",
                            theme = "github",
                            height = "450px",
                            value =
"par(family = 'serif',font = 2)
library(package = SMRD)
distribution.plot('Weibull',
                  shape = c(1.7),
                  scale = 1,
                  prob.range=c(.000001,.99))"),
   actionButton("evalfig1", "Evaluate")),
   mainPanel(plotOutput("plotfig1"), width = 7)))
|
4ed98e5634faf7e78d7bec9759e4d9f8de461b09
|
6712bba926f2849df7121018791f1217cf8c448e
|
/tests/testthat/test-compute_rf_lp.R
|
a705fca22be134e47de60c8f27346b7033676e56
|
[] |
no_license
|
minghao2016/Rforestry
|
96277dec9d354483850056430f1ef94e872422c6
|
0fe69ef17c10a3adf3de1a887b5f8e5e177ac2e4
|
refs/heads/master
| 2023-03-28T11:51:57.771759
| 2021-04-01T18:29:34
| 2021-04-01T18:29:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,626
|
r
|
test-compute_rf_lp.R
|
# Regression test for compute_lp(): per-observation L^p distances between a
# held-out test set and a trained forestry random forest, measured along a
# single feature. The exact expected values below are seed-dependent, so the
# statement order (which consumes the RNG stream) must not change.
# NOTE(review): context() inside test_that() is deprecated in testthat 3e.
test_that("Tests that compute the lp distances works correctly", {
  context('Test lp distances')
  # Set seed for reproducibility (expectations below depend on it)
  set.seed(292313)
  # Use Iris Data: hold out 11 rows; response is column 1 (Sepal.Length)
  test_idx <- sample(nrow(iris), 11)
  x_train <- iris[-test_idx, -1]
  y_train <- iris[-test_idx, 1]
  x_test <- iris[test_idx, -1]
  # Create a random forest (nthread = 1 keeps results deterministic)
  rf <- forestry(x = x_train, y = y_train, nthread = 1)
  # Compute the l1 distances in the "Species" dimension
  distances_1 <- compute_lp(object = rf,
                            feature.new = x_test,
                            feature = "Species",
                            p = 1)
  # Compute the l2 distances in the "Petal.Length" dimension
  distances_2 <- compute_lp(object = rf,
                            feature.new = x_test,
                            feature = "Petal.Length",
                            p = 2)
  # One distance per test observation
  expect_identical(length(distances_1), nrow(x_test))
  expect_identical(length(distances_2), nrow(x_test))
  # Exact values are platform-dependent, so they are only checked on macOS.
  #set tolerance
  skip_if_not_mac()
  expect_equal(distances_1,
               c(0.74127647652339, 0.56269154186560, 0.66700207007833, 0.48143305071905,
                 0.42691537245113, 0.79361471149614, 0.69064814060102, 0.60005881782247,
                 0.77731344373143, 0.53970499669885, 0.67328392159715),
               tolerance = 1e-12)
  expect_equal(distances_2,
               c(2.3726809930918, 2.4972611231916, 2.7047479310938, 1.9000801210562,
                 1.6384876050554, 2.4063455932161, 2.1012051982558, 2.4272638737974,
                 3.0785442045313, 2.4121460046764, 2.2978840528426),
               tolerance = 1e-12)
})
|
7acca9a8abf666041d7e62a19dbf9a7b3be7f156
|
305d48a0ec1fc0829e53751aa11bf69471942edb
|
/CO2project.R
|
dd0d634cf61050a7aef1627d35835b76692043e8
|
[] |
no_license
|
nathanbrunner/timeseries
|
e6146dbfea5186a42dbedf1a5d64e17b5b56f16f
|
9ab29bebd6c6d5139d610dcd355b797b77b646d3
|
refs/heads/master
| 2020-04-29T19:30:12.136083
| 2019-03-19T19:00:16
| 2019-03-19T19:00:16
| 176,356,942
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 465
|
r
|
CO2project.R
|
## Read the cleaned Jubany station data (tab-separated, with header).
jubany <- read.table("JubanyCleaned.txt", header = TRUE, sep = "\t")

## Build a monthly time series from column 2, starting March 1994.
co2 <- ts(jubany[, 2], frequency = 12, start = c(1994, 3))

## Plot the raw series.
plot(co2, main = "CO2 in Jubany", xlab = "Years",
     ylab = "Atmospheric CO2 (ppmv)")

# STL decomposition, interpolating missing values with zoo::na.approx.
# NOTE(review): install.packages("zoo") was called inside the script; runtime
# installation is a side effect — fail with a clear message instead and
# install once, outside the analysis script. Also removed the stray `?ts`
# (opened a help page mid-run).
if (!requireNamespace("zoo", quietly = TRUE)) {
  stop("Package 'zoo' is required: install.packages(\"zoo\")")
}
library("zoo")

# Renamed from `stl` so the fitted object does not shadow stats::stl().
stl_fit <- stl(co2, na.action = na.approx, s.window = 12)
plot(stl_fit, main = "STL decomposition")
|
02dfe15a3631d524929620f4cb871ce90438fd2e
|
c9ad1f541dcc62dd8731e616d57d7aa3c9415a57
|
/scripts/Cleaning and Organize.R
|
4a6f979e594084ebe3014ce32f3d8df656bf1472
|
[] |
no_license
|
xiaofeifei1800/Car-price-Prediction
|
b941cc97303cc06eea112786b8c3772a30b1f183
|
4cd35f8c4ac871611f19e5ece0a0e86b50a8d011
|
refs/heads/master
| 2020-04-15T16:35:41.188500
| 2016-02-01T18:27:58
| 2016-02-01T18:27:58
| 50,702,280
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,903
|
r
|
Cleaning and Organize.R
|
```{r setup}
library(knitr) # for changing workspace
library(stringr) # for using some regex functions
opts_chunk$set(root.dir = 'I:/R Data/141')
options(width = 110)
load("vehicles.rda")
vehicle = vposts # make a copy of the orignial data
body = vehicle$body
model_google = read.csv("model-google.csv")
```
```{r function}
# Extract the first case-insensitive match of `regex` from each post.
#
# Args:
#   regex : regular expression (character scalar)
#   data  : character vector of posts to search
# Returns an unnamed list (positional access kept for existing callers):
#   [[1]] first match per post, NA_character_ when the pattern is absent
#   [[2]] number of posts that contain the pattern
#   [[3]] the posts with no match (useful for refining the pattern)
value = function(regex, data)
{
  # All matches per post; ignore.case() preserves the original (deprecated
  # stringr) case-insensitive behaviour — regex(ignore_case = TRUE) is the
  # modern equivalent.
  result_fun = str_extract_all(data, ignore.case(regex))
  # First match per post. vapply pins the character return type: the original
  # sapply collapsed to a logical vector when nothing matched at all.
  value_fun = vapply(result_fun, function(x) {
    if (length(x) > 0) x[1]
    else NA_character_
  }, character(1))
  # How many posts contain the pattern.
  # (The original `table(is.na(value_fun))[1]` reported the *non*-match count
  # whenever zero posts matched, because the table then had no FALSE cell.)
  number = sum(!is.na(value_fun))
  # The posts that don't have that pattern.
  check = data[is.na(value_fun)]
  return(list(value_fun, number, check))
}
```
**Extract the price being asked for the vehicle from the body column, if it is present, and check if it agrees with the actual price in the price column.**
```{r}
# regular expression for price
price = "\\$?(\\d{0,3}[,.]?)*\\d{1,3}"
price_value = value(price, body)
# how many posts have price in body
price_value[2]
# Strip "$", "," and "." before converting to numeric.
# NOTE(review): the original pattern "\\$|,|." removed EVERY character — the
# unescaped "." matches anything — so as.numeric() yielded all-NA prices.
# A character class matches only the three intended characters.
price_value = as.numeric(gsub("[$,.]", "", price_value[[1]]))
# compare the price from "body" and from price column
table(price_value == vposts$price)
```
**Extract a Vehicle Identication Number (VIN) from the body, if it is present. We could use this to both identify details of the car (year it was built, type and model of the car, safety features, body style, engine type, etc.) and also use it to get historical information about the particular car. Add the VIN, if available,to the data frame. How many postings include the VIN?**
```{r}
# regular expression for vin number
vin = "VIN[ :-]? ?\\d*\\w{1,7}\\d{1,3}\\w{1,5}\\d{4,8}"
vin_num = value(vin,body)
# how many posts have price in body
vin_num[2]
```
```{r}
# rough way to get the vin number
vin = "[A-HJ-NPR-Z0-9]{17}"
vin_num1 = value(vin,body)
# how many posts have price in body
vin_num1[2]
# look it the value
VIN = data.frame(vin_num1[[1]])
#View(VIN)
vposts$vin = vin_num[[1]]
```
**Extract phone numbers from the body column, and again add these as a new column. How many posts include a phone number?**
```{r}
# regular expression for vin number
phone = "\\(?\\d{3}\\)?[ ]?[ -]?\\d{3}-? ?\\d{4}"
phone_num = value(phone,body)
# how many posts have price in body
phone_num[2]
```
**Extract email addresses from the body column, and again add these as a new column. How many posts include an email address?**
```{r}
# regular expression for vin number
email = "[[:alnum:]|[:punct:]]+@[[:alnum:]|[:punct:]]+?\\.(com|net|org|edu|gov){1}"
email_add = value(email,body)
# how many posts have price in body
email_add[2]
# grep the posts have "email"
email = body[grepl("email", body, ignore.case = TRUE)]
# how many posrs have email
length(email)
# look at some posts have "email"
email[1:3]
```
**Find the year in the description or body and compare it with the value in the year column.**
```{r}
# regular expression for year
year = " ?(19|20)\\d{2} ?"
year_b = value(year,body)
# how many posrs have year
year_b[2]
# compare to the year column
year_num = as.numeric(year_b[[1]])
table(year_num == vposts$year)
```
**Determine the model of the car, e.g., S60, Boxter, Cayman, 911, Jetta. This includes correcting mis-spelled or abbreviated model names. You may find the agrep() function useful. You should also use statistics, i.e., counts to see how often a word occurs in other posts and if such a spelling is reasonable, and whether this model name has been seen with that maker often. When doing these questions, you will very likely have to iterate by developing a regular expression, and seeing what results it gives you and adapting it. Furthermore, you will probably have to use two or more strategies when looing for a particular piece of information. This is expected; the data are not nice and regularly formatted.**
```{r}
# get the model from the "title" column
#title = "\\d{2,4} ([A-z]+[:punct:]?[A-z]+) ([A-z0-9]+)"
#new_title = value(title,vposts$title)
#model = gsub(".*\\d{2,4} ([A-z]+) ([A-z0-9]+).*" , "\\2", new_title[[1]])
#model = casefold(model, upper = F)
# save it as csv (won't do it again, since I already saved it)
#vposts$model = model
#write.csv(vposts[,c(1,28)], file = "model.csv")
# read the sort data from google refine
#model_google = read.csv("model-google.csv")
# redefine model column
vposts$model = model_google[,3]
```
|
a4a8fe0d3b7bef0a709dc7f88fb26cd564cbe445
|
047a9613723e6477e68624cb9c3aeb08bc1d4c8a
|
/gui/server_logic/align.R
|
c08022bf3efb70820bf8537f2a46890230f98530
|
[
"MIT"
] |
permissive
|
phoenixding/cellar
|
512ca1c8fe580081b49ceadf7dfa81648739658d
|
4736b207e2b40a135f88cc2e677af620ea5ef20d
|
refs/heads/master
| 2023-02-22T05:42:30.902617
| 2021-01-31T16:39:20
| 2021-01-31T16:39:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,180
|
r
|
align.R
|
# Shiny module server: transfer cell labels from a reference dataset onto the
# currently loaded AnnData object ("label alignment"), then refresh the plots.
#
# Arguments:
#   input/output/session : standard Shiny module arguments
#   adata                : reactiveVal holding the working AnnData object
#   selDatasetAlign      : reactiveVal for the chosen reference dataset path
#   replot/reset/relabel/resubset/reinfo : counter reactiveVals bumped to
#       trigger redraws (NOTE(review): relabel and reinfo are never used here)
#   second_plot_path     : reactive path of the side-plot dataset
#   double_plot          : reactive flag; TRUE when a side plot is shown
align <- function(input, output, session, adata, selDatasetAlign,
                  replot, reset, relabel, resubset, reinfo,
                  second_plot_path, double_plot) {
    # Holds the reference AnnData while a transfer is in flight; reset to 0
    # (sentinel for "empty") once the transfer finishes.
    adataAlign <- reactiveVal(0)

    observeEvent(input$align_btn, {
        # Nothing to align onto until a dataset is loaded.
        if (py_to_r(is_active(adata())) == FALSE) {
            showNotification("Please load the data first.")
            return()
        }

        # Resolve the reference dataset path from the selected source.
        if (input$folder_align == 'user_uploaded') {
            req(input$reference_dataset)
            isolate(selDatasetAlign(input$reference_dataset$datapath))
        } else if (input$folder_align == 'server') {
            path = input$server_dataset_align
            path = paste0('datasets/annotated/', path)
            isolate(selDatasetAlign(path))
        } else if (input$folder_align == 'side_plot') {
            if (double_plot() == FALSE) {
                showNotification("No Side Plot found.")
                return()
            }
            path = isolate(second_plot_path())
            isolate(selDatasetAlign(path))
        } else {
            showNotification("Dataset Group not found.")
            return()
        }

        # NOTE(review): `msg` is assigned inside the withProgress() expression
        # and read after it (bottom of this observer); this relies on the
        # expression being evaluated in this environment. The early `return()`s
        # below exit the observer without notifying the user of `msg` — confirm
        # whether that is intended.
        withProgress(message = "Running Label Transfer", value = 0, {
            n <- 5

            if (input$align_method == 'SingleR') {
                incProgress(1 / n, detail = paste("Step: Reading data"))
                isolate(adataAlign(safe_load_file(selDatasetAlign())))
                # safe_load_file returns a string on failure.
                if (py_to_r(is_str(adataAlign()))) {
                    msg <- "Incorrect file format."
                    return()
                }

                incProgress(1 / n, detail = paste("Step: Running SingleR"))
                # transpose rows and cols for SingleR (genes x cells)
                labels = py_to_r(get_labels(adataAlign()))
                if (labels == "No labels found") {
                    msg <- "No labels found. Please populate adata.obs['labels'] key."
                    return()
                }
                x1 = t(py_to_r(get_x(adata())))
                rownames(x1) = py_to_r(get_var_names(adata()))
                colnames(x1) = py_to_r(get_obs_names(adata()))
                x2 = t(py_to_r(get_x(adataAlign())))
                rownames(x2) = py_to_r(get_var_names(adataAlign()))
                colnames(x2) = py_to_r(get_obs_names(adataAlign()))

                print("Running SingleR.")
                pred <- SingleR(test = x1, ref = x2,
                                labels = labels)
                pred = as.numeric(pred$pruned.labels)
                # low quality cells: pruned.labels is NA for them; assign the
                # next unused integer label
                pred[is.na(pred)] <- max(pred[!is.na(pred)]) + 1

                # Write transferred labels back into the working object.
                msg <- cellar$safe(store_labels,
                                   adata = adata(),
                                   labels = as.integer(pred),
                                   method = 'SingleR')

                if (!is_error(msg)) {
                    msg <- cellar$safe(merge_cluster_names,
                                       adata = adata(),
                                       ref = adataAlign())
                }
            } else {
                # All non-SingleR methods are delegated to cellar.
                incProgress(1 / n, detail = paste("Step: Running Ingest. This may take a while."))
                msg <- cellar$safe(cellar$transfer_labels,
                                   x = adata(),
                                   ref = selDatasetAlign(),
                                   method = input$align_method,
                                   inplace = TRUE
                )
            }

            # Drop the reference object (back to the 0 sentinel).
            isolate(adataAlign(0))
            if (is_error(msg)) return()

            incProgress(1 / n, detail = "Converting names")
            msg <- cellar$safe(cellar$name_genes,
                               x = adata(),
                               inplace = TRUE
            )
            if (is_error(msg)) return()

            # Recompute the 2-D embedding for display (reuses an existing one
            # when check_if_exists allows it).
            incProgress(1 / n, detail = "Visualizing")
            msg <- cellar$safe(cellar$reduce_dim_vis,
                               x = adata(),
                               method = input$vis_method,
                               dim = 2,
                               use_emb = TRUE,
                               inplace = TRUE,
                               check_if_exists = TRUE)
            if (is_error(msg)) return()
        })

        # Surface any error to the user, then trigger downstream redraws.
        if (is_error(msg, notify=TRUE)) return()

        replot(replot() + 1)
        reset(reset() + 1)
        resubset(resubset() + 1)
    })
}
|
f9147ba325364354b3c0457f0bf38573e152eafa
|
3f5bb254c513db86480f5e38dafb3dc58b6d652a
|
/app.R
|
830313061adb1a674104220fe47b81e347e42d8f
|
[] |
no_license
|
Karagul/finance-1
|
28f79a7c8e03f0038041eba005cd1ed13403a86b
|
167cfebc7f6ac929f2945f0a0b9d74232237c6d1
|
refs/heads/master
| 2020-06-15T08:45:35.645011
| 2019-06-13T17:53:17
| 2019-06-13T17:53:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,686
|
r
|
app.R
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#

library(shiny)
library(dplyr)
library(ggplot2)
library(scales)
library(ggthemes)
library(reshape2)

# Company/year value-distribution data (semicolon-separated, decimal comma).
base <- read.csv2("base.csv", check.names = FALSE)

# Custom palette (currently unused: the plot uses scale_fill_economist()).
fill <- c("#40b8d0", "#b2d183", "#56B4E9", "#26A4E9", "#f2d183")

# Keep company, year and the four value-distribution components.
basegg <- select(base, EMPRESA, ANO,
                 `Remuneração de Capitais de Terceiros`,
                 `Remuneração de Capitais Próprios`,
                 `Impostos, Taxas e Contribuições`, Pessoal)

# Clamp negative component values to zero.
# NOTE(review): the original `basegg[basegg < 0] <- 0` compared *every*
# column, including the non-numeric EMPRESA column; restrict the comparison
# to the four component columns.
basegg[, 3:6][basegg[, 3:6] < 0] <- 0

# Express each component as a share of the yearly total, then drop the total.
basegg$total <- basegg$`Remuneração de Capitais de Terceiros` +
  basegg$`Remuneração de Capitais Próprios` +
  basegg$`Impostos, Taxas e Contribuições` + basegg$Pessoal
basegg[, 3:6] <- basegg[, 3:6] / basegg[, 7]
basegg <- basegg[, -7]

# Long format: one row per company/year/component for stacked bars.
basegg <- melt(basegg, id = c("EMPRESA", "ANO"))

# Quick structure check. str() already prints its output; the original
# print(str(base)) additionally printed NULL.
str(base)

# UI: company selector plus the 100%-stacked bar chart.
ui <- fluidPage(

   # Application title
   titlePanel("Estrutura de Capital"),

   sidebarLayout(
      sidebarPanel(selectInput("empresas", "Empresas",
                   choices = unique(basegg$EMPRESA), selected = "Ambev S/A")),
      mainPanel(plotOutput("coolplot")))
)

# Server: filter to the selected company and draw the stacked shares.
server <- function(input, output) {

   output$coolplot <- renderPlot({
     filtered <-
       basegg %>%
       filter(EMPRESA == input$empresas)

     ggplot() + theme_economist() + scale_fill_economist() +
       theme(plot.title = element_text(family = "OfficinaSanITC-Book"),
             text = element_text(family = "OfficinaSanITC-Book")) +
       geom_bar(aes(y = value * 100, x = ANO, fill = variable),
                data = filtered, stat = "identity") +
       # Percentage labels centred within each stacked segment.
       geom_text(data = filtered,
                 aes(x = ANO, y = round(value * 100), group = variable,
                     label = paste0(round(value * 100, 0), "%")),
                 colour = "white", family = "OfficinaSanITC-Book", size = 5,
                 position = position_stack(vjust = 0.5)) +
       theme(legend.position = "bottom", legend.direction = "horizontal",
             legend.title = element_blank()) +
       scale_x_continuous(breaks = seq(min(filtered$ANO), max(filtered$ANO), 1)) +
       scale_y_continuous(labels = dollar_format(suffix = "%", prefix = "")) +
       labs(x = "Ano", y = "Porcentagem") +
       ggtitle("Estrutura de Capital de Empresas") +
       theme(legend.text = element_text(size = 8))
   })
}

# Run the application
shinyApp(ui = ui, server = server)
|
a9259a360225842cfc09816c370f2c0dcc2fae1e
|
38d166ede31183e2121388be0f66fe9d7ac4e93a
|
/man/parse_taxonomy_amptk.Rd
|
0a11518806e4f1cf2d232dccf7db033130b52173
|
[
"MIT"
] |
permissive
|
vmikk/metagMisc
|
a01151347b620745b278265700e503dc74669af5
|
310b1a40951de46348084e150d7471ed66feb0c8
|
refs/heads/master
| 2023-08-31T08:41:27.684905
| 2023-08-28T10:09:50
| 2023-08-28T10:09:50
| 76,531,351
| 38
| 12
|
MIT
| 2019-07-29T06:12:12
| 2016-12-15T06:40:05
|
R
|
UTF-8
|
R
| false
| true
| 1,606
|
rd
|
parse_taxonomy_amptk.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parse_amptk_taxonomy_table.R
\name{parse_taxonomy_amptk}
\alias{parse_taxonomy_amptk}
\title{Parse taxonomy string from AMPtk (for single OTU)}
\usage{
parse_taxonomy_amptk(x, custom_ranks = NULL)
}
\arguments{
\item{x}{Character vector of length 1 with taxonomic annotation of single OTU/species.}
\item{custom_ranks}{Named vector with customized prefixes for taxonomic ranks}
}
\value{
Named character vector with taxonomic ranks of OTU or species.
}
\description{
Parse taxonomy string from AMPtk (for single OTU)
}
\details{
This function splits an AMPtk-style taxonomy string (comma-separated ranks with single- or multi-letter prefixes, e.g. \code{"k:Fungi,p:Ascomycota"}) into a named character vector of taxonomic ranks.
}
\examples{
parse_taxonomy_amptk("k:Fungi,p:Zygomycota,o:Mortierellales,f:Mortierellaceae,g:Mortierella,s:Mortierella parvispora")
parse_taxonomy_amptk("k:Fungi,p:Ascomycota,g:Chalara")
# With customized ranks
parse_taxonomy_amptk(
x = "do:Eukaryota,su:Amorphea,cd:Obazoa,gr:Opisthokonta,sg:Holomycota(Nucletmycea),ki:Fungi,sk:Dikarya,ph:Ascomycota,sd:Saccharomycotina,cl:Saccharomycetes,sc:Saccharomycetidae,or:Saccharomycetales,fa:Saccharomycetaceae,ge:Saccharomyces,sp:Saccharomyces cerevisiae,st:Saccharomyces cerevisiae var boulardii",
custom_ranks = c(do = "Domain", su = "Supergroup", cd = "Clade", gr = "Group", sg = "Subgroup", ki = "Kingdom", sk = "Subkingdom", ph = "Phylum", sd = "Subdivision", cl = "Class", sc = "Subclass", or = "Order", fa = "Family", ge = "Genus", sp = "Species", st = "Strain"))
}
\references{
AMPtk: Amplicon ToolKit for NGS data (formally UFITS): https://github.com/nextgenusfs/amptk
}
\seealso{
\code{\link{parse_taxonomy_qiime}}
}
|
99d0636cd25d716efbcaf4ba79191521c7721b62
|
c2cda77b5f11d371184ab932dbea876085fdb1da
|
/R/slices.R
|
4380846ed8f878e1dc87e75e217573795fe02b50
|
[] |
no_license
|
kkholst/neurocdf
|
2b14d25d7c5546ea3971a496f26143c957af8eba
|
44578e586d7f4c493c1b489a3f69c29696b5c8e2
|
refs/heads/master
| 2021-01-23T13:30:30.527274
| 2014-11-13T12:08:37
| 2014-11-13T12:08:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,208
|
r
|
slices.R
|
##' Generic for the three-planes slice viewer; dispatches on the class of
##' \code{object}. See \code{slices.default} for the main implementation.
##' @param object volume to display (method-specific)
##' @param ... passed on to methods
##' @export
slices <- function(object,...) UseMethod("slices")
##' Plot neuroimaging slices in three planes
##'
##' Wrapper of misc3d::slices3d
##' @title Plot neuroimaging slices in three planes
##' @param object Volume
##' @param olay Overlay (optional)
##' @param rlim1 Limits of voxel-values to plot of volume
##' @param rlim2 Limits of voxel-values to plot of overlay
##' @param col1 Color of volume voxels
##' @param col2 Color of overlay voxels
##' @param center Choice of color scale. 0: color range from 0 to maximum value;
##' 1: color range minimum to maximum value;
##' 2: color range symmetric: (-1;1)*(signed maximum value);
##' or a vector with the min and max value.
##' @param rev.col2 Reverse col2
##' @param center.global color ramge from slice or volume
##' @param main Main label
##' @param scale scale
##' @param alpha transparency
##' @param cross if TRUE a cross is plotted at active voxel
##' @param layout Layout
##' @param origin origin (conversion to real-world coordinates)
##' @param voxelsize voxel-size (conversion to real-world coordinates)
##' @param start Start at voxel
##' @param var Variable in neuro netCDF file
##' @param id Image number in 'Variable'
##' @param digits number of digits to show in legend
##' @param ... Additional arguments to lower level arguments
##' @export
##' @method slices default
slices.default <- function (object, olay = NULL, rlim1 = c(-Inf, Inf),
rlim2 = NULL, col1 = gray.colors(255), col2 =
rev(rainbow(15,start=0,end=0.69)), center=1, rev.col2=FALSE, center.global=TRUE, main = "Three Planes View", scale =
0.8, alpha = 0.5, cross = TRUE, layout = c(
"clockwise","counterclockwise"), origin=c(45,63,36), voxelsize=c(2,2,2), start,
var="GlobalImage", id=1, digits=2, ...)
{
if (!require(tkrplot)) stop("tkrplot required")
if (is.character(object)) {
dd <- neurocdf:::dim.neurocdf(object)
voxelsize <- dd$voxelsize
origin <- dd$origin
if (!is.null(olay) && length(dim(olay))<3)
olay <- mkNeuro(object,olay)
object <- fetch(object,var=var,id=id,...)
}
if (!is.null(olay) & center.global) {
mi.glob <- min(olay,na.rm=TRUE)
ma.glob <- max(olay,na.rm=TRUE)
}
if (rev.col2) col2 <- rev(col2)
mkimg <- function(which) {
switch(which,
x = {
i <- 1
j <- 2
k <- 3
}, y = {
i <- 2
j <- 1
k <- 3
}, z = {
i <- 3
j <- 1
k <- 2
})
f <- function() {
opar = par(mar = c(0, 0, 0, 0))
on.exit(par(opar))
if (!(is.array(col)))
image(misc3d:::vslice(vol, which, bb[i], bb[4]), col = col,
zlim = rlim1)
else {
v <- switch(which, x = matrix(1:(d[2] * d[3]),
nrow = d[2]), y = matrix(1:(d[1] * d[3]), nrow = d[1]),
z = matrix(1:(d[1] * d[2]), nrow = d[1]))
image(v, col = misc3d:::vslice(col, which, bb[i], bb[4]))
}
lines(rep(bb[j]/d[j], 100), seq(0, 1, len = 100))
lines(seq(0, 1, len = 100), rep(bb[k]/d[k], 100))
}
tkrplot(tt, f, hscale = 0.8, vscale = 0.8)
}
mkscale <- function(i) {
f <- function(...) {
b <- as.numeric(tclvalue(bbv[[i]]))
if (b != bb[i]) {
bb[i] <<- b
if (cross || i == 4)
for (j in 1:3) tkrreplot(img[[j]])
else tkrreplot(img[[i]])
tkrreplot(infobox)
## tkconfigure(le, textvariable = bb[i])
}
}
fr <- tkframe(tt)
s <- tkscale(fr, command = f, from = 1, to = d[i], resolution = 1,
variable = bbv[[i]], showvalue = FALSE, orient = "horiz")
le <- tkentry(fr,textvariable=bbv[[i]],width=4)
l1 <- tklabel(fr, text = dn[i])
l2 <- tkbutton(fr,text="Goto",command=f)
tkgrid(l1, s, le, l2)
fr
}
move <- function(which) {
if (lay == "clockwise") {
switch(which, x = {
i <- 1
j <- 2
k <- 3
}, y = {
i <- 2
j <- 1
k <- 3
}, z = {
i <- 3
j <- 1
k <- 2
})
}
else {
switch(which, y = {
i <- 1
j <- 2
k <- 3
}, x = {
i <- 2
j <- 1
k <- 3
}, z = {
i <- 3
j <- 1
k <- 2
})
}
tkbind(img[[i]], "<Button-1>", function(x, y) {
wid <- as.integer(tkwinfo("width", img[[i]]))
hei <- as.integer(tkwinfo("height", img[[i]]))
if (lay == "clockwise" || which == "z")
bb[j] <<- round(as.numeric(x)/wid * d[j])
else bb[i] <<- round(as.numeric(x)/wid * d[i])
bb[k] <<- d[k] - round(as.numeric(y)/hei * d[k])
##bb <- round(bb)
for (j in 1:3) {
tclvalue(bbv[[j]]) <<- as.character(round(bb[j]))
tkrreplot(img[[j]])
}
tkrreplot(infobox)
})
}
overlay <- function(object, olay, rlim1, rlim2, col1, col2,
alpha) {
choose1 <- !is.na(object) & (object <= rlim1[2] & object >= rlim1[1])
object <- floor((length(col1) - 0.01) * (object - min(object,na.rm=TRUE))/
(max(object,na.rm=TRUE) - min(object,na.rm=TRUE)) + 1)
objectc <- col1[object]
objectc[!choose1] <- "white"
choose2 <- !is.na(olay) & (olay <= rlim2[2] & olay >= rlim2[1])
if (center.global) {
mi <- mi.glob; ma <- ma.glob
} else {
mi <- min(olay[choose2]); ma <- max(olay[choose2])
}
m <- max(abs(c(mi,ma)))
if (center==0) {
if (ma<0) colrg <- (olay+mi)/mi else colrg <- olay/ma
}
if (center==1) colrg <- (olay - mi)/(ma-mi)
if (center==2) {
colrg <- (olay + m)/(2*m)
}
olay <- floor((length(col2) - 0.01) * colrg + 1)
olayc <- col2[olay]
olayc[!choose2] <- "transparent"
alpha <- as.vector(ifelse(choose2, alpha, 0))
col <- t(col2rgb(objectc))*(1-alpha) + t(col2rgb(olayc))*alpha
array(rgb(col, maxColorValue = 255), dim = dim(object))
}
if (!require(tkrplot))
stop("tkrplot is required.")
if (missing(rlim1))
rlim1 <- range(object, na.rm = TRUE)
if (is.null(olay)) {
vol <- object
col <- col1
}
else {
if (!all(dim(object == olay)))
stop("two layers have to have the same dimensions")
if (missing(rlim2)) {
rlim2 <- range(olay, na.rm = TRUE)
}
choose2 <- !is.na(olay) & (olay <= rlim2[2] & olay >= rlim2[1])
if (center.global) {
mi <- mi.glob; ma <- ma.glob
} else {
mi <- min(olay[choose2]); ma <- max(olay[choose2])
}
attributes(rlim2)$min <- mi
attributes(rlim2)$max <- ma
m <- max(abs(c(mi,ma)))
if (center==2) {
attributes(rlim2)$min <- -m
attributes(rlim2)$max <- m
}
if (center==0) {
if (ma>0) {
attributes(rlim2)$min <- 0
attributes(rlim2)$max <- ma
} else {
attributes(rlim2)$min <- mi
attributes(rlim2)$max <- 0
}
}
col <- overlay(object, olay, rlim1, rlim2, col1, col2,
alpha)
vol <- array(0, dim = dim(object))
}
lay <- match.arg(layout)
layout <- switch(lay, counterclockwise = c(2, 1, 3,4), clockwise = c(1,
2, 3 ,4))
direct <- c("x", "y", "z")
d <- dim(vol)
dn <- c(direct, "t")
tt <- tktoplevel()
tktitle(tt) <- main
if (missing(start)) {
bb <- c(round(d[1:3]/2), 1)
} else bb <- start
bbv <- lapply(bb, tclVar)
s <- lapply(layout[1:3], mkscale)
suppressWarnings(img <- lapply(direct[layout[1:3]], mkimg))
tkgrid(img[[1]], img[[2]])
tkgrid(s[[1]], s[[2]])
info <- function() {
op <- par(mar=c(0,0,0,0))
on.exit(par(op))
bb <- round(bb)
mm <- vxmm(rbind(bb[1:3]))
y1 <- object[rbind(bb[1:3])]
y2 <- NA
if (!is.null(olay))
y2 <- olay[rbind(bb[1:3])]
plot(0,0,type="n",xlab="",ylab="",axes=FALSE)
mytext <- paste("vx = (",paste(bb[1:3],collapse=","),")",sep="")
text(0.1,0.7,mytext,pos=NULL)
mytext <- paste("mm = (",paste(mm[1:3],collapse=","),")",sep="")
text(0.1,0.5,mytext,pos=NULL)
text(0.1,0.3,paste("Volume =", y1))
if (!is.na(y2)) {
text(0.1,0.1,paste("Overlay =", formatC(y2)))
## mi <- min(olay,na.rm=TRUE); ma <- max(olay,na.rm=TRUE)
## colrg <- (olay - mi)/(ma-mi)
nlut <- length(col2)
delta <- 0.8
scale <- (2*delta)/nlut
M <- max(abs(rlim2))
dM <- (attributes(rlim2)$max-attributes(rlim2)$min)/nlut
for (i in seq_len(nlut+1)-1) {
xx = -delta + (i-1)*scale
if (i>0)
rect(xx,-0.3,xx+scale,-0.4, col=col2[i], border=NA)
rnd <- 10^digits
rund <- round((attributes(rlim2)$min+dM*i)*rnd)/rnd
if (i%%round(nlut/10)==1 || i==nlut) {
text(xx+(1+0.5)*scale,-0.2,rund,cex=0.7,srt=45)
}
}
}
}
suppressWarnings(infobox <- tkrplot(tt,info,hscale=0.8,vscale=0.8))
tkgrid(img[[3]],infobox)
if (length(d) == 4 && d[4] > 1)
tkgrid(s[[3]], mkscale(4))
else tkgrid(s[[3]])
lapply(direct[layout[1:3]], move)
invisible(environment())
}
|
f26da85556bfbcc2ab24e85f1d78c62b7e636b3e
|
c8e592289288db7d3de88d2b05a1fe48439b7386
|
/src/03-eda-abovegroud_biomass.R
|
e417412955e38203dd8f24aefe800eab0ecebe59
|
[] |
no_license
|
garlandxie/roots_greenroofs
|
0845f61ecf18f687f765f2ab6c73ececd14b41a7
|
c60dd74fc52931b6cfd1cab39a28783782e98ac1
|
refs/heads/master
| 2020-03-22T15:57:26.677634
| 2019-06-10T00:36:44
| 2019-06-10T00:36:44
| 140,291,996
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,990
|
r
|
03-eda-abovegroud_biomass.R
|
# Exploratory data analysis of aboveground dry biomass
# (block x species x treatment green-roof experiment).

# libraries ----
library(here)
library(tidyverse)
library(visdat)

# import ----
# Cleaned aboveground-biomass measurements produced by an upstream script.
abov_bm_df <- readRDS(here("data/project_data/working",
                           "above_biomass_clean.rds"))

# check packaging ----
glimpse(abov_bm_df)
head(abov_bm_df, n = 5)
tail(abov_bm_df, n = 5)

# data validation ----
# visualize missing data ----
# missing values in VAMA + EMNI - the plants died prior to biomass harvest
vis_dat(abov_bm_df)
vis_miss(abov_bm_df)

# check your n's ----
# how many unique block ID's ?
abov_bm_df %>%
  pull(block) %>%
  n_distinct()

# how many unique species (incl. soil)?
abov_bm_df %>%
  pull(species) %>%
  n_distinct()

# Counts per block x species x treatment combination.
# NOTE(review): `d` is not used anywhere below — presumably kept for
# interactive inspection; confirm before removing.
d <- abov_bm_df %>%
  group_by(block, species, treatment) %>%
  summarize(n = n())

# how many modules per treatment per block?
abov_bm_df %>%
  group_by(block, treatment) %>%
  summarize(n = n())

# print out species code
abov_bm_df %>%
  pull(species) %>%
  unique %>%
  print

# plot ----
# Biomass by species, faceted by treatment ("SC" = soil control, excluded).
abov_bm_df %>%
  filter(species != "SC") %>%
  ggplot(aes(x = species, y = above_dry_g)) +
  geom_boxplot() +
  coord_flip() +
  facet_wrap(~treatment) +
  labs(x = "Species Code",
       y = "Aboveground Dry Biomass (g)")

# Same, faceted by block.
abov_bm_df %>%
  filter(species != "SC") %>%
  ggplot(aes(x = species, y = above_dry_g)) +
  geom_boxplot() +
  coord_flip() +
  facet_wrap(~block, ncol = 5) +
  labs(x = "Species Code",
       y = "Aboveground Dry Biomass (g)")

# NOTE(review): this plot is an exact duplicate of the first treatment-faceted
# boxplot above — possibly intended to facet on a different variable.
abov_bm_df %>%
  filter(species != "SC") %>%
  ggplot(aes(x = species, y = above_dry_g)) +
  geom_boxplot() +
  coord_flip() +
  facet_wrap(~treatment) +
  labs(x = "Species Code",
       y = "Aboveground Dry Biomass (g)")

# analysis of variance ----
# NOTE: this is NOT confirmatory data analysis
# p-value < 0.001: statistically clear that there is interspecific variation
anova(lm(formula = above_dry_g ~ species, data= abov_bm_df))

# high p-value: unclear that exp treatment influence aboveground biomass
anova(lm(formula = above_dry_g ~ treatment, data= abov_bm_df))
|
914f711b0412f645e81318cc65ee30a578b78ad3
|
7999c9b45958805c0bbd80f2c5903e55428d33bc
|
/code/ssgsea.analysis.R
|
006634af0b720ac7937d00507d6fb0c560b6ce44
|
[] |
no_license
|
dreamfishes/CancerToCellLine
|
bac53cf45a8e7a365a8390d872199e0c41c50f52
|
b44285c9f907b54ad64f8330a500b17f0639c081
|
refs/heads/main
| 2023-06-17T14:29:11.651886
| 2021-07-11T18:56:39
| 2021-07-11T18:56:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,440
|
r
|
ssgsea.analysis.R
|
### R code to perform ssGSEA analysis ###
# Scores MSigDB oncogenic (C6) and hallmark (H) gene sets per sample with
# ssGSEA, then compares score distributions between MET500 metastases and two
# model systems (CCLE cell lines, organoids), separately for basal and
# non-basal breast-cancer samples. Wilcoxon rank-sum p-values are converted to
# -log10 FDR for downstream plotting.
require(GSVA)
require(GSA)
require(org.Hs.eg.db)
require(dplyr)
require(stringr)
require(foreach)
load('server-side/RData/CCLE.RData')
load('server-side/RData/MET500.RData')
load('server-side/RData/TCGA.breast.cancer.RData')
load('client-side/output/CCLE.breast.cancer.cell.line.meta.R.output/CCLE.breast.cancer.cell.line.meta.RData')
load('client-side/output/MET500.breast.cancer.meta.R.output/MET500.breast.cancer.meta.RData')
load('client-side/output/organize.breast.cancer.organoid.data.R.output/organoid.RData')
## Run ssGSEA
# Restrict to genes quantified in every data set being combined.
common.genes <- intersect(rownames(MET500.log2.fpkm.matrix),rownames(CCLE.log2.rpkm.matrix))
#common.genes <- intersect(common.genes,rownames(TCGA.breast.cancer.log2.fpkm.matrix)) # good bye, TCGA
common.genes <- intersect(common.genes,rownames(organoid.log2.rpkm.matrix))
combined.expr.matrix <- cbind(MET500.log2.fpkm.matrix[common.genes,MET500.breast.cancer.polyA.sample],
                              CCLE.log2.rpkm.matrix[common.genes,CCLE.breast.cancer.cell.line],
                              #TCGA.breast.cancer.log2.fpkm.matrix[common.genes,], # good bye, TCGA
                              organoid.log2.rpkm.matrix[common.genes,]
)
# Map Ensembl row names to Entrez IDs (the MSigDB .gmt files use Entrez);
# genes with no mapping or with multiple Entrez IDs are dropped.
ensemble.to.entrez.mapping <- revmap(org.Hs.egENSEMBL) %>% as.list
common.genes <- common.genes[common.genes %in% names(ensemble.to.entrez.mapping)]
gene.id.list <- ensemble.to.entrez.mapping[common.genes]
l <- sapply(gene.id.list,length)
common.genes <- common.genes[l == 1]
combined.expr.matrix <- combined.expr.matrix[common.genes,]
rownames(combined.expr.matrix) <- ensemble.to.entrez.mapping[common.genes]
# ssGSEA on the C6 (oncogenic signatures) collection.
msigdb <- GSA.read.gmt("client-side/meta.data/c6.all.v6.1.entrez.gmt")
genesets <- msigdb$genesets
names(genesets) <- msigdb$geneset.names
oncogenic.geneset.gsea.results <- gsva(combined.expr.matrix, genesets, method = 'ssgsea') #ggsea
# ssGSEA on the hallmark collection.
msigdb <- GSA.read.gmt("client-side/meta.data/h.all.v6.1.entrez.gmt")
genesets <- msigdb$genesets
names(genesets) <- msigdb$geneset.names
hallmark.geneset.gsea.results <- gsva(combined.expr.matrix , genesets, method = 'ssgsea') #ggsea
# Compare between MET500 and cell line, MET500 and organoid
# Non-basal = LumA + LumB + Her2 subtypes pooled together.
MET500.breast.cancer.polyA.non.Basal.sample <- c(MET500.breast.cancer.polyA.LumB.sample,MET500.breast.cancer.polyA.LumA.sample,MET500.breast.cancer.polyA.Her2.sample)
MET500.breast.cancer.polyA.Basal.sample <- c(MET500.breast.cancer.polyA.Basal.sample)
CCLE.non.Basal.cell.line <- c(CCLE.breast.cancer.Her2.cell.line,CCLE.breast.cancer.LumA.cell.line,CCLE.breast.cancer.LumB.cell.line)
non.Basal.organoid <- c(LumA.organoid,LumB.organoid,Her2.organoid)
# well, it is called aov.p.value, but actually I used wilcoxon rank test to get the p-value. Pay attention!
non.basal.gsea.results <- hallmark.geneset.gsea.results[,c(MET500.breast.cancer.polyA.non.Basal.sample,non.Basal.organoid,CCLE.non.Basal.cell.line)]
# Per gene set: Wilcoxon rank-sum test of MET500 scores vs organoid scores
# (column 1) and vs cell-line scores (column 2).
non.basal.aov.p.value <- foreach(i = 1:nrow(non.basal.gsea.results),.combine='rbind') %do% {
    df1 <- data.frame(gsea.score =hallmark.geneset.gsea.results[i,MET500.breast.cancer.polyA.non.Basal.sample],
                      data.source = 'MET500'
    )
    df2 <- data.frame(gsea.score =hallmark.geneset.gsea.results[i,non.Basal.organoid],
                      data.source = 'organoid'
    )
    df3 <- data.frame(gsea.score =hallmark.geneset.gsea.results[i,CCLE.non.Basal.cell.line],
                      data.source = 'cell.line'
    )
    c( wilcox.test(df1$gsea.score,df2$gsea.score)$p.value, wilcox.test(df1$gsea.score,df3$gsea.score)$p.value)
    # well, to do DA analysis, let us stick on wilcoxon rank test
    # df.o <- rbind(df1,df2)
    # df.o$data.source <- factor(df.o$data.source,level=c('organoid','MET500'))
    # df.c <- rbind(df1,df3)
    # df.c$data.source <- factor(df.c$data.source,level=c('cell.line','MET500'))
    #
    # res.aov.o <- aov(gsea.score ~ data.source, data = df.o)
    # res.aov.c <- aov(gsea.score ~ data.source, data = df.c)
    #
    # c(summary(res.aov.o)[[1]][["Pr(>F)"]][1],summary(res.aov.c)[[1]][["Pr(>F)"]][1])
}
# -log10(FDR) per gene set; BH adjustment done separately per comparison.
non.basal.aov.fdr <- cbind(-1 * log10(p.adjust(non.basal.aov.p.value[,1],method='fdr')),
                           -1 * log10(p.adjust(non.basal.aov.p.value[,2],method='fdr'))
)
rownames(non.basal.aov.fdr) <- rownames(non.basal.gsea.results)
colnames(non.basal.aov.fdr) <- c('organoid','cell.line')
# Same analysis restricted to the basal subtype.
basal.gsea.results <- hallmark.geneset.gsea.results[,c(MET500.breast.cancer.polyA.Basal.sample,Basal.organoid,CCLE.breast.cancer.Basal.cell.line)]
basal.aov.p.value <- foreach(i = 1:nrow(basal.gsea.results),.combine='rbind') %do% {
    df1 <- data.frame(gsea.score =hallmark.geneset.gsea.results[i,MET500.breast.cancer.polyA.Basal.sample],
                      data.source = 'MET500'
    )
    df2 <- data.frame(gsea.score =hallmark.geneset.gsea.results[i,Basal.organoid],
                      data.source = 'organoid'
    )
    df3 <- data.frame(gsea.score =hallmark.geneset.gsea.results[i,CCLE.breast.cancer.Basal.cell.line],
                      data.source = 'cell.line'
    )
    c( wilcox.test(df1$gsea.score,df2$gsea.score)$p.value, wilcox.test(df1$gsea.score,df3$gsea.score)$p.value)
    # df.o <- rbind(df1,df2)
    # df.o$data.source <- factor(df.o$data.source,level=c('organoid','MET500'))
    # df.c <- rbind(df1,df3)
    # df.c$data.source <- factor(df.c$data.source,level=c('cell.line','MET500'))
    #
    # res.aov.o <- aov(gsea.score ~ data.source, data = df.o)
    # res.aov.c <- aov(gsea.score ~ data.source, data = df.c)
    #
    # c(summary(res.aov.o)[[1]][["Pr(>F)"]][1],summary(res.aov.c)[[1]][["Pr(>F)"]][1])
}
basal.aov.fdr <- cbind(-1 * log10(p.adjust(basal.aov.p.value[,1],method='fdr')),
                       -1 * log10(p.adjust(basal.aov.p.value[,2],method='fdr'))
)
rownames(basal.aov.fdr) <- rownames(basal.gsea.results)
colnames(basal.aov.fdr) <- c('organoid','cell.line')
save(file='client-side/output/ssgsea.analysis.R.output/ssgsea.analysis.RData',list=c('oncogenic.geneset.gsea.results','hallmark.geneset.gsea.results','non.basal.gsea.results','basal.gsea.results','basal.aov.fdr','non.basal.aov.fdr'))
|
3991cb44379c19483c259ee3fb4e4cc5d156b044
|
eb74dde34a3b6b9f337e33a033ca27a119034245
|
/R/Loan_amortization.R
|
c4ae34f6ec23021b9e6cc12b72e72102e9961b3d
|
[] |
no_license
|
cran/DetLifeInsurance
|
3c9a1632fb0ddd78ae9c7dcc4a633ef429e281d8
|
d0b5d3a696c5bc72ce0692d6cf7d4e9921336cfc
|
refs/heads/master
| 2022-12-17T07:44:43.324629
| 2020-09-12T08:20:07
| 2020-09-12T08:20:07
| 278,226,350
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,346
|
r
|
Loan_amortization.R
|
#' @title Loan Amortization
#' @description Calculates the amortization schedule.
#' @param V0 A numeric type value. Loan value.
#' @param n A numeric type value. The number of payments.
#' @param i A numeric type value or a vector of them. The interest rate of the loan.
#' @param i2 A numeric type value. The interest rate of the saving account.
#' @param alic A numeric type value. Interest tax rate.
#' @param ins A numeric type value. The rate of V0 to be paid in each period.
#' @param method A string. Amortization method used ("constant_installment","interest_only", "constant_principal", "interest_only_wsavings_account" or "constant_installment_varintrate" ).
#' @return Returns a data.frame object containing Period, Payment, Pure Payment, Interest, Amortization, Insurance, TAX and Outstanding debt.
#' @export
#' @keywords Loan Amortization
#' @return NULL
#' @examples
#' Loan_amortization(1000,12,0.04,0,0.21,0.01,"constant_installment")
#' Loan_amortization(12000,15,0.04,0,0.21,0.01,"interest_only")
#' Loan_amortization(13000,10,0.04,0,0.21,0.01,"constant_principal")
#' Loan_amortization(15000,20,0.04,0.05,0.21,0.01,"interest_only_wsavings_account")
#' Loan_amortization(5000,5,0.04,0,0.21,0.01,"constant_installment_varintrate")
#'
#'
Loan_amortization<-function(V0,n,i,i2=0,alic=0,ins=0,method){
  # Temporarily raise print precision; restored on exit.
  dig<-getOption("digits")
  on.exit(options(digits = dig))
  options(digits = 15)
  # all(i>=0) (rather than i>=0) so that a vector of rates, as accepted by
  # "constant_installment_varintrate", does not feed a length>1 condition
  # into the scalar && chain (an error since R 4.3).
  if(V0>0 && n>0 && is_integer(n)==1 && all(i>=0) && i2>=0 && alic>=0 && ins>=0){
    if(method=="constant_installment"){
      # French system: constant pure installment c = V0 / a(n, i).
      c<-V0/af(1,n,i)
      Period<-c(0,1:n)
      Prev_outstanding_debt<-V0
      Int<-c(0)
      Amort<-c(0)
      Outstanding_debt<-c(V0)
      TAX<-c(0)
      Pure_Payment<-c(0,rep(c,n))
      Payment<-c(0)
      Insurance<-c(0)
      for(j in 1:n){
        # Interest accrues on the outstanding debt; tax and insurance are
        # charged on top of the pure installment.
        I<-Prev_outstanding_debt*i
        tax<-I*alic
        Int<-c(Int,I)
        TAX<-c(TAX,tax)
        insurance<-Prev_outstanding_debt*ins
        Insurance<-c(Insurance,insurance)
        amort<-c-I
        Amort<-c(Amort,amort)
        Prev_outstanding_debt<-Prev_outstanding_debt-amort
        Outstanding_debt<-c(Outstanding_debt,Prev_outstanding_debt)
        cc<-c+tax+insurance
        Payment<-c(Payment,cc)
      }
      loan.info<-data.frame(Period,Payment,Pure_Payment,Int,Amort,TAX,Insurance,Outstanding_debt)
      return(loan.info)
    }else if(method=="interest_only"){
      # American system: only interest is paid each period; the principal is
      # repaid in full with the last payment.
      Period<-c(0,1:n)
      int<-V0*i
      tax<-int*alic
      TAX<-c(0,rep(tax,n))
      Int<-c(0,rep(int,n))
      insurance<-V0*ins
      Insurance<-c(0,rep(insurance,n))
      Payment<-c(0,rep(int+tax+insurance,(n-1)),(int+tax+insurance+V0))
      Amort<-c(rep(0,n),V0)
      Outstanding_debt<-c(rep(V0,n),(V0-(Payment[n+1]-(int+tax+insurance))))
      loan.info<-data.frame(Period,Payment,Int,TAX,Amort,Insurance,Outstanding_debt)
      return(loan.info)
    }else if(method=="constant_principal"){
      # German system: constant amortization V0/n; interest (and hence the
      # total payment) declines each period.
      amort<-V0/n
      Period<-c(0,1:n)
      Amort<-c(0,rep(amort,n))
      Payment<-c(0)
      Pure_Payment<-c(0)
      Int<-c(0)
      Outstanding_debt<-c(V0)
      Prev_outstanding_debt<-V0
      TAX<-c(0)
      Insurance<-c(0)
      for(j in 1:n){
        cc<-amort+(V0-amort*(j-1))*i
        int<-(V0-amort*(j-1))*i
        tax<-int*alic
        TAX<-c(TAX,tax)
        Int<-c(Int,int)
        insurance<-Prev_outstanding_debt*ins
        Insurance<-c(Insurance,insurance)
        Prev_outstanding_debt<-Prev_outstanding_debt-amort
        Outstanding_debt<-c(Outstanding_debt,Prev_outstanding_debt)
        payment=cc+tax+insurance
        Payment<-c(Payment,payment)
        Pure_Payment<-c(Pure_Payment,cc)
      }
      loan.info<-data.frame(Period,Payment,Pure_Payment,Int,Amort,TAX,Insurance,Outstanding_debt)
      return(loan.info)
    }else if(method=="interest_only_wsavings_account"){
      # Interest-only loan paired with a sinking fund: a constant deposit
      # `cah` into a savings account earning i2 accumulates to V0 at period n.
      cah<-V0/sf(0,n,i2)
      Period<-c(0,1:n)
      int<-V0*i
      tax<-int*alic
      TAX<-c(0,rep(tax,n))
      Int<-c(0,rep(int,n))
      insurance<-V0*ins
      Insurance<-c(0,rep(insurance,n))
      Payment<-c(0,rep(int+tax+insurance,n))
      Amort<-c(rep(0,n),V0)
      Outstanding_debt<-rep(V0,n)
      Real_outstanding_debt<-c(0)
      Outstanding_debt_with_savings <-(0)
      # BUGFIX: loop over all n periods (was hard-coded `1:20`, which failed
      # or produced wrong-length columns whenever n != 20).
      for(j in 1:n){
        Savings<-cah*sf(0,j,i2)
        real<-Outstanding_debt[j]-Savings
        Real_outstanding_debt<-c(Real_outstanding_debt,real)
        Outstanding_debt_with_savings <-c(Outstanding_debt_with_savings,Savings)
      }
      Outstanding_debt<-c(Outstanding_debt,(V0-Outstanding_debt_with_savings [n+1]-(int+tax+insurance))+Payment[n+1])
      loan.info<-data.frame(Period,Payment,Int,TAX,Amort,Insurance,Outstanding_debt,Real_outstanding_debt,Outstanding_debt_with_savings)
      return(loan.info)
    }else if(method=="constant_installment_varintrate"){
      # Constant-installment schedule recomputed whenever the rate changes;
      # `i` may be shorter than n and is padded with its last value.
      irate<-c(i,rep(i[length(i)],(n-length(i))))
      Period<-c(0,1:n)
      Prev_outstanding_debt<-V0
      c=V0*((1+i[1])^(n)*(irate[1]))/((1+i[1])^(n)-1)
      Pure_Payment<-c(0,c)
      int<-Prev_outstanding_debt*irate[1]
      Int<-c(0,int)
      tax<-int*alic
      TAX<-c(0,tax)
      amort<-c-int
      Amort<-c(0,amort)
      insurance<-Prev_outstanding_debt*ins
      Prev_outstanding_debt<-V0-amort
      Outstanding_debt<-c(V0,Prev_outstanding_debt)
      Insurance<-c(0,insurance)
      # BUGFIX: use the padded rate vector so Rate has n+1 entries like every
      # other column (c(0,i) made data.frame() fail when length(i) < n, e.g.
      # for the scalar-rate example in the documentation).
      Rate<-c(0,irate)
      Payment<-c(0,c+tax+insurance)
      for(j in 2:n){
        if(irate[j]==irate[j-1]){
          c=c
        }else{
          # Rate changed: re-amortize the remaining debt over the remaining
          # periods at the new rate.
          c=Prev_outstanding_debt*((1+irate[j])^(n-(j-1))*(irate[j]))/((1+irate[j])^(n-(j-1))-1)
        }
        Pure_Payment<-c(Pure_Payment,c)
        I<-Prev_outstanding_debt*(irate[j])
        tax<-I*alic
        Int<-c(Int,I)
        insurance<-Prev_outstanding_debt*ins
        Insurance<-c(Insurance,insurance)
        cc<-c+tax+insurance
        Payment<-c(Payment,cc)
        TAX<-c(TAX,tax)
        amort<-c-I
        Amort<-c(Amort,amort)
        Prev_outstanding_debt<-Prev_outstanding_debt-amort
        Outstanding_debt<-c(Outstanding_debt,Prev_outstanding_debt)
      }
      loan.info<-data.frame(Period,Rate,Payment,Pure_Payment,Int,Amort,Insurance,TAX,Outstanding_debt)
      return(loan.info)
    } else{
      stop("Check method")
    }
  }else{
    stop("Check values")
  }
}
|
248c964aece4a379bec9365e6e0ac8a5bba630c8
|
9b4fe46e6cc9a16669ef721e44d5d995b5fdb885
|
/helpers/find-coords.R
|
08db522a1c8a9a02ed3fcfa70c15f5aa2961178d
|
[
"MIT"
] |
permissive
|
edridgedsouza/photogenic-finder
|
ca3f13f730c769dc6ed7d136f2b7231756aa0b83
|
4e4bd9502fe27f111bb17f6e8948bae542e7676d
|
refs/heads/master
| 2021-01-12T00:48:08.301810
| 2017-01-08T00:30:34
| 2017-01-08T00:30:34
| 78,295,869
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 765
|
r
|
find-coords.R
|
library(dplyr)
library(httr)
# Read the Google API key from an untracked local file (whole file contents,
# so the file must not contain a trailing newline beyond the key itself).
filename <- "./GOOGLE_TOKEN.txt"
apikey <- readChar(filename, file.info(filename)$size)
# Use Google Places API to find the coordinates given a place's name.
# Returns a one-row data.frame(latitude, longitude) for the best-ranked
# match, or FALSE when the API reports no results — callers must check
# for FALSE before using the return value.
getCoords <- function(placeName) {
  # `apikey` is read from GOOGLE_TOKEN.txt at the top of this file.
  params <- list(key = apikey, query = placeName)
  r <-
    GET("https://maps.googleapis.com/maps/api/place/textsearch/json",
        query = params)
  parsed <- content(r, "parsed")
  if (parsed$status != "ZERO_RESULTS") {
    # Take only the first (best-ranked) result.
    # (Removed an unused `data <- parsed$data` assignment here.)
    lat <- parsed$results[[1]]$geometry$location$lat
    long <- parsed$results[[1]]$geometry$location$lng
    coords <- data.frame(latitude = lat, longitude = long)
    return(coords)
  }
  else {
    return(FALSE)
  }
}
# Then finally use these coordinates to plug into makePlot()
|
9d525b756353a81f744dff9d592acee24edc681e
|
f89c621ea18f70f6ac2462eae1c43122b9a26d28
|
/drill/data_types_matrices.R
|
6258a6434358cc43aa43405d16804aaf0043e13c
|
[] |
no_license
|
smstaneva/datasciencecoursera
|
5c828ac4467ffa76286eafd0709edca2d842ea52
|
5a74432696ee41e543f9e04faa59b9b0503d44ef
|
refs/heads/master
| 2021-09-11T19:32:07.391859
| 2018-04-11T14:39:46
| 2018-04-11T14:39:46
| 75,734,746
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 176
|
r
|
data_types_matrices.R
|
# Matrices in base R: a matrix is just a vector carrying a `dim` attribute.
# An empty (NA-filled) 2x3 matrix:
m <- matrix(nrow = 2, ncol = 3)
m
dim(m)
attributes(m)
# Filling happens column-wise by default:
m <- matrix(1:6, nrow = 2, ncol = 3)
m
# Equivalently, assign a dim attribute to an existing vector:
m <- 1:10
m
dim(m) <- c(2, 5)
m
# cbind/rbind assemble matrices from vectors as columns/rows:
x <- 1:3
y <- 10:12
cbind(x, y)
rbind(x, y)
|
006806fca72400ff79ad8d260c6fe7462454b8ba
|
e82166f722db2c1d2ba531a4d40fa700f9e46339
|
/raw_data/source_data/SR_sample_and_resample.R
|
14ab5b269e7b055ae995883d9d3a577c863dbd04
|
[] |
no_license
|
amscheel/positive_result_rates
|
c730f9540cc57e280d7b2475c0e5ab7e27d20abc
|
442ef4c78c921ffa8f5b3ec1d0f2c55a852467be
|
refs/heads/master
| 2022-04-29T00:03:34.439599
| 2022-03-10T08:24:42
| 2022-03-10T08:24:42
| 188,072,964
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 566
|
r
|
SR_sample_and_resample.R
|
# Load all 1919 search results
allSRs <- read.csv("SR_search_results_WoS_20190107.csv")
# Draw a random sample of 150 papers and put them into a new dataframe
set.seed(20190120)
x <- sample(c(1:1919), 150)
SRsample <- allSRs[x, ]
# Sample 8 additional papers and put them into a new dataframe
# Re-seeding with the same seed and drawing 158 reproduces the original 150
# indices in positions 1:150 (sampling without replacement draws
# sequentially), so positions 151:158 are 8 fresh, non-overlapping papers.
set.seed(20190120)
replacement <- sample(c(1:1919), 158)[151:158]
SRreplacement <- allSRs[replacement, ]
# Sample 1 additional paper and put it into a new dataframe
# Same trick: index 159 of the re-drawn sequence is one more distinct paper.
set.seed(20190120)
replacement2 <- sample(c(1:1919), 159)[159]
SRreplacement2 <- allSRs[replacement2, ]
|
1f2626bcef3a16a1939bca68f986c456484449de
|
74c89c59d46180835543e00818181b47a0c7e423
|
/man/Generate_Monte_Carlo_Inputs.Rd
|
87cd3ce6258a0de824137288d184787288f2d36f
|
[
"CC0-1.0"
] |
permissive
|
JerryHMartin/MonteCarloInputs
|
a22e8ec81cae5a8f7f419aa2d249b5020080b623
|
384850c708d709317b8d69bc94a62f5a38222ef5
|
refs/heads/master
| 2022-12-02T02:10:34.186125
| 2020-07-30T02:41:23
| 2020-07-30T02:41:23
| 283,578,698
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,276
|
rd
|
Generate_Monte_Carlo_Inputs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Generate_Monte_Carlo_Inputs.R
\name{Generate_Monte_Carlo_Inputs}
\alias{Generate_Monte_Carlo_Inputs}
\title{Generate Monte Carlo Inputs}
\usage{
Generate_Monte_Carlo_Inputs(
base_values,
deviations,
n,
distribution = "normal"
)
}
\arguments{
\item{base_values}{dataframe which holds Monte Carlo base values}
\item{deviations}{dataframe containing deviations analysis}
\item{n}{number of iterations for analysis}
\item{distribution}{either "normal" or "uniform"}
}
\value{
a list of dataframes with the perturbed values with the same
structure as base_values
}
\description{
Generates inputs for a Monte Carlo analysis.
}
\details{
The inputs for a Monte Carlo Analysis are perturbed around a base value.
This command accepts a base_values and a deviations dataframe, then returns
a list of dataframes with elements perturbed according to deviations.
For a normal distribution the deviation is the standard deviation.
For a uniform distribution the deviation is the absolute deviation.
}
\examples{
base_values = list(
speed = c(10, 20),
position = c(30, 40)
)
deviations = list(
speed = c(0.5, 50),
position = 2
)
Generate_Monte_Carlo_Inputs(base_values, deviations, 2)
}
|
2dd1c748136d355cf8d73e8065f368f6595e3e22
|
17b5cfdd983bcfb6f2426e4eebdf27477e34dce9
|
/run_analysis.R
|
8b3882077d39ef7742c85b8acceb85c2648d45ae
|
[] |
no_license
|
alexthomas93/Getting-and-Cleaning-Data-Course-Project
|
f47933a84a95ec420dcaf12572593d64dec4adbe
|
1a0d3d89639bed6eedecb4f9417ea2d36cceb304
|
refs/heads/master
| 2020-07-28T22:36:47.332272
| 2019-09-23T10:30:34
| 2019-09-23T10:30:34
| 209,563,679
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,487
|
r
|
run_analysis.R
|
# Getting & Cleaning Data course project: build a tidy summary of the
# UCI HAR smartphone dataset (run from the dataset's root directory).
# 1. Merge the training and the test sets to create one data set
# Load the feature names
features <- read.table("features.txt")$V2
# Load the training features using the feature names as the column names
# (read.table mangles special characters in col.names via make.names)
X_train <- read.table("train/X_train.txt", col.names = features)
# Load the activity column of the training data
y_train <- read.table("train/y_train.txt", col.names = "activity")
# Load the subject column of the training data
subject_train <- read.table("train/subject_train.txt", col.names = "subject")
# Merge the training data into one dataframe
train <- cbind(X_train, y_train, subject_train)
# Load the test features using the feature names as the column names
X_test <- read.table("test/X_test.txt", col.names = features)
# Load the activity column of the test data
y_test <- read.table("test/y_test.txt", col.names = "activity")
# Load the subject column of the test data
subject_test <- read.table("test/subject_test.txt", col.names = "subject")
# Merge the test data into one dataframe
test <- cbind(X_test, y_test, subject_test)
# Merge the train and test datasets
data <- rbind(train, test)
# 2. Extract only the measurements on the mean and standard deviation for each measurement
# make.names() is applied so the selected names match the mangled col.names above
cols <- make.names(c(grep(pattern = "mean\\(\\)|std\\(\\)", features, value = TRUE), "activity", "subject"))
data <- data[cols]
# 3. Use descriptive activity names to name the activities in the data set
# Load the table that maps the activity number e.g. 1 to the activity name e.g. WALKING
activity_labels <- gsub("_", " ", tolower(read.table("activity_labels.txt")$V2))
# Replace the activity numbers in the data with activity names
data$activity <- factor(activity_labels[data$activity], ordered = FALSE)
# 4. Label the data set with descriptive variable names
cnames <- colnames(data)
# Remove dots from the column names
cnames <- gsub("\\.", "", cnames)
# Capitalise 'mean' and 'std' in column names
cnames <- sub("mean", "Mean", cnames)
cnames <- sub("std", "Std", cnames)
# Replace 't' and 'f' with 'time' and 'freq' at the beginning of column names
cnames <- sub("^t", "time", cnames)
cnames <- sub("^f", "freq", cnames)
colnames(data) <- cnames
# 5. Create a second, independent tidy data set with the average of each variable for each activity and each subject
tidy <- aggregate(. ~ activity + subject, data = data, drop = TRUE, FUN = mean, simplify = TRUE)
write.table(tidy, "tidy.txt", row.names = FALSE)
|
a6a793a67d8734a80997d4d9951ed5f8b5b7388e
|
ba31fbf49621850f74b663290b5a72c7f0373c8f
|
/script/kmer_analysis/analysis.R
|
998fdeaac5f9d04211f13251d115cc71b7ebaec2
|
[] |
no_license
|
bschiffthaler/aspen-v2
|
fa650976a6cd85ae3bede42f35b6590fc858f9c0
|
081fcb255e580f1315568a4ee1580c9256888561
|
refs/heads/master
| 2021-12-23T01:18:53.257473
| 2021-09-19T08:15:13
| 2021-09-19T08:15:13
| 214,961,462
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,607
|
r
|
analysis.R
|
# Sex classification of aspen samples from k-mer counts using random forests:
# train on the SwAsp_UmAsp set, evaluate on ScotAsp, then relate important
# k-mers to their alignments against female (Potra v2) and male (W52) genomes.
# NOTE(review): setwd() to an absolute cluster path makes this script
# non-portable; prefer project-relative paths.
setwd("/mnt/picea/projects/aspseq/nstreet/kmer_ml/sex/selected_kmers/")
library(tidyverse)
library(magrittr)
library(ranger)
library(caret)
library(GenomicAlignments)
library(see)
library(UpSetR)
counts <- scan("count_data.txt", "")
# Harmonise the two metadata sheets (SwAsp/UmAsp and ScotAsp) into one table.
ss1 <- read_delim("../meta/sex_set_metadata.tsv", "\t")
ss2 <- read_delim("../meta/scotasp.txt", "\t")
ss1 %<>% mutate(Sex = na_if(Sex, "excluded"))
unique(ss1$Sex)
unique(ss2$PCR_Sex)
ss1$SampleSet <- "SwAsp_UmAsp"
ss2 %<>% select(NGI_ID, PCR_Sex)
colnames(ss2) <- c("Sample", "Sex")
ss2$SampleSet <- "ScotAsp"
meta <- bind_rows(ss1, ss2)
#
# Derive a short sample name (first three underscore-separated tokens,
# with trailing "_1.jf"/"_NA" artefacts stripped) and match each sample
# to its k-mer count file.
meta$ShortName <- sapply(str_split(meta$Sample, "_"), function(x) {
  str_replace(paste(x[1], x[2], x[3], sep = "_"), "(_1.jf)|(_NA)", "")
})
assertthat::assert_that(! any(duplicated(meta$ShortName)))
meta$CountFile <- unlist(sapply(meta$ShortName, function(x) {
  res <- counts[which(grepl(x, counts, fixed = TRUE))]
  if (length(res) == 0) {
    return(NA)
  } else {
    return(res)
  }
}))
# Keep only samples with a known sex and a count file.
meta_f <- filter(meta, !is.na(Sex) & !is.na(CountFile))
meta_f$Sex <- as.factor(meta_f$Sex)
meta_f$Coverage <- str_replace(meta_f$CountFile, "\\.count", ".cov")
# Read all the data into a vector
count_matrix <- scan(
  pipe(
    paste("cat", paste(meta_f$CountFile, collapse = " ")
    )
  )
)
# Cast to matrix
count_matrix <- matrix(count_matrix, nrow = nrow(meta_f), byrow = TRUE)
coverage <- scan(
  pipe(
    paste("cat", paste(meta_f$Coverage, collapse = " ")
    )
  )
)
# Normalise counts by per-sample sequencing coverage (relative to the
# deepest sample).
size_factor <- coverage / max(coverage)
count_matrix_norm <- sweep(count_matrix, 1, size_factor, "/")
# Need names for ranger()
kmers <- scan("top_k.txt", "")
colnames(count_matrix_norm) <- kmers
rownames(count_matrix_norm) <- meta_f$ShortName
# Train on SwAsp_UmAsp, hold out ScotAsp as the test set.
train_x <- count_matrix_norm[which(meta_f$SampleSet == "SwAsp_UmAsp"), ]
test_x <- count_matrix_norm[which(meta_f$SampleSet != "SwAsp_UmAsp"), ]
train_y <- meta_f$Sex[which(meta_f$SampleSet == "SwAsp_UmAsp")]
test_y <- meta_f$Sex[which(meta_f$SampleSet != "SwAsp_UmAsp")]
head(train_x[, 1:6])
model <- ranger(x = train_x, y = train_y, num.trees = 50000,
                importance = "permutation", verbose = TRUE,
                oob.error = TRUE, classification = TRUE)
save(model, file = "model.RData")
# Per-tree votes: weights = fraction of trees voting for class 1.
preds <- predict(model, data = test_x, predict.all = TRUE)
weights <- rowSums(preds$predictions == 1) / 50000
# NOTE(review): `outcomes` is computed but never used below, and
# confusionMatrix() is fed the per-tree prediction matrix from
# predict.all = TRUE rather than a per-sample factor — confirm intended.
outcomes <- ifelse(weights > 0.5, 0, 1)
confusionMatrix(data = preds$predictions, reference = test_y)
rox <- pROC::roc(response = as.numeric(factor(test_y)),
                 predictor = weights)
plot(rox)
tibble(Specificity = rox$specificities, Sensitivity = rox$sensitivities) %>%
  ggplot(aes(x = Specificity, y = Sensitivity)) +
  geom_path()+
  scale_x_reverse() +
  geom_abline(slope = 1, intercept = 1, lty = 2, col = "red") +
  theme_bw()
ggsave("~/roc.pdf", height = 9, width = 9, dpi = 300)
# Second model with impurity-corrected importance, used for importance
# p-values (Altmann/Janitza-style via importance_pvalues).
model2 <- ranger(x = train_x, y = train_y, num.trees = 50000,
                 importance = "impurity_corrected", verbose = TRUE,
                 oob.error = TRUE, classification = TRUE)
save(model2, file = "model2.RData")
imp_pvals <- importance_pvalues(model2)
imp_pvals[imp_pvals[, 2] < 0.05, ]
hist(imp_pvals[, 2])
# alignments
# Load k-mer alignments to the female (Potra v2) and male (W52) genomes and
# attach permutation importances to each aligned k-mer.
potra_v2 <- readGAlignments("../alignments/Potra_v2_sorted.bam", use.names = TRUE)
mcols(potra_v2)$importance <- model$variable.importance[names(potra_v2)]
w52 <- readGAlignments("../alignments/W52_sorted.bam", use.names = TRUE)
mcols(w52)$importance <- model$variable.importance[names(w52)]
# Calculate various sets
miss_potra <- setdiff(rownames(imp_pvals), names(potra_v2))
miss_w52 <- setdiff(rownames(imp_pvals), names(w52))
aln_potra <- intersect(rownames(imp_pvals), names(potra_v2))
aln_w52 <- intersect(rownames(imp_pvals), names(w52))
miss_both <- intersect(miss_w52, miss_potra)
miss_w52_only <- setdiff(miss_w52, miss_potra)
miss_potra_only <- setdiff(miss_potra, miss_w52)
aln_both <- intersect(aln_potra, aln_w52)
aln_w52_only <- setdiff(aln_w52, aln_potra)
aln_potra_only <- setdiff(aln_potra, aln_w52)
upset(data = fromList(list("Aligned to Male" = names(w52), "Aligned to Female" = names(potra_v2), "Kmer superset" = rownames(imp_pvals))))
# Distribution of per-sample occurrence for k-mers aligning to neither genome.
as_tibble(colSums(count_matrix_norm[, miss_both] > 0)) %>%
  ggplot(aes(x = value)) +
  geom_histogram(binwidth = 10)
ggsave("~/kmer_hist_missing.pdf", width = 16, height = 9)
# Compare importance p-values between aligned and missing k-mers per genome.
list(
  tibble(Pvalue = imp_pvals[miss_potra, 2], Kmer = miss_potra, Genome = "asp201", Type = "Missing"),
  tibble(Pvalue = imp_pvals[miss_w52, 2], Kmer = miss_w52, Genome = "W52", Type = "Missing"),
  tibble(Pvalue = imp_pvals[aln_potra, 2], Kmer = aln_potra, Genome = "asp201", Type = "Aligned"),
  tibble(Pvalue = imp_pvals[aln_w52, 2], Kmer = aln_w52, Genome = "W52", Type = "Aligned")
) %>% bind_rows() -> imp_alns
filter(imp_alns, Pvalue < 0.1) %>%
  ggplot(aes(x = Genome, y = -log10(Pvalue + .Machine$double.eps), fill = Type)) +
  geom_violin() +
  scale_fill_material()
imp_alns %>% filter(Pvalue < 0.1) %>% group_by(Genome, Type) %>%
  summarise(N = length(Pvalue), Min = min(Pvalue), Max = max(Pvalue),
            Mean = mean(Pvalue), Median = median(Pvalue))
## Manhattan plot for Potra
potra_v2_chr <- potra_v2[str_detect(seqnames(potra_v2), "^chr")]
w52_chr <- w52[str_detect(seqnames(w52), "^chr")]
preds2 <- predict(model, data = test_x)
baseline <- confusionMatrix(data = preds2$predictions, reference = test_y)
baseline
#######
## Anything below here was experimental and not included
#######
# NOTE(review): `chr1kmers` and `chr19kmers` are not defined anywhere in this
# file — these ablation models only run if they come from an interactive
# session or another script.
model3 <- ranger(x = train_x[, ! colnames(train_x) %in% chr1kmers],
                 y = train_y, num.trees = 50000,
                 verbose = TRUE, oob.error = TRUE, classification = TRUE)
save(model3, file = "model3.RData")
preds3 <- predict(model3, data = test_x)
no_chr_1 <- confusionMatrix(data = preds3$predictions, reference = test_y)
model4 <- ranger(x = train_x[, ! colnames(train_x) %in% chr19kmers],
                 y = train_y, num.trees = 50000,
                 verbose = TRUE, oob.error = TRUE, classification = TRUE)
save(model4, file = "model4.RData")
preds4 <- predict(model4, data = test_x)
no_chr_19 <- confusionMatrix(data = preds4$predictions, reference = test_y)
model5 <- ranger(x = train_x[, ! colnames(train_x) %in% miss_both],
                 y = train_y, num.trees = 50000,
                 verbose = TRUE, oob.error = TRUE, classification = TRUE)
save(model5, file = "model5.RData")
preds5 <- predict(model5, data = test_x)
# NOTE(review): likely copy-paste slip — `no_mb` is computed from preds4
# here (and again after model6) although preds5/preds6 were just created.
no_mb <- confusionMatrix(data = preds4$predictions, reference = test_y)
model6 <- ranger(x = train_x[, ! colnames(train_x) %in% c(miss_both, chr19kmers, chr1kmers)],
                 y = train_y, num.trees = 50000,
                 verbose = TRUE, oob.error = TRUE, classification = TRUE)
save(model6, file = "model6.RData")
preds6 <- predict(model6, data = test_x)
no_mb <- confusionMatrix(data = preds4$predictions, reference = test_y)
# Per-sample prediction table for the test set, exported for review.
pred_tbl <- tibble(
  Sample = meta_f$Sample[which(meta_f$SampleSet != "SwAsp_UmAsp")],
  Predicted = as.character(preds6$predictions),
  Actual = test_y,
  Match = as.character(preds6$predictions) == test_y
)
writexl::write_xlsx(pred_tbl, "~/test_set_predictions.xlsx")
# Inspect the 16 most important k-mers: normalised counts by sex, coloured
# by whether the sample was classified correctly.
as_tibble(imp_pvals, rownames = "Kmer") -> imps
as_tibble(count_matrix_norm[, head(arrange(imps, desc(importance))$Kmer, 16)]) -> tmp
tmp$Sex <- meta_f$Sex
tmp$Sample <- meta_f$ShortName
pivot_longer(tmp, -c(Sample, Sex)) -> plot_df
left_join(plot_df, pred_tbl) %>%
  ggplot(aes(x = Sex, y = value + .Machine$double.eps, label = Sample)) +
  geom_violin(outlier.alpha = 0) +
  geom_jitter(aes(col = Match), width = 0.2) +
  facet_wrap(~name, scales = "free_y") +
  theme_bw() +
  scale_y_log10()
plotly::ggplotly()
81d2855f858d4b383f2ba11457c55ac11ab06078
|
3853c7c2aec2afda0e14bf57c76aae4408ee74d2
|
/R/data_partition.R
|
39b4269c7f3799114d104daf7dd608d5d0a20411
|
[] |
no_license
|
lenamax2355/shinyr
|
9342a439046517f4dd836f0b38dcc41b2d3dd6e0
|
5f1194b9ca6f39a2446aed166c31b68196bec108
|
refs/heads/master
| 2023-06-09T01:53:26.610183
| 2021-06-23T04:38:34
| 2021-06-23T04:38:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 679
|
r
|
data_partition.R
|
#' dataPartition
#' @title Data Partition
#' @description Partition data for training and test
#' @author Jayachandra N
#' @param df data.frame which needs to be divided into train and test subsets
#' @param train_data_perc numeric value between 1 and 100: percentage of rows
#'   assigned to the training subset
#' @return list of length 2 which contains Train data and Test data
#' @export
#' @examples
#' dataPartition(iris, 80)
dataPartition <- function(df, train_data_perc) {
  # Fraction of rows that go to the training subset.
  train_data_size <- as.numeric(train_data_perc) / 100
  # seq_len() is safe for any nrow; floor() guards against a non-integer
  # sample size when the fraction does not divide nrow(df) evenly.
  trainingRowIndex <- sample(seq_len(nrow(df)), floor(train_data_size * nrow(df)))
  trainingData <- df[trainingRowIndex, ]
  testData <- df[-trainingRowIndex, ]
  list(Test = testData, Train = trainingData)
}
|
d51ddffe0d53b1efe454713ff91828a89f828764
|
fd9a1c997e3671e103328a0b501903c439f9282e
|
/Twitter and the cross section of stock returns.R
|
9d54085d869f7771a0473a9cb71e6b9fd9b77d59
|
[] |
no_license
|
filippotesta24/twitter-R-analysis
|
f2e59c452153487b15bd48d24344d0e4dad52e80
|
7d55a867de97c2548adb7ba024715469daecd67c
|
refs/heads/main
| 2023-03-03T18:32:45.641513
| 2021-02-08T10:12:31
| 2021-02-08T10:12:31
| 337,033,439
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,295
|
r
|
Twitter and the cross section of stock returns.R
|
#PREPARE THE ENVIRONMENT
# NOTE(review): this is a template — "YYYY-MM-DD", X and XXX are placeholders
# the user must replace before sourcing; as written these lines error.
BBdate_A_U<-as.Date("YYYY-MM-DD") #The user sets the date in which start the bullish and
                                  #bearish and the over and under-tweeted analysis.
                                  #The Twitter API doesn't allow to access datas older
                                  #then about a week.
BBdate_A_T<-BBdate_A_U+1
number_days_return_analysis<- X #Number of days in which the user wants to track the
                                #performances of the portfolios.
num_tweet_A<- XXX #Number of tweets to research.
#all the packages required
install.packages("twitteR")
install.packages("tidyquant")
install.packages("tidyverse")
install.packages("quantmod")
install.packages("plyr")
install.packages("dplyr")
install.packages("sentimentr")
install.packages("readxl")
install.packages("forecast")
install.packages("ggfortify")
install.packages("survMisc")
install.packages("labstatR")
install.packages("openxlsx")
#load library
library(twitteR)
library(tidyquant)
library(tidyverse)
library(quantmod)
library(plyr)
library(dplyr)
library(sentimentr)
library(readxl)
library(labstatR)
library(survMisc)
library(ggfortify)
library(forecast)
library(openxlsx)
#load credentials
# NOTE(review): live Twitter API secrets are hard-coded and committed to the
# repository — they should be revoked/rotated and loaded from environment
# variables or an untracked config file instead.
consumer_key<-"8xXzct0o0NbdUV0Rex4EVzYCJ"
consumer_secret<- "9PjaGd2BVmgaeSmrDrJnLOuY2iTuykY7w79IeKYYbxbDag3hpH"
access_token <- "1331292392977272837-ARmYkbXpNsYADfy1y0Wsi5vA0gZbqT"
access_secret <- "Qa6oQOlnOWboxiImZ4ECkSInfNDdzCRvmZBUHG9zDkjwQ"
#set up to authenticate
setup_twitter_oauth(consumer_key, consumer_secret,access_token,access_secret)
############################################
#Obtain the current weights of the S&P 500 index
#we limit the analysis to the largest companies that make up 80% of the S&P 500 index
all_stocks<-tq_index("SP500")
# Accumulate rows (sorted by weight, as returned by tq_index) until the
# cumulative index weight reaches 80%.
stocks<-data.frame()
z=0
i=1
while(z<0.8){
  z=z+all_stocks[i,5]
  stocks<-rbind(stocks,all_stocks[i,])
  i=i+1
}
#Create a vector with the ticker of the firms analyzed
ticker_firms<-pull(stocks[1])
ticker_firms<-gsub("\\.", "-",ticker_firms) #we use this string to remove the "." from
                                            #some stock name and replace with "-"
#CREATION OF THE RELATIVE WEIGHT
# Re-normalise the selected firms' index weights so they sum to 1.
sp500_W<-pull(stocks[5]) #the weight of the firms in S&P500
sp500_W_R<-matrix(NA,ncol=1,nrow=length(sp500_W))
rownames(sp500_W_R)<-ticker_firms
for( i in 1:length(sp500_W)){
  sp500_W_R[i,]<-sp500_W[i]/sum(sp500_W)
}
#create the CASHTAG names (add the $ to the ticker)
# NOTE(review): cashtags use the original (dotted) tickers from `stocks`,
# not the dash-substituted `ticker_firms` — confirm this is intended.
cashtag<-matrix()
for( i in 1: nrow(stocks)) {
  cashtag[i]= paste("$",stocks[i,1],sep="")
}
#CREATION OF RETURN'S MATRIX
# Build a (days x firms) matrix of simple daily returns over the analysis
# window, computed from Yahoo adjusted close prices (column 6).
since_data_A <-BBdate_A_T+1
until_data_A <-BBdate_A_T+5
A_Days<-as.character(seq(from=since_data_A, to=until_data_A,by=1))
# One reference download to size the matrix and get the trading-day index.
AAreturn<-getSymbols(ticker_firms[1],from=since_data_A-1 ,to=until_data_A+1, auto.assign = FALSE)
returns_matrix<-matrix(ncol =length(ticker_firms),nrow = nrow(AAreturn)-1)
names.of.a <- as.character(index(AAreturn[-1]))
row.names(returns_matrix) <- names.of.a
colnames(returns_matrix) <- c(ticker_firms)
for (l in seq_along(ticker_firms)) {
  # PERF FIX: download each ticker's price series once per firm. The
  # original called getSymbols.yahoo() inside the day loop, re-downloading
  # the identical series for every single day.
  prices <- getSymbols.yahoo(ticker_firms[l],
                             from = as.character(since_data_A - 1),
                             to = as.character(until_data_A + 1),
                             auto.assign = FALSE)
  adj <- as.vector(prices[, 6])  # adjusted close
  for (k in 1:(nrow(AAreturn) - 1)) {
    returns_matrix[k, l] <- (adj[(k + 1)] / adj[k]) - 1
  }
}
#CREATION OF THE MARKET PORTFOLIO
# Daily market-portfolio return: value-weighted sum of constituent returns,
# ignoring missing values (firms without a quote on a given day).
weight_R1<-matrix(NA,ncol=length(ticker_firms),nrow=nrow(returns_matrix))
for (o in seq_along(names.of.a)) {
  for (w in seq_along(ticker_firms)) {
    weight_R1[o, w] <- returns_matrix[o, w] * sp500_W[w]
  }
}
# rowSums(..., na.rm = TRUE) computes the result in one pass; the original
# filled the matrix with rowSums() and then immediately overwrote every row
# with an na.rm sum in a second loop.
market_portfolio_R<-matrix(rowSums(weight_R1, na.rm = TRUE),
                           ncol = 1, nrow = nrow(returns_matrix))
rownames(market_portfolio_R)<- c(names.of.a)
colnames(market_portfolio_R)<-"Market Returns"
######################################
#DATA GENERATING PROCESS
#Download the tweets about the companies in the index using their cashtag
#Produce a daily time series of the number of tweets about each company
#Set the dates to research the tweets and the number to research
#since_data <-as.Date("YYYY-MM-DD")
#until_data <-as.Date("YYYY-MM-DD")
#n_tweet_to_research<-XXX
#Create an empty matrix of days and companies that we fill with the daily number of tweets
#vector_Days<-as.character(seq(from=since_data, to=until_data,by=1))
#tweets_Matrix<-matrix(c(NA),ncol=length(vector_Days)-1,nrow=length(cashtag))
#rownames(tweets_Matrix)<-cashtag
#colnames(tweets_Matrix)<-as.character(vector_Days[-length(vector_Days)])
#for(z in 1:length(cashtag) ){
#for(k in 1:(length(vector_Days)-1)){
# rm(tweet)
#tweet<-searchTwitter(cashtag[z], n=n_tweet_to_research,lang="en",since=paste0(vector_Days[k]),until=vector_Days[k+1])
#if (length(tweet)!=0){
#tweets_Matrix[z,k]<-length(strip_retweets(tweet))
#}else{
#tweets_Matrix[z,k]<-NA
#}
#Sys.sleep(22) #Suspend execution of R expressions for a specified time, to avoid problem with the Tweeter API
#}
#}
#we save the data as csv
#write.csv(XXX,"XXX.csv")
##############################
#ANALYSIS OF THE CYCLICALITY
# Load the pre-collected daily tweet counts (one row per firm, one column
# per day) that were saved by the commented-out collection loop above.
firms_in_analysis<- read_excel("Companies.xlsx")
Tweets_Matrix_cyclicality <- read_excel("Daily_time_series_of_all_tweets.xlsx")
# NOTE(review): convertToDate() presumably comes from the openxlsx package --
# confirm it is loaded earlier in the script.
colnames(Tweets_Matrix_cyclicality) <- convertToDate(colnames(Tweets_Matrix_cyclicality))
rownames(Tweets_Matrix_cyclicality) <- t(firms_in_analysis)
Tweets_Matrix_cyclicality <- as.matrix(Tweets_Matrix_cyclicality)
dates <- colnames(Tweets_Matrix_cyclicality)
# Total tweet volume per day, summed across all firms.
Daily_number_of_tweets <- colSums(Tweets_Matrix_cyclicality)
tweets_database<-as.ts(Daily_number_of_tweets)
#ANALYSIS ON CYCLICALITY
#Compute statistical indicators (mean, min, max, sd, variance, MAD) of the daily counts
Indicators_vector<-c(mean(tweets_database), min(tweets_database), max(tweets_database),
sd(tweets_database), var(tweets_database), mad(tweets_database))
#First plot with trend: daily tweet volume with a fitted linear time trend in red
plot(as.Date(dates), as.vector(Daily_number_of_tweets), "l",xlab = "Days", ylab = "Number of tweets", main = "Time series of all tweets")
trend_days<-c(1:length(Daily_number_of_tweets))
reg <- lm(Daily_number_of_tweets~trend_days)
abline(reg,col="red")
#auto correlation test: ACF plot plus a Ljung-Box test at lag 1
ggAcf(tweets_database)
Box.test(tweets_database, lag=1, type = "Ljung")
########################################################
#DATA GENERATING PROCESS
#Download the tweets about the companies in the index using their cashtag

# Clean a list of tweet objects: extract the text, strip retweet markers,
# links, mentions, punctuation and any remaining non-alphanumeric character,
# then pad the result with NA up to n_pad entries so it fits a fixed-size
# matrix column.
clean_tweet_text <- function(tweet_list, n_pad) {
text_tweet<-sapply(tweet_list,function(j) j$getText())
clear1<-gsub("(RT|via)((?:\\b\\W*@\\w+)+)","",text_tweet) # retweet markers
clear2<-gsub("http[^[:blank:]]+","",clear1)#remove the link from the tweets
clear3<-gsub("@\\w+","",clear2) # remove @mentions
clear4<-gsub("[[:punct:]]"," ",clear3) #remove the punctuation character from the tweets
text<-gsub("[^[:alnum:]]"," ",clear4) #remove all the alphanumeric characters
length(text)<-n_pad #set the length of the vector to avoid dimensional error
text
}

tweet_text_firm<-matrix(NA,ncol=length(cashtag),nrow=num_tweet_A)
colnames(tweet_text_firm)<-c(cashtag)
for (z in 1:length(cashtag)){
singlefirm_tweet<-searchTwitter(cashtag[z],n=num_tweet_A, lang="en", since=as.character(BBdate_A_U),until=as.character(BBdate_A_T))
if (length(singlefirm_tweet)>1){
# BUG FIX: the original called strip_retweets() and discarded its result,
# so retweets were never actually removed; keep the stripped list.
singlefirm_tweet<-strip_retweets(singlefirm_tweet)
tweet_text_firm[,z]<-clean_tweet_text(singlefirm_tweet,num_tweet_A)
}else if(length(singlefirm_tweet)==1) {
# A single tweet is cleaned but not passed through strip_retweets()
# (behavior kept from the original branch).
tweet_text_firm[,z]<-clean_tweet_text(singlefirm_tweet,num_tweet_A)
} else {
# BUG FIX: the original wrote tweet_text_firm[z,]<-NA, blanking ROW z
# (one slot in every firm's column) instead of COLUMN z.
tweet_text_firm[,z]<-NA
}
Sys.sleep(22) # throttle to stay within the Twitter API rate limit
}
###################################################################
#OVER AND UNDER-TWEETED ANLYSIS
#CREATION OF THE TWITTER INDEX
# tweet_V holds, for each firm, the number of non-NA tweets collected above.
# BBdate_A_U is a single date, so the matrix has one column.
tweet_V<-matrix(ncol=length(BBdate_A_U),nrow=length(ticker_firms))
rownames(tweet_V)<-c(ticker_firms)
colnames(tweet_V)<-BBdate_A_U
for (i in 1:length(ticker_firms)){
tweet_V[i,]<-length(na.omit(tweet_text_firm[,i]))
}
# twitter_index = each firm's share of the total tweet volume, the Twitter
# analogue of an index weight.
twitter_index<-matrix(NA,nrow=length(tweet_V),ncol=1)
rownames(twitter_index)<-c(ticker_firms)
colnames(twitter_index)<-A_Days[1]
for (i in 1: length(ticker_firms)){
twitter_index[i,]<-tweet_V[i,]/sum(tweet_V[,1])
}
#We create two portfolio of UNDER-tweeted and OVER-tweeted firms with each return
# A firm is "over-tweeted" when its tweet share exceeds its S&P 500 weight.
# NOTE(review): both *_return matrices start with a single all-NA column that
# is never removed; the later sums rely on na.rm = T to ignore it.
undertweeted_firm<-c()
undertweeted_return<-matrix(NA,ncol=1,nrow=nrow(returns_matrix))
overtweeted_firm<-c()
overtweeted_return<-matrix(NA,ncol=1,nrow=nrow(returns_matrix))
for(q in 1:length(ticker_firms)){
if (twitter_index[q,]>sp500_W_R[q]){
overtweeted_firm<-cbind(overtweeted_firm,colnames(returns_matrix)[q])
overtweeted_return<-cbind(overtweeted_return,returns_matrix[,q])}
else{
undertweeted_firm<-cbind(undertweeted_firm,colnames(returns_matrix)[q])
undertweeted_return<-cbind(undertweeted_return,returns_matrix[,q])}
}
# Equal-weighted daily return of each of the two portfolios.
# NOTE(review): if one portfolio is empty its length is 0 and the division
# yields NaN -- confirm this cannot happen with the data used.
portfolio_return_UO<-matrix(NA,ncol=nrow(returns_matrix),nrow=2)
rownames(portfolio_return_UO)<-c("OVERtwitted portfolio returns","UNDERtwitted portfolio returns")
for (t in 1:nrow(returns_matrix)){
portfolio_return_UO[1,t] <- sum(overtweeted_return[t,],na.rm = T)/length(overtweeted_firm)
portfolio_return_UO[2,t] <- sum(undertweeted_return[t,],na.rm = T)/length(undertweeted_firm)
}
# Append the market portfolio as a third row for comparison.
portfolio_return_UO<-rbind(portfolio_return_UO,t(market_portfolio_R))
#The user can plot the number of firms in each portfolio
print(c("The number of OVER tweeted firms is:",print(length(overtweeted_firm))))
print(c("The number of UNDER tweeted firms is:",print(length(undertweeted_firm))))
#plot the results: over-tweeted portfolio (black points), under-tweeted
#portfolio (blue points) and market portfolio (red line), one value per day
c<-portfolio_return_UO[1,]                  # over-tweeted portfolio returns
v <- as.Date(colnames(portfolio_return_UO)) # trading dates for the x axis
q<-portfolio_return_UO[3,]                  # market portfolio returns
f<-portfolio_return_UO[2,]                  # under-tweeted portfolio returns
plot(v,c,type="p",xaxt='n',lwd=5,col="black",pch = 16,main="Return of the two portfolio UNDER and OVER tweeted",xlab="Dates",ylab="Return",ylim=c(min(portfolio_return_UO),max(portfolio_return_UO)))
lines(v,q,type="l",col="red",lwd=1)
# BUG FIX: the original called axis(1, at=d, labels=d), but 'd' is only
# defined much further down the script, so this line errored out when the
# script was run top to bottom. Use the date vector 'v' built above.
axis(1, at=v, labels=v)
lines(v,f,type="p",col="blue",lwd=5,pch = 16)
#Assign the average return of the three portfolios
average_overtweeted_R<-mean(portfolio_return_UO[1,])
average_undertweeted_R<-mean(portfolio_return_UO[2,])
average_market_R<-mean(portfolio_return_UO[3,])
###########################################################
#BULLISH AND BEARISH ANALYSIS
#we compute the sentimental analysis and we divide the tweets by polarity in the
#bearish and bullish vector with the respective returns
# sentiment_matrix[w, f] = average sentiment score of tweet w about firm f
# (from sentimentr::sentiment_by -- confirm the package is loaded earlier).
sentiment_matrix<-matrix(ncol=length(cashtag),nrow=num_tweet_A)
Bullish_firms<-matrix(NA,ncol=length(cashtag),nrow=nrow(returns_matrix))
colnames(Bullish_firms)<-c(cashtag)
rownames(Bullish_firms)<-c(rownames(returns_matrix))
Bearish_firms<-matrix(NA,ncol=length(cashtag),nrow=nrow(returns_matrix))
rownames(Bearish_firms)<-c(rownames(returns_matrix))
colnames(Bearish_firms)<-c(cashtag)
neitherBB_firms<-matrix(NA,ncol=length(cashtag),nrow=nrow(returns_matrix))
rownames(neitherBB_firms)<-c(rownames(returns_matrix))
colnames(neitherBB_firms)<-c(cashtag)
# Classify each firm by majority tweet polarity: more positive than negative
# tweets -> bullish; more negative -> bearish; tie -> neither. The firm's
# daily returns are copied into the matching matrix.
for (f in 1:length(cashtag)){
for (w in 1:num_tweet_A){
sentiment<-sentiment_by(tweet_text_firm[w,f])
sentiment_matrix[w,f]<-sentiment$ave_sentiment#the matrix with the average score of the sentimental analysis
}
if ((sum(sentiment_matrix[,f]>0,na.rm = T))>(sum(sentiment_matrix[,f]<0,na.rm = T))) {
Bullish_firms[,f]<-returns_matrix[,f]#we add the respective return for days in analysis
}else if ((sum(sentiment_matrix[,f]>0,na.rm = T))<(sum(sentiment_matrix[,f]<0,na.rm = T))) {
Bearish_firms[,f]<-returns_matrix[,f]
}else {
neitherBB_firms[,f]<-returns_matrix[,f]
}
}
Bullish_firms<-Bullish_firms[, colSums(is.na( Bullish_firms)) != nrow( Bullish_firms)]#we remove all the empty columns
Bearish_firms<-Bearish_firms[, colSums(is.na( Bearish_firms)) != nrow( Bearish_firms)]
neitherBB_firms<-neitherBB_firms[, colSums(is.na( neitherBB_firms)) != nrow( neitherBB_firms)]
#We create a matrix with the total return of each firm in the days analyzed
# Equal-weighted daily return of the bullish and bearish portfolios.
Port.RetBB<-matrix(NA,ncol=2,nrow=nrow(returns_matrix))
rownames(Port.RetBB)<-rownames(returns_matrix)
colnames(Port.RetBB)<-c("BULLish Portfolio","BEARish Portfolio")
for (l in 1: nrow(returns_matrix)){
Port.RetBB[l,1]<-sum(Bullish_firms[l,],na.rm = T)/ncol(Bullish_firms)
Port.RetBB[l,2]<-sum(Bearish_firms[l,],na.rm=T)/ncol(Bearish_firms)
}
# Append the market portfolio as a third column for comparison.
Port.RetBB<-cbind(Port.RetBB,market_portfolio_R)
#The user can print the number of firms in the portfolios
print(c("The number of bullish firm is:",print(ncol(Bullish_firms))))
# BUG FIX: this line reported the BEARISH count under a "bullish" label.
print(c("The number of bearish firm is:",print(ncol(Bearish_firms))))
print(c("The number of firm neither bearish neither bullish is:",print(ncol(neitherBB_firms))))
#The user can plot in a graph the return of the portfolio for the subsequent days
#(bullish = black points, bearish = blue points, market = red line)
z<-Port.RetBB[,1]                 # bullish portfolio returns
d <- as.Date(row.names(Port.RetBB))
h<-Port.RetBB[,2]                 # bearish portfolio returns
l<-Port.RetBB[,3]                 # market portfolio returns
plot(d,z,type="p",lwd=5,xaxt='n',col="Black",main="Return of the two BEARISH and BULLISH portfolio",xlab="Dates",ylab="Return",ylim=c(min(Port.RetBB),max(Port.RetBB)))
lines(d,l,type="l",lwd=1,xaxt='n',col="red")
axis(1, at=d, labels=d)
lines(d, h,type="p",lwd=5,col="blue")
#if we want to compute a more detailed analysis on a firm
#single_analysis<-sentiment_by(tweet_text_firm[,1])
#summary(single_analysis$ave_sentiment)
#qplot(single_analysis$ave_sentiment,geom="histogram",main="Distribution of the sentiment polarity for AAPL",xlab="Sentiment score",ylab="Number of tweets")
#Assign the average return of the three portfolio
average_BULL_R<-mean(Port.RetBB[,1])
average_BEAR_R<-mean(Port.RetBB[,2])
average_marketBB_R<-mean(Port.RetBB[,3])
#####################################################
#We hereby certify that
#– We have written the program ourselves except for clearly marked pieces of code
#– We have tested the program and it ran without crashing
#Corti Matteo, Simone Giay, Filippo Testa, Andrea Giacobbo
|
1cea555da43918591cb79f80d9144c2c9a21dc84
|
b0012f570faf3405b6d2309f1ddf72273db150e1
|
/man/plus-.sparse.matrix.Rd
|
3a016369dd9f82fe8733ef99a02c2c47c2bb9f7a
|
[] |
no_license
|
ShiyingWang1014/bis557
|
1336fd232ea0141f0be840c717e4bacef666310e
|
59632c22835722743fa3e9d025409e5c237d6e99
|
refs/heads/master
| 2021-08-09T01:50:49.950809
| 2018-12-20T06:03:43
| 2018-12-20T06:03:43
| 148,364,495
| 0
| 0
| null | 2018-09-11T18:51:00
| 2018-09-11T18:51:00
| null |
UTF-8
|
R
| false
| true
| 612
|
rd
|
plus-.sparse.matrix.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sparse.matrix.R
\name{+.sparse.matrix}
\alias{+.sparse.matrix}
\title{Use for adding two "sparse.matrix" objects}
\usage{
\method{+}{sparse.matrix}(a, b)
}
\arguments{
\item{a}{A "sparse.matrix" object}
\item{b}{A "sparse.matrix" object}
}
\value{
A "sparse.matrix" object
}
\description{
This function is used for adding two "sparse.matrix" objects
}
\examples{
sm0 <- sparse.matrix(i = c(1, 2), j = c(1, 1), x = c(2, 1),dims=c(2,3))
sm1 <- sparse.matrix(i = c(1, 2, 2), j = c(1, 1, 2), x = c(4.4, 1.2, 3),dims = c(2, 3))
sm0+sm1
}
|
6c6da99035c7ba709261de66cddcf34f6d7ced4b
|
b0741e778ed928d690fe053dcbc2651208957154
|
/man/confirmed_cases_data.Rd
|
6d559f8cecadfc84b050ae5d51fdf29c7af415f2
|
[] |
no_license
|
Auburngrads/CHAD3
|
7c9f9469874156ee5edab70e45b4b854d22fdd6e
|
d8e4413150fac16bc3322705059448df771aaadb
|
refs/heads/master
| 2022-04-23T21:23:40.231738
| 2020-04-27T05:00:53
| 2020-04-27T05:00:53
| 255,694,216
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 281
|
rd
|
confirmed_cases_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/confirmed_cases_data.R
\name{confirmed_cases_data}
\alias{confirmed_cases_data}
\title{Reads in and prepares COVID data}
\usage{
confirmed_cases_data()
}
\description{
Reads in and prepares COVID data
}
|
bc8544ae5b17afce81a87fd718a545c611625d8e
|
3299cd3d84a4dd486d75fc6c918d6c13f6e56731
|
/Machine Learning A-Z Template Folder/Part 5 - Association Rule Learning/Section 28 - Apriori/Apriori-R/apriori.R
|
67ec0c1d78302d2ed5ff3b8f4a41c3870a2c9c8b
|
[] |
no_license
|
knohyou/MachineLearningUdemy
|
dd085fadfb3de0be408152c7cbae3f7777e4cb77
|
de7872aca2a06e1fabbbcc0c4e0a53cc1f9dea66
|
refs/heads/master
| 2021-09-03T02:06:55.162204
| 2018-01-04T19:27:03
| 2018-01-04T19:27:03
| 113,613,535
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,487
|
r
|
apriori.R
|
# Apriori association-rule mining on the market-basket data set.

# Read the raw CSV once as a plain data frame; the file has no header row.
dataset <- read.csv('Market_Basket_Optimisation.csv', header = FALSE)

# apriori() needs a sparse transaction matrix instead: one column per product
# (~120 of them) and one row per transaction, holding 1/0 purchase flags.
#install.packages('arules')
library(arules)
dataset <- read.transactions('Market_Basket_Optimisation.csv', sep = ',',
                             rm.duplicates = TRUE)
# read.transactions() needs the comma separator spelled out (not its default),
# and rm.duplicates drops repeated items inside a single transaction.

# Summary reports, among other things, the density (share of non-zero cells).
summary(dataset)

# Bar chart of the ten most frequently purchased items.
itemFrequencyPlot(dataset, topN = 10)

#####################################
# Train the apriori model.
# Support threshold: aimed at products bought a few times a day over the
# week-long window (the original notes 4*7/7500), rounded here to 0.004.
# Confidence was started at the 0.8 default and lowered step by step to 0.2;
# at 0.8 nearly every transaction had to satisfy a rule, leaving too few rules.
rules <- apriori(data = dataset,
                 parameter = list(support = 0.004, confidence = 0.2))

# Show the ten rules with the highest lift.
# Caveat: baskets often contain items such as chocolate or mineral water that
# are simply popular overall (high support), which can inflate rules.
inspect(sort(rules, by = 'lift')[1:10])
|
e2fb657dbd4efabbffc326213f094f4c1b6c8547
|
49ff0bc7c07087584b907d08e68d398e7293d910
|
/mbg/mbg_core_code/mbg_central/LBDCore/R/get_admin_shapefile.R
|
ed524ee54d275edc111b9efc7e1ff932bfe48799
|
[] |
no_license
|
The-Oxford-GBD-group/typhi_paratyphi_modelling_code
|
db7963836c9ce9cec3ca8da3a4645c4203bf1352
|
4219ee6b1fb122c9706078e03dd1831f24bdaa04
|
refs/heads/master
| 2023-07-30T07:05:28.802523
| 2021-09-27T12:11:17
| 2021-09-27T12:11:17
| 297,317,048
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,451
|
r
|
get_admin_shapefile.R
|
## get_admin_shapefile
#' @title Return path to world admin shapefile at specified admin level
#'
#' @description
#' Returns path to world admin shapefile given \code{admin_level}. stop()s if
#' no file exists at that admin_level. Defaults to returning the ".shp" file
#' path, but will substitute any other file \code{suffix} provided.
#'
#' @param admin_level Valid admin level we have a shapefile for. Current 0/1/2.
#'
#' @param suffix '.shp' by default, provide any other suffix to e.g., get the .dbf file
#' associated with the admin shapefile.
#'
#' @param type One of "admin" (default), "raking" or "disputed_mask",
#' selecting which standard shapefile family to return.
#'
#' @param version Shapefile release passed through to
#' \code{get_admin_shape_dir()}; "current" by default.
#'
#' @param raking boolean, default FALSE. If TRUE pulls subnational raking
#' shapefile. Kept for backwards compatibility; equivalent to type = "raking".
#'
#' @examples
#' \dontrun{
#' get_admin_shapefile(2)
#' get_admin_shapefile(2, suffix = '.dbf')
#'
#' }
#' @export
get_admin_shapefile <- function(admin_level = 0, suffix = ".shp", type = "admin", version = "current", raking = FALSE) {
  if (raking) type <- "raking" # backwards compatibility with the old flag
  base_dir <- get_admin_shape_dir(version)
  # Map the shapefile type to its standard file name; unnamed final argument
  # of switch() is the fall-through for unknown types.
  # (paste0 replaces the original paste(), which injected stray spaces into
  # the error messages.)
  path <- switch(type,
    admin = paste0(base_dir, "lbd_standard_admin_", admin_level, suffix),
    raking = paste0(base_dir, "lbd_standard_raking", suffix),
    disputed_mask = paste0(base_dir, "lbd_disputed_mask", suffix),
    stop("Unknown admin shapefile type '", type, "'", call. = FALSE)
  )
  if (!file.exists(path)) {
    stop("Could not locate admin shapefile (", path, ")", call. = FALSE)
  }
  return(path)
}
|
63c92d22a73a19aca3177f4bb79681fac7713449
|
d8e1ee9e53ed0890dec9c5245590de173c20c117
|
/code/6summary.R
|
8b8e68901db993e3bd9f7e8309e566b57e1bc170
|
[] |
no_license
|
jhelvy/solar-learning-2021
|
c9b9af2c7056c76c35caae65be4016c2f4132aac
|
cb730d490eab03a22c709190c171ace0fc4da06d
|
refs/heads/main
| 2023-07-25T15:50:25.197543
| 2023-07-06T15:30:09
| 2023-07-06T15:30:09
| 373,634,772
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,310
|
r
|
6summary.R
|
# Load libraries, functions, and dir paths
# (0setup.R is expected to define `dir`, `year_model_world_max` and the
# get_*_summary_* helper functions used below -- TODO confirm)
source(here::here('code', '0setup.R'))
data <- readRDS(dir$data_formatted)
# Global installed capacity over period -----
cat(
"China, Germany, and the U.S. combined comprised",
data$irenaCumCapacityMw %>%
filter(year == year_model_world_max) %>%
mutate(result = scales::percent((usa + china + germany) / world)) %>%
pull(result),
"of all global installed PV capacity between 2007 - 2020\n\n"
)
# Learning rates -----
lr <- readRDS(dir$lr_models)
cat(
"Learning rates:\n",
"U.S.: ", scales::percent(lr$lr_us), "\n",
"China: ", scales::percent(lr$lr_china), "\n",
"Germany: ", scales::percent(lr$lr_germany), "\n\n"
)
# Historical cost implications -----
cost <- readRDS(dir$scenarios_hist)
# Comparison of 2020 costs under global vs national learning
cat(get_cost_summary_hist(cost$cost))
# Savings in each country
cat(get_savings_summary_hist(cost$savings))
# Future cost projections -----
# Projection growth rates (CAGR)
# NOTE(review): the result of this pipe is not assigned; when run via
# source() its value is discarded -- confirm whether it should be saved.
data$rates %>%
mutate(rate = scales::percent(rate, accuracy = 1))
proj <- readRDS(dir$scenarios_proj)
cat(get_cost_summary_proj(proj$nat_trends, proj$sus_dev))
# Savings in each country
cat(get_savings_summary_proj(proj$savings_nat_trends, proj$savings_sus_dev))
|
9f2fcf6eb0b97c7cf6e7145fc0fec6be767ac3b7
|
04a907126ffc34ae1e3aacde07c133e41436649d
|
/man/getCABCM.Rd
|
5aaa2399f9985f191f4eba9d0bdad8a8849e5358
|
[
"MIT"
] |
permissive
|
mbjoseph/climateR
|
a21bfb03d3f697cff1c77d59dc63ad61fee6a5e2
|
98327be6c3ad2b6f86c7fdcab3b4967853c981fc
|
refs/heads/master
| 2021-06-27T12:42:02.393323
| 2021-01-14T16:19:11
| 2021-01-14T16:19:11
| 201,987,234
| 1
| 0
|
MIT
| 2021-01-14T16:05:53
| 2019-08-12T18:31:31
|
R
|
UTF-8
|
R
| false
| true
| 1,238
|
rd
|
getCABCM.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getCABCM.R
\name{getCABCM}
\alias{getCABCM}
\title{Get CABCM Climate Data for an Area of Interest}
\usage{
getCABCM(
AOI,
param,
model = "CCSM4",
scenario = "rcp85",
startDate,
endDate = NULL
)
}
\arguments{
\item{AOI}{a spatial polygon object (sf or sp)}
\item{param}{a meteorological parameter (see `param_meta$cabcm`)}
\item{model}{GCM model name (see `model_meta$cabcm`)}
\item{scenario}{a climate scenario pathway (see `model_meta$cabcm`)}
\item{startDate}{a start date given as "YYYY-MM-DD"}
\item{endDate}{an end date given as "YYYY-MM-DD"}
}
\value{
if AOI is an areal extent a list of rasterStacks, if AOI is a point then a data.frame of modeled records.
}
\description{
The Basin Characterization Model (BCM) dataset provides historical and projected climate and hydrology data at a 270 meter resolution,
which is relevant for watershed-scale evaluation and planning.
These data have formed the basis for multiple research projects and vulnerability assessments applying
climate change projections to conservation decision-making, providing a common base-layer and set of
assumptions across these projects.
}
\author{
Mike Johnson
}
|
98047223d80c3b719df484fa00023febbf31e929
|
5a6dba0953606fb675620888bbbd0a66b12192ab
|
/R/scraps.r
|
3c7f6de9477d11a73a330a3b8813ef78dd9e0914
|
[] |
no_license
|
peterkuriyama/mrag_indicators
|
36298f35fd758a982af1a4b3fc3d527d7c7f63e4
|
912414cff73d6fc59657c582ecc32972c86d52ca
|
refs/heads/master
| 2020-05-25T09:40:58.582097
| 2014-09-16T19:18:15
| 2014-09-16T19:18:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,102
|
r
|
scraps.r
|
# SCRAPS FILE (review note): this is a scratchpad of discarded code kept for
# reference only. Several of the uncommented fragments below are incomplete
# and will NOT parse or run on their own (dangling summarise() arguments,
# unmatched parentheses). The `%.%` operator appears to be the long-
# deprecated early dplyr pipe. Do not source() this file.
# data <- subset(data, year <= years[2] & year >= years[1])
# all.stocks <- data %.% group_by(region, year) %.% summarise(arit.b.ratio = mean(b.ratio),
# arit.f.ratio = mean(f.ratio, na.rm = T),
# gm.b.ratio = gm_mean(b.ratio),
# gm.f.ratio = gm_mean(f.ratio))
rebuilding.stocks
summarise(arit.b.ratio = mean(b.ratio),
arit.f.ratio = mean(f.ratio, na.rm = T),
gm.b.ratio = gm_mean(b.ratio),
gm.f.ratio = gm_mean(f.ratio) )
# (orphaned argument list below -- not runnable)
arit.b.ratio = mean(b.ratio),
arit.f.ratio = mean(f.ratio, na.rm = T),
gm.b.ratio = gm_mean(b.ratio),
gm.f.ratio = gm_mean(f.ratio)
# Per-region/year summary: counts and share of overfished stocks, plus
# arithmetic and geometric mean B/Bmsy and F/Fmsy ratios.
all.stocks <- data %.% group_by(region, year) %.%
summarise(tot.nrow = length(overfished),
n.row = length(grep('Overfished', overfished)),
prop.overfished = n.row / tot.nrow,
arit.b.ratio = mean(b.ratio),
arit.f.ratio = mean(f.ratio, na.rm = T),
gm.b.ratio = gm_mean(b.ratio),
gm.f.ratio = gm_mean(f.ratio))
#Below Msy
# (the next three fragments are incomplete and will not parse)
summarise(n.row = length(grep('Overfished', )))
nrow(x))
grep('Overfished')
#Overfished
#Depleted
#Average biomass for overfished stocks
# B/bmsy ratio time series
#Catch shares went in in 2010
#Calculate
#Stocklong.x goes with f
#stocklong.y goes with b
# (incomplete fragment -- trailing function(x) has no body)
as.data.frame(data %.% group_by(stocklong.x, f.ts) %.%
summarise(range(year)[1], range(year)[2]),
function(x) )
data <- subset(data, year <= years[2] & year >= years[1])
# Mean ratios (arithmetic and geometric) by region and year.
all.avg.year <- data %.% group_by(region, year) %.% summarise(arit.b.ratio = mean(b.ratio),
arit.f.ratio = mean(f.ratio, na.rm = T),
gm.b.ratio = gm_mean(b.ratio),
gm.f.ratio = gm_mean(f.ratio))
# make_plots <- function(data.type = 'biomass',
# years = c(2002, 2011),
# bad.crit = 0.5){
# # print('data.type can be "biomass" or "fishing mortality"')
# if(data.type == 'biomass') data <- b_ratios()
# if(data.type == 'fishing mortality') data <- f_ratios()
# data <- subset(data, tsyear <= years[2] & tsyear >= years[1])
# data$ratio < bad.crit
# data$overfished <-
# #Average Ratios by year
# all.avg.year <- data %.% group_by(region, tsyear) %.% summarise(avg.ratio = mean(ratio))
# #Overfished Stocks
# head()
# }
# biomass_plots <- function(years = c(2002, 2011)){
# #Load Biomass
# b <- b_ratios()
# #Split Data
# b.west <- east_west(data = b)$west
# b.ne <- east_west(data = b)$ne
# }
# east_west(f)
# #Classify Stocks
# b <- b_ratios()
# b$overfished <- 'not overfished'
# b[b$ratio <= 0.5, 'overfished'] <- 'overfished'
# overfished_stocks <- function(){
# b <- b_ratios()
# b$overfished <- 'not overfished'
# b[b$ratio <= 0.5, 'overfished'] <- 'Overfished'
# b$s.o <- paste(b$overfished, b$stocklong)
# unique(b[grep('Overfished', b$s.o), 'stocklong'])
# }
# #Biomass Ratios
# unique(nas$stocklong.x)
# unique(b.ratios$stocklong.x)
# unique(nas$stocklong.x)[unique(nas$stocklong.x) %in% unique(b.ratios$stocklong.x) == FALSE]
# unique(b.ratios$stocklong.x)
# b.ratios$stocklong.x[b.ratios$stocklong.x %in% nas$stocklong.x]
# unique(b.ratios$stocklong.x)
# is.na(b.ratios$stocklong.y)
# to.check <- b.ratios[is.na(b.ratios$ratio), ]
# unique(to.check$stocklong.x)
# write.csv(b.ratios, 'output/check_b_ratios.csv')
# unique(b.ratios[is.na(b.ratios$ratio), 'stocklong.y'])
# b.ratios$stocklong.x == b.ratios$stocklong.y
# is.na(b.ratios$ratio) == FALSE
# unq.ref.mas <- unique(b.refs$metric.and.spp)
# #Remove MSY
# unq.ref.mas[grep('msy', unq.ref.mas)]
# #have no msy?
# unq.ref.mas[1:107 %in% grep('msy', unq.ref.mas) == FALSE]
# #A lot of B0 and SSB0.
# # Longspine thornyhead has 'SSB-MSY'
# unq.ref.mas.nomsy <- gsub('msy','', unq.ref.mas)
# unq.ts.mas %in% unq.ref.mas
# ts.dd <- unique(ts$tsid)
# ts.dd[ts.dd %in% ts.dd[grep('B', ts.dd)] == FALSE]
# b.ts$metric.and.spp <- paste(b.ts$tsid)
# b.refs[duplicated(b.refs$bioid), ]
# grep('msy', refs$bioid)
# gsub('msy') refs$bioid
# ##Calculate B Ratios
# #Time Series
# fs <- ts[grep('F', ts$tsid), ]
# us <- ts[grep('U', ts$tsid), ]
# ers <- ts[grep('ER', ts$tsid), ]
# f <- rbind(fs, us, ers)
# #Reference Points
# fs.ref <- refs[grep('F', refs$bioid), ]
# us.ref <- refs[grep('U', refs$bioid), ]
# ers.ref <- refs[grep('ER', refs$bioid), ]
# f.ref <- rbind(fs.ref, us.ref, ers.ref)
# #Merge time series and reference points
# f.ratios <- merge(f, f.ref, all = TRUE, by = 'stocklong')
# #Remove NAs
# f.ratios <- f.ratios[-which(is.na(f.ratios$tsvalue) == TRUE), ]
# f.ratios <- f.ratios[-which(f.ratios$tsvalue == ''), ]
# f.ratios <- f.ratios[-which(is.na(f.ratios$biovalue) == TRUE), ]
# #Convert value columns to numeric
# f.ratios$tsvalue <- as.numeric(f.ratios$tsvalue)
# #Calculate Ratio
# f.ratios$ratio <- f.ratios$tsvalue / f.ratios$biovalue
# #Move Columns Around
# f.ratios <- f.ratios[, c('stocklong', 'stockid.x', 'assessid.x', 'tsyear', 'tsvalue',
# 'biovalue', 'ratio')]
# f.ratios <- rename(f.ratios, c('stockid.x' = 'stockid',
# 'assessid.x' = 'assessid'))
# write.csv(f.ratios, file = 'output/f_ratios.csv')
# return(f.ratios)
# #Change stocklong to all lowercase
# ##Function to Split into NE and West Coast Stocks
# east <- f[grep('NE', f$assessid), ]
# 'cod gulf of maine'
# 'cod Georges Bank'
# 'plaice'
# 'white hake'
# 'redfish'
# 'witch'
# 'pollock'
# 'yellowtail flounder'
# 'haddock'
# 'winter'
# west <- rbind(f[grep('W', f$assessid) ,], f[grep('PF', f$assessid) ,])
# unique(refs$bioid)
# #Calculate Ratios
# ##Check to see which species don't have ratios
# ts.spp <- unique(ts$stocklong)
# refs.spp <- unique(refs$stocklong)
# ts.spp[ts.spp %in% refs.spp == FALSE]
# refs.spp[refs.spp %in% ts.spp == FALSE]
# ts.id <- unique(ts$assessid)
# refs.id <- unique(refs$assessid)
# check <- merge(ts, refs, by = 'stocklong', all = TRUE)
# unique(check[which(check$assessid.x != check$assessid.y), 'stocklong'])
# # ts.id[ts.id %in% refs.id == FALSE]
# # head(ts)
|
91ba410eda0989f9a9af650304b671e7a58d891a
|
8595a36008553d29feb6bdc5e95012d7a4324404
|
/CommonItemEquating.R
|
e9d975496a0f9f2a958fe094ef95a164426bc53a
|
[] |
no_license
|
toby270205/Equating
|
0baab5beed6d1949d41b0dab7bc575ca211e3243
|
652a497feb0896be7dffbf8371db8a9306b0f740
|
refs/heads/master
| 2021-01-23T04:38:58.040787
| 2017-09-06T00:38:38
| 2017-09-06T00:38:38
| 102,442,868
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,307
|
r
|
CommonItemEquating.R
|
# Common-item (anchor) test equating with the TAM Rasch package:
# two test forms sharing some items are scaled separately, linked through
# the common items, then equated via anchoring and concurrent calibration.
# NOTE(review): rm(list=ls()) and a hard-coded setwd() are workspace- and
# machine-specific side effects; consider removing them for portability.
rm(list=ls()) #remove all current variables
library(TAM)
setwd("C:\\G_MWU\\R Projects\\Equating")
#read in the data files
resp1 <- read.csv("test1.csv")
resp2 <- read.csv("test2.csv")
#separately scale each test
mod1 <- tam(resp1)
mod1$xsi #check item parameter
mod2 <- tam(resp2)
#find common items between test 1 and test 2 through common item names
iname <- colnames(resp1) %in% colnames(resp2)
link1 <- mod1$xsi[iname,]
link2 <- mod2$xsi[row.names(link1),]
#Adjust test 2 common items to have the same mean as test 1 common items
#(mean-mean linking)
xsi2.adj <- link2$xsi - mean(link2$xsi) + mean(link1$xsi)
#Form confidence lines (~1 SE band around the identity line)
p1 <- (link1$xsi+xsi2.adj)/2-sqrt(link1$se.xsi^2+link2$se.xsi^2)
p2 <- (link1$xsi+xsi2.adj)/2+sqrt(link1$se.xsi^2+link2$se.xsi^2)
plot(link1$xsi,xsi2.adj)
lines(p1,p2)
lines(p2,p1)
text(link1$xsi,xsi2.adj,labels=rownames(link1),cex=0.7,pos=3)
#Compute the differences between pairs of common item parameters
diff <- link1$xsi-xsi2.adj
diff
#Consolidate into one data frame
linked <- data.frame(link1,link2,xsi2.adj,diff,p1,p2)
#remove some items (misfitting links identified from the plot above)
rem <- c("S16","S19","S24","S35")
link1 <- link1[!rownames(link1) %in% rem,] #recompute link set
link2 <- link2[!rownames(link2) %in% rem,]
#replot link set with some items removed
xsi2.adj <- link2$xsi - mean(link2$xsi) + mean(link1$xsi)
p1 <- (link1$xsi+xsi2.adj)/2-sqrt(link1$se.xsi^2+link2$se.xsi^2)
p2 <- (link1$xsi+xsi2.adj)/2+sqrt(link1$se.xsi^2+link2$se.xsi^2)
plot(link1$xsi,xsi2.adj)
lines(p1,p2)
lines(p2,p1)
text(link1$xsi,xsi2.adj,labels=rownames(link1),cex=0.7,pos=3)
diff <- link1$xsi-xsi2.adj
# Equating constant: shift applied to test 2's scale.
shift <- - mean(link2$xsi) + mean(link1$xsi)
linked <- data.frame(link1,link2,xsi2.adj,diff,p1,p2)
#Anchor method. Find common items
x <- which(rownames(mod1$xsi) %in% rownames(linked))
y <- which(rownames(mod2$xsi) %in% rownames(linked))
xsi.fixed <- cbind(y,mod1$xsi$xsi[x]) #anchor set 2 common items to set 1 common item parameters
mod3 <- tam(resp2,xsi.fixed=xsi.fixed) #anchor parameters
#Concurrent equating. Merge two data files.
#However, first some common items may need to be renamed to be treated as different items.
# NOTE(review): the column positions c(16,19,24,35) are assumed to match the
# dropped items S16/S19/S24/S35 -- confirm against the csv layout.
colnames(resp1)[c(16,19,24,35)]<-c("S16a","S19a","S24a","S35a")
library(plyr)
resp4 <- rbind.fill(resp1,resp2)
# Group indicator (1 = form 1, 2 = form 2) used as a facet in the joint run.
testname <- c(rep(1,nrow(resp1)),rep(2,nrow(resp2)))
mod4 <- tam(resp4,Y=testname)
d3982e4f9d182d638457f359e7ada34b381b6d7f
|
b8c43e421f7216167380682c06ed9040db053627
|
/scripts/go_summary.R
|
e1c35dd5d8d21f09a7568c2e07ab2fcf5290fa44
|
[] |
no_license
|
hmtzg/geneexp_mouse
|
5a896cb4722794c85f464a75d459caf84021ffa0
|
1f2434f90404a79c87d545eca8723d99b123ac1c
|
refs/heads/master
| 2022-02-22T13:31:09.135196
| 2022-02-02T09:02:15
| 2022-02-02T09:02:15
| 267,553,488
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,573
|
r
|
go_summary.R
|
# Summarise significant GO (gene ontology) groups from a GSEA result object
# and build a gene-by-term membership matrix for similarity clustering.
library(tidyverse)
#library(clusterProfiler)
# sgo is a GSEA result object with @geneSets and @result slots
# (produced elsewhere in the pipeline -- presumably clusterProfiler; confirm).
sgo = readRDS('./data/processed/raw/dc_gse.rds')
#ddc = readRDS('./data/processed/raw/dev_divergent_genes_dc_rank.rds')
#sum(sapply(sgo@geneSets, function(x) sum(is.na(x)))) # no empty go
## get all genes for all go groups:
# Long table: one row per (gene, GO ID) membership.
allgo = reshape2::melt(sgo@geneSets) %>%
set_names('gene','ID')
## get only significant go:
# Keep GO terms that are significant (q < 0.1) AND depleted (NES < 0).
signifGO = sgo@result %>%
filter(qvalues < 0.1 & NES<0)
# filter only signif go :
signifGO_genes = signifGO %>%
select(ID) %>%
left_join(allgo)
## gene category matrix:
# Wide 0/1 matrix: rows = genes, columns = significant GO terms.
gogenemat = signifGO_genes %>%
mutate(value=1) %>%
spread(ID, value, fill=0)
#gogenemat = as.data.frame(gogenemat)
rownames(gogenemat) = gogenemat$gene
gogenemat$gene = NULL
gogenemat = as.matrix(gogenemat)
# jaccard similarity between every pair of GO terms (columns of the 0/1
# gene-by-term membership matrix): |A intersect B| / |A union B|.
# PERFORMANCE: the original computed this with a quadratic nested apply();
# crossprod() gives all pairwise intersection counts in one vectorized step,
# and the union count follows from |A| + |B| - |A intersect B|. Row and
# column names are the GO IDs, exactly as before, and a pair of empty terms
# still yields NaN (0/0), matching the original behavior.
go_intersections <- crossprod(gogenemat)            # t(M) %*% M
go_sizes <- colSums(gogenemat)                      # genes per GO term
go_unions <- outer(go_sizes, go_sizes, "+") - go_intersections
jaccardsim = go_intersections / go_unions
## define go clusters with highest similarity with threshold:
#gocat = unique(signifGO$ID)
# Smallest k such that cutting the hclust tree into k groups gives every group
# a median pairwise Jaccard similarity >= 0.2 (singletons trivially pass via
# the literal 1 returned below).
# NOTE(review): the tree is built on dist(jaccardsim) -- euclidean distance
# between similarity profiles -- not on as.dist(1 - jaccardsim) (the commented
# alternative); confirm this choice is intentional.
k = min(which(sapply(1:nrow(jaccardsim), function(i){
  cl = cutree(hclust(dist(jaccardsim)),i)
  #cl = cutree(hclust(as.dist(1-jaccardsim)),i)
  all(sapply(1:i, function(k){
    xx = names(which(cl==k))
    if(length(xx)==1){
      return(1)
    } else {
      xx = jaccardsim[xx,xx]
      median(xx[upper.tri(xx)])
    }
  }) >= 0.2)
})))
k
#k = ifelse(k==1, nrow(jaccardsim), k)
treex = hclust(dist(jaccardsim))
treecl = cutree(treex, k)
# get representative go group for go clusters: the member with the highest
# mean similarity to its cluster (singletons represent themselves).
reps = sapply(1:k, function(i){
  xx = names(which(treecl == i))
  if(length(xx)>1){
    xx = jaccardsim[xx,xx]
    names(which.max(rowMeans(xx)))
  } else{
    xx
  }
})
reps
# Named list: representative GO id -> all member GO ids of its cluster.
repclus = setNames(lapply(1:k, function(i) names(which(treecl==i))), reps)
# Long table mapping each clustered GO term to its cluster representative,
# joined back to the significance stats (core_enrichment dropped).
newdf = reshape2::melt(repclus) %>%
  set_names('ID', 'rep') %>%
  arrange(rep) %>%
  left_join(signifGO) %>%
  select(-core_enrichment)
head(newdf)
# Bar chart of NES for representative terms only; descriptions longer than
# 40 characters are truncated to 37 characters plus an ellipsis.
dico_go = newdf %>%
  filter(ID==rep) %>%
  mutate(termname = ifelse(nchar(Description)>40,
                           paste(substr(Description,1,37),'...',sep=''), Description)) %>%
  ggplot(aes(x=NES, y=reorder(termname, -NES))) +
  geom_bar(stat='identity') +
  theme(axis.text = element_text(size=6)) +
  ylab('')
ggsave('results/figure4/fig4g.pdf', dico_go, units = 'cm', width = 16, height = 16, useDingbats=F)
# Ad-hoc lookups of chemotaxis-related terms (interactive exploration, kept as-is).
newdf[newdf$Description=='leukocyte chemotaxis',]
newdf[newdf$Description=='chemotaxis',] # GO:0006935
newdf[newdf$Description=='leukocyte chemotaxis',] # GO:0030595
newdf[newdf$Description=='granulocyte chemotaxis',] # GO:0071621
# repclus[names(repclus)%in%'GO:0006935']
# repclus[names(repclus)%in%'GO:0030595']
# repclus[names(repclus)%in%'GO:0071621']
# cor(jaccardsim[,'GO:0042330'],jaccardsim[,'GO:0006935'])
# cor(jaccardsim[,'GO:0030595'],jaccardsim[,'GO:0097529'])
# cor(jaccardsim[,'GO:0071621'],jaccardsim[,'GO:0030593'])
# cor(jaccardsim[,'GO:0071621'],jaccardsim[,'GO:0097530'])
# cor(jaccardsim[,'GO:0071621'],jaccardsim[,'GO:0042330'])
# head(repclus2)
# int = intersect(names(repclus), names(repclus2) )
# head(repclus[int])
# head(repclus2[int])
# a = 'GO:0006935'
# b = 'GO:0030595'
# sum(gogenemat[,a]==1 & gogenemat[, b] == 1) /
#   sum(gogenemat[,a]==1 | gogenemat[, b] == 1)
# d1 = dist(jaccardsim)
# d2 = as.dist(1-jaccardsim)
# head(d1)
# head(d2)
# cor.test(d1,d2)
# plot(d1/max(d1), d2)
######################################
#
# Export significant GO ids with their NES. Note: signifGO is intentionally
# reassigned here to a two-column (ID, NES) version.
signifGO = sgo@result[,1:10] %>%
  filter(qvalues < 0.1 & NES < 0) %>%
  select(ID, NES)
write.table(signifGO, file = 'results/figure4/signifGO_DiCo.csv', row.names = F, quote = F)
|
a301fc12551750c2563a79b2f9b58a073455e339
|
bcf1f90c437ffbb6d114b4822d4aa77fcea73245
|
/add_three.R
|
c1f229ff3dca3ccfe1218eafbcdd4d6d03a199df
|
[] |
no_license
|
kmcguire2/chem160module10
|
65086d58769699fb54d4592f9e26021c21b871a2
|
c133af90e9c37450b3a7ba50aa09c067385ddf81
|
refs/heads/main
| 2023-01-01T15:31:03.221964
| 2020-10-08T19:11:27
| 2020-10-08T19:11:27
| 302,438,178
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 52
|
r
|
add_three.R
|
# Return the sum of three numbers (vectorized, like `+` itself).
add_three <- function(a, b, c) {
  a + b + c
}
|
3200828d8b757db8bb1f9be9e7563d088370a349
|
ce5146b3ee83cc72a440702a1571c05f3770d9b1
|
/Figures.R
|
f026a030cf4a1a2d920b9e4c4bb1dc988af3153b
|
[] |
no_license
|
YusukeKinari/Kurokawa-et-al.-2021-
|
aae3a4af056c29d9fa3ff65d40db4bee335e2907
|
35391e2c85938474e9c5fffa60d46ea78382fd8e
|
refs/heads/main
| 2023-03-30T01:10:41.923818
| 2021-04-07T00:37:09
| 2021-04-07T00:37:09
| 343,637,122
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,311
|
r
|
Figures.R
|
# Working directory for the two machines this script was run on; when both
# paths exist, the second setwd() wins.
# NOTE(review): hard-coded setwd() is fragile -- prefer an RStudio project or
# project-relative paths.
setwd("C:/Users/Hirofumi Kurokawa/Box/201710_オキシトシン実験/Paper/TrustGame/Submission/FNeuroendocrine/R2/DataCode")
setwd("C:/Users/Kurokawa/Box Sync/201710_オキシトシン実験/Paper/TrustGame/Submission/FNeuroendocrine/R2/DataCode")
# Install-if-missing guards for required packages.
if(!require("ggplot2")){
  install.packages("ggplot2")
}
# BUG FIX: the original guard tested require("ggsgnif") (typo), so the check
# always failed and ggsignif was re-installed on every run.
if(!require("ggsignif")){
  install.packages("ggsignif")
}
if(!require("Hmisc")){
  install.packages("Hmisc")
}
library(ggplot2)
library(ggsignif)
library(Hmisc)
if(!require("devtools")){
  install.packages("devtools")
}
# Guard the GitHub install so it only runs when plotflow is missing
# (the original re-installed it unconditionally on every run).
if(!require("plotflow")){
  devtools::install_github("trinker/plotflow")
}
library("plotflow")
library("gridExtra")
if(!require("ggthemes")){
  install.packages("ggthemes")
}
library(ggthemes)
library(tidyverse)
library(haven)

# Main participant-level data set (Stata .dta format).
data_oxt <- read_dta("Data/Data.dta")
# Figure 1 Transfer: boxplots + jitter of transfer amounts, trust vs risk ---- OK
# NOTE(review): geom_jitter(, width=0.25) has an empty first argument (a
# missing `mapping`); harmless, the default is used.
Fig1_transfer_trust<-
  ggplot(data_oxt, aes(x = oxt, y = transfer_ave)) +
  geom_boxplot()+
  geom_jitter(, width=0.25)+
  theme_classic()+
  labs(title="(a) Trust experiment", y="Transfer (MU)", x="Group")+
  geom_signif(stat = "identity",
              data = data.frame(x = c(1.3),
                                xend = c(1.8),
                                y = c(12.5),
                                annotation = c("t = 1.40, p = 0.08")),
              aes(x = x, xend = xend, y = y, yend = y, annotation = annotation),
              col = "black")
Fig1_transfer_risk<-
  ggplot(data_oxt, aes(x = oxt, y = transfer_risk_ave)) +
  geom_boxplot()+
  geom_jitter(, width=0.25)+
  theme_classic()+
  labs(title="(b) Risk experiment", y="Transfer (MU)", x="Group")+
  geom_signif(stat = "identity",
              data = data.frame(x = c(1.3),
                                xend = c(1.8),
                                y = c(12.5),
                                annotation = c("t = 0.41, p = 0.34")),
              aes(x = x, xend = xend, y = y, yend = y, annotation = annotation),
              col = "black")
Fig1 <- gridExtra::grid.arrange(Fig1_transfer_trust, Fig1_transfer_risk, nrow = 1)
ggsave(file="Figures/Fig1_transfer.tiff", Fig1)
# Figure 2 Transfer: relative-frequency bar charts ---- OK
# NOTE(review): "freqency" is the actual column name in the CSV -- do not "fix".
data_freq <-
  read.csv("Data/Data_freq.csv", header = TRUE, fileEncoding="UTF-8-BOM")
data_freq <-
  transform(data_freq, Group= factor(Group, levels = c("Placebo", "Oxytocin")))
data_freq_trust <-
  data_freq %>%
  filter(Game== "Trust")
Fig2_transfer_trust<-
  ggplot(data_freq_trust, aes(x = transfer, y = freqency, fill = Group))+
  geom_bar(stat = "identity", position = "dodge")+
  scale_y_continuous(limits = c(0, 0.65))+
  scale_x_continuous(breaks=seq(0,12,by=1),limits=c(0,12.5))+
  labs(title="(a) Trust game", y="Relative frequency", x="Average transfer per investor (MU)")+
  theme_classic()+
  scale_fill_grey()+
  annotate("text", x=12, y=0.48, label="Fisher, p = 0.04")+
  annotate("segment", x=11.5,xend=12.5, y=0.45,yend=0.45,
           colour="black", size=0.5)
data_freq_risk <-
  data_freq %>%
  filter(Game== "Risk")
Fig2_transfer_risk<-
  ggplot(data_freq_risk, aes(x = transfer, y = freqency, fill = Group))+
  geom_bar(stat = "identity", position = "dodge")+
  scale_y_continuous(limits = c(0, 0.65))+
  scale_x_continuous(breaks=seq(0,12,by=1),limits=c(0,12.5))+
  labs(title="(b) Risk game", y="Relative frequency", x="Average transfer per investor (MU)")+
  theme_classic()+
  scale_fill_grey()+
  annotate("text", x=12, y=0.63, label="Fisher, p = 0.24")+
  annotate("segment", x=11.5,xend=12.5, y=0.6,yend=0.6,
           colour="black", size=0.5)
Fig2 <- gridExtra::grid.arrange(Fig2_transfer_trust, Fig2_transfer_risk, nrow = 2)
ggsave(file="Figures/Fig2_transfer_RF.tiff", Fig2)
# Figure 3 Back Transfer: trustee back-transfers per transfer level ---- OK
data_oxt_recieve <- read_dta("Data/Data_receive.dta")
# NOTE(review): labeli is defined but never used below (titles are set per panel).
labeli <- as_labeller(c(`0` = "Transfer = 0",
                        `4` = "Transfer = 4",
                        `8` = "Transfer = 8",
                        `12` = "Transfer = 12"))
# One panel per received-transfer level (0 / 4 / 8 / 12 MU).
data_oxt_recieve0 <-
  data_oxt_recieve %>%
  filter(receive== 0)
Fig3_recieve0<-
  ggplot(data_oxt_recieve0, aes(x = oxt, y = return)) +
  geom_boxplot()+
  geom_jitter(, width=0.25)+
  scale_y_continuous(limits = c(0, 50))+
  labs(title="Transfer=0", y="Back transfer from the trustee (MU)", x="Group")+
  theme_classic()+
  annotate("text", x=1.45, y=17, label="t = -1.82, p = 0.04", size=2.5)+
  annotate("segment", x=1.2,xend=1.7, y=15,yend=15,
           colour="black", size=0.5)
data_oxt_recieve4 <-
  data_oxt_recieve %>%
  filter(receive== 4)
Fig3_recieve4<-
  ggplot(data_oxt_recieve4, aes(x = oxt, y = return)) +
  geom_boxplot()+
  geom_jitter(, width=0.25)+
  scale_y_continuous(limits = c(0, 50))+
  labs(title="Transfer=4", y="Back transfer from the trustee (MU)", x="Group")+
  theme_classic()+
  annotate("text", x=1.45, y=17, label="t = -1.06, p = 0.85", size=2.5)+
  annotate("segment", x=1.2,xend=1.7, y=15,yend=15,
           colour="black", size=0.5)
data_oxt_recieve8 <-
  data_oxt_recieve %>%
  filter(receive== 8)
Fig3_recieve8<-
  ggplot(data_oxt_recieve8, aes(x = oxt, y = return)) +
  geom_boxplot()+
  geom_jitter(, width=0.25)+
  scale_y_continuous(limits = c(0, 50))+
  labs(title="Transfer=8", y="Back transfer from the trustee (MU)", x="Group")+
  theme_classic()+
  annotate("text", x=1.45, y=22, label="t = -0.34, p = 0.63", size=2.5)+
  annotate("segment", x=1.2,xend=1.7, y=20,yend=20,
           colour="black", size=0.5)
data_oxt_recieve12 <-
  data_oxt_recieve %>%
  filter(receive== 12)
Fig3_recieve12<-
  ggplot(data_oxt_recieve12, aes(x = oxt, y = return)) +
  geom_boxplot()+
  geom_jitter(, width=0.25)+
  scale_y_continuous(limits = c(0, 50))+
  labs(title="Transfer=12", y="Back transfer from the trustee (MU)", x="Group")+
  theme_classic()+
  annotate("text", x=1.45, y=30, label="t = -0.34, p = 0.63", size=2.5)+
  annotate("segment", x=1.2,xend=1.7, y=28,yend=28,
           colour="black", size=0.5)
Fig3 <- gridExtra::grid.arrange(Fig3_recieve0, Fig3_recieve4, Fig3_recieve8, Fig3_recieve12, ncol = 4)
ggsave(file="Figures/Fig3_back_transfer.tiff", Fig3)
# Figure 4: administration-induced increases in oxytocin concentration ----
# (original header note, translated: "possibly not needed?")
# BUG FIX: the original chained `+ ggsave(...)` onto the ggplot. ggsave() is
# not a layer: evaluated inside `+`, it saves last_plot() -- i.e. the previous
# figure -- and the addition then fails. Build the plot, then save it explicitly.
Fig4 <- ggplot(data_oxt, aes(x = oxt, y = lnoxt_diff31)) +
  geom_boxplot() +
  geom_jitter(width = 0.25) +
  theme_classic() +
  labs(title = "Administration-induced increases in the concentrations of oxytocin",
       subtitle = "After RE (72 minutes) - Before administration",
       y = "Logarithmic oxytocin level (pg/ml)",
       x = "Group")
ggsave(file = "Figures/Fig4_OTdiff.tiff", plot = Fig4)
# Figure 5 Transfer: relative frequency by oxytocin sensitivity ---- OK
data_freq_ot <-
  read.csv("Data/Data_freq_ot.csv", header = TRUE, fileEncoding="UTF-8-BOM")
data_freq_ot <-
  transform(data_freq_ot, Group= factor(Group, levels = c("Placebo", "Oxytocin: low sensitivity", "Oxytocin: high sensitivity")))
data_freq_ot_trust <-
  data_freq_ot %>%
  filter(Game== "Trust")
Fig5_transfer_trust<-
  ggplot(data_freq_ot_trust, aes(x = transfer, y = freqency, fill = Group))+
  geom_bar(stat = "identity", position = "dodge")+
  scale_y_continuous(limits = c(0, 0.85))+
  scale_x_continuous(breaks=seq(0,12,by=1),limits=c(0,12.55))+
  labs(title="(a) Trust game", y="Relative frequency", x="Average transfer per investor (MU)")+
  theme_classic()+
  scale_fill_grey()+
  annotate("text", x=12, y=0.48, label="Fisher, p = 0.03", size=2.5)+
  annotate("segment", x=11.5,xend=12.5, y=0.44,yend=0.44,
           colour="black", size=0.5)+
  annotate("text", x=11, y=0.42, label="Fisher, p = 0.34", size=2.5)+
  annotate("segment", x=11.5,xend=11.9, y=0.39,yend=0.39,
           colour="black", size=0.5)
data_freq_ot_risk <-
  data_freq_ot %>%
  filter(Game== "Risk")
Fig5_transfer_risk<-
  ggplot(data_freq_ot_risk, aes(x = transfer, y = freqency, fill = Group))+
  geom_bar(stat = "identity", position = "dodge")+
  scale_y_continuous(limits = c(0, 0.85))+
  scale_x_continuous(breaks=seq(0,12,by=1),limits=c(0,12.5))+
  labs(title="(b) Risk game", y="Relative frequency", x="Average transfer per investor (MU)")+
  theme_classic()+
  scale_fill_grey()+
  annotate("text", x=12, y=0.77, label="Fisher, p = 0.41", size=2.5)+
  annotate("segment", x=11.5,xend=12.5, y=0.74,yend=0.74,
           colour="black", size=0.5)+
  annotate("text", x=11, y=0.72, label="Fisher, p = 0.11", size=2.5)+
  annotate("segment", x=11.5,xend=11.9, y=0.69,yend=0.69,
           colour="black", size=0.5)
Fig5 <- gridExtra::grid.arrange(Fig5_transfer_trust, Fig5_transfer_risk, nrow = 2)
ggsave(file="Figures/Fig5_transfer_RF.tiff", Fig5)
# Figure 6 Transfer by oxytocin sensitivity (three-group boxplots) ---- OK
Fig6_transfer_trust<-
  ggplot(data_oxt, aes(x = oxt_sensitivity, y = transfer_ave)) +
  geom_boxplot()+
  geom_jitter(, width=0.25)+
  theme_classic()+
  labs(title="(a) Trust experiment", y="Transfer (MU)", x="Group")+
  geom_signif(stat = "identity",
              data = data.frame(x = c(1.2, 2.0),
                                xend = c(2.8, 2.8),
                                y = c(14, 12.5),
                                annotation = c("t = 1.55, p = 0.06", "t = 0.36, p =0.36")),
              aes(x = x, xend = xend, y = y, yend = y, annotation = annotation),
              col = "black")
Fig6_transfer_risk<-
  ggplot(data_oxt, aes(x = oxt_sensitivity, y = transfer_risk_ave)) +
  geom_boxplot()+
  geom_jitter(, width=0.25)+
  theme_classic()+
  labs(title="(b) Risk experiment", y="Transfer (MU)", x="Group")+
  geom_signif(stat = "identity",
              data = data.frame(x = c(1.2, 2.0),
                                xend = c(2.8, 2.8),
                                y = c(14, 12.5),
                                annotation = c("t = 0.13, p = 0.45", "t = 0.82, p =0.21")),
              aes(x = x, xend = xend, y = y, yend = y, annotation = annotation),
              col = "black")
Fig6 <- gridExtra::grid.arrange(Fig6_transfer_trust, Fig6_transfer_risk, nrow = 1)
ggsave(file="Figures/Fig6_transfer_OTSensitivity.tiff", Fig6)
# Figure 7 Oxytocin time course: levels at three sampling points ---- OK
Fig7_before_ad<-
  ggplot(data_oxt, aes(x = oxt, y = oxytocin1_log)) +
  geom_boxplot()+
  geom_jitter(, width=0.25)+
  scale_y_continuous(limits = c(2, 8))+
  theme_classic()+
  labs(title="Before administration", subtitle="",y="Logarithmic oxytocin level (pg/ml)", x="Group")+
  geom_signif(stat = "identity",
              data = data.frame(x = c(1.2),
                                xend = c(1.8),
                                y = c(8),
                                annotation = c("t = 1.17, p = 0.12")),
              aes(x = x, xend = xend, y = y, yend = y, annotation = annotation),
              col = "black")
Fig7_before_tg<-
  ggplot(data_oxt, aes(x = oxt, y = oxytocin2_log)) +
  geom_boxplot()+
  geom_jitter(, width=0.25)+
  scale_y_continuous(limits = c(2, 8))+
  theme_classic()+
  labs(title="Before TG", subtitle="60 minutes after administration", y="Logarithmic oxytocin level (pg/ml)", x="Group")+
  geom_signif(stat = "identity",
              data = data.frame(x = c(1.2),
                                xend = c(1.8),
                                y = c(8),
                                annotation = c("t = 2.27, p = 0.02")),
              aes(x = x, xend = xend, y = y, yend = y, annotation = annotation),
              col = "black")
Fig7_after_rg<-
  ggplot(data_oxt, aes(x = oxt, y = oxytocin3_log)) +
  geom_boxplot()+
  geom_jitter(, width=0.25)+
  scale_y_continuous(limits = c(2, 8))+
  theme_classic()+
  labs(title="After RE", subtitle="72 minutes after administration", y="Logarithmic oxytocin level (pg/ml)", x="Group")+
  geom_signif(stat = "identity",
              data = data.frame(x = c(1.2),
                                xend = c(1.8),
                                y = c(8),
                                annotation = c("t = 8.90, p = 0.00")),
              aes(x = x, xend = xend, y = y, yend = y, annotation = annotation),
              col = "black")
Fig7 <- gridExtra::grid.arrange(Fig7_before_ad, Fig7_before_tg, Fig7_after_rg, nrow = 1)
ggsave(file="Figures/Fig7_OT_courese.tiff", Fig7)
# Figure 8: oxytocin increase by AQ group ----
# (original header note, translated: "estimation results not reported")
data_oxt_aq_ot <- read_dta("Data/Data_aq_ot.dta")
# BUG FIX: ggsave() was chained onto the ggplot with `+`; that saves
# last_plot() (the previous figure) and then errors, because ggsave() is not a
# layer. Build the plot, then save it explicitly.
# NOTE(review): labs(labeller = ...) is not a real labs() field and has no
# visible effect; kept for byte-compatible labels, consider removing.
Fig8 <- ggplot(data_oxt_aq_ot, aes(x = group, y = lnoxt_diff31)) +
  geom_boxplot() +
  geom_jitter(width = 0.25) +
  scale_x_discrete(labels=c(expression(Oxytocin: AQ<26), expression(Oxytocin: AQ>=26), expression(Placebo: AQ<26), expression(Placebo: AQ>=26))) +
  labs(title = "Administration-induced increases in the concentrations of oxytocin",
       subtitle = "After RE (72 minutes) - Before administration",
       y = "Logarithmic oxytocin level (pg/ml)",
       x = "Group",
       labeller = label_parsed) +
  theme_classic()
ggsave(file = "Figures/Fig8_OTcourse_AQ.tiff", plot = Fig8)
|
d02923ed9b83149ae291b231198d382db8d458b5
|
aca57b591b2125b715f2805bcfab9248c1962918
|
/man/dataVerlaat.Rd
|
48c67db05366539212644f1eb3de644503c98f19
|
[] |
no_license
|
markvdwiel/GRridge
|
2e673f4140d67373ff4f8c3037a4cc7e67373a07
|
ef706afe241d95223ad473d1716e33a45dd73307
|
refs/heads/master
| 2021-01-17T17:52:22.203747
| 2019-04-16T10:23:05
| 2019-04-16T10:23:05
| 56,672,534
| 3
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,331
|
rd
|
dataVerlaat.Rd
|
\name{dataVerlaat}
\alias{dataVerlaat}
\alias{datcenVerlaat}
\alias{CpGann}
\alias{diffmeanFarkas}
\alias{respVerlaat}
\alias{pvalFarkas}
\docType{data}
\title{
Contains 5 R-objects, including the data and the binary response
}
\description{
The five objects are:
datcenVerlaat: methylation data for cervix samples (arcsine-transformed beta values); respVerlaat: binary response;
diffmeanFarkas: effect size external study, Cases minus Controls; pvalFarkas: p-values from external study;
and CpGann: annotation of probes according to location
}
\usage{data(dataVerlaat)}
\format{
The formats are:
datcenVerlaat: data frame [1:9691,1:44];
respVerlaat: numeric [1:44], 0 = Normal, 1 = Precursor;
diffmeanFarkas: numeric [1:44] ;
pvalFarkas: numeric [1:44];
CpGann: Factor w/ 6 levels "CpG-Island", "North-Shelf", "South-Shelf", "North-Shore", "South-Shore", "Distant"
}
\details{
This dataset is used for illustration in the statistical paper referenced below.
}
\value{
Five R objects (see description)
}
\references{
Mark van de Wiel, Tonje Lien, Wina Verlaat, Wessel van Wieringen, Saskia Wilting. (2016).
Better prediction by use of co-data: adaptive group-regularized ridge regression.
Statistics in Medicine, 35(3), 368-81.
}
\examples{
data(dataVerlaat)
}
\keyword{datasets}
|
a93142a37020d25c3a3eb3396a30a8d3e3bb4e70
|
747d15b4f5e63734c3f8d1f2c3d1ae4884b77edd
|
/plot2.R
|
de9555584e5b2caa424d32f26d4d97a8b417195b
|
[] |
no_license
|
mbmiola/ExData_Plotting1
|
95959b40144024f7c386f3dc989dcf64f798cf63
|
23ca6d1a6d9bcf62ef73122c6c8785adbec73df3
|
refs/heads/master
| 2021-01-22T09:26:31.468827
| 2014-09-02T18:30:28
| 2014-09-02T18:30:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 181
|
r
|
plot2.R
|
# Plot 2: line chart of global active power over time, written to plot2.png.
source("load.R")  # provides `dataset` (columns used: Datetime, Global_active_power)

png(filename = "plot2.png", width = 480, height = 480)
plot(
  dataset$Global_active_power ~ dataset$Datetime,
  type = "l",
  xlab = "",
  ylab = "Global Active Power (kilowatts)"
)
dev.off()
|
45741da654c3d3da8d8e78ffd38805e3bb0ee704
|
6d490e7ebd7df0c9aded876b0056662bf798f0ec
|
/tests/testthat.R
|
c237bbb2e6e8a1826bf5c6649fd6e2521694666c
|
[
"MIT"
] |
permissive
|
seanpm2001/TrendMiner_TrendMiner
|
4a82a6e78de0dfe59fd968fd6071414d40a3bd84
|
ed986c3ce8a0a9d42deacfeebf851b5b5a51aa8c
|
refs/heads/master
| 2023-08-08T08:06:46.641185
| 2020-01-29T21:32:10
| 2020-01-29T21:32:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 64
|
r
|
testthat.R
|
# Standard testthat bootstrap: load the framework and the package under test,
# then run every test file under tests/testthat/.
library(testthat)
library(trendminer)
test_check("trendminer")
|
7c0adeeb00a3c21c8cb952dd63e14d88983e60b6
|
2adbca9f422aa7ce8f6da6066d45f2d636f2691f
|
/reactive/ui.R
|
731a2ec8695dd4d2af32e237f93fbca9c0d5f618
|
[] |
no_license
|
brianstamper/ShinyApps
|
ba030f5200c47cc01b806ef45861cbb6187d6cdb
|
42efc91a483cf2cf8db03ac88239ebdcadc0143e
|
refs/heads/master
| 2021-01-10T12:55:50.394926
| 2016-03-23T16:59:16
| 2016-03-23T16:59:16
| 50,589,432
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 567
|
r
|
ui.R
|
library(shiny)

# Shiny UI: three sliders (A, B, C) in a sidebar feeding a single table output
# rendered by the server as output$mytable.
# NOTE(review): the initial slider values a_new, b_new, c_new are free names --
# presumably defined in global.R (or sourced before the UI is built); confirm
# they exist when the app starts.
shinyUI(fluidPage(
  sidebarLayout(
    sidebarPanel(
      sliderInput("sliderA",
                  "A:",
                  min = -50,
                  max = 50,
                  value = a_new),
      sliderInput("sliderB",
                  "B:",
                  min = -50,
                  max = 50,
                  value = b_new),
      sliderInput("sliderC",
                  "C:",
                  min = -50,
                  max = 50,
                  value = c_new)
      ),
    mainPanel(
      tableOutput("mytable")
    )
  )
))
|
3a7fe16d98e9caf3d6f86b5a857ee088607931dd
|
8c01a0252181ce53a94e0cc69fc48b288c9c8f91
|
/ipl_script.R
|
5f4831240fa520f25f54f89a177d9259c26559ab
|
[] |
no_license
|
Sk2307/ipl_data
|
131080e41dfb638523a3019fb68ea25b5153320b
|
a5aa00d1d1548abf98983f412c11e9bdd8723439
|
refs/heads/master
| 2020-03-23T13:23:40.048630
| 2018-07-19T18:51:43
| 2018-07-19T18:51:43
| 141,615,646
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,952
|
r
|
ipl_script.R
|
# IPL Project
# Importing Libraries
library(ggplot2)
library(readr)
library(dplyr)
library(gridExtra)
library(treemap)
library(RColorBrewer)
library(tidyr)
library(radarchart)
# Importing Dataset: ball-by-ball deliveries and match-level results.
deliveries <- read.csv('deliveries.csv')
matches <- read.csv('matches.csv')
# Keep only matches with a normal result (drops ties / no-results etc. --
# exact other values of `result` are not visible from this file).
matches <- matches[matches$result == 'normal' , ]
# Closeness of matches when Team Batting First wins
matches[which(as.character(matches$team2)==as.character(matches$winner)),"loser"]<- matches[which(as.character(matches$team2)==as.character(matches$winner)),"team1"]
matches[which(as.character(matches$team1)==as.character(matches$winner)),"loser"]<- matches[which(as.character(matches$team1)==as.character(matches$winner)),"team2"]
matches1<-matches[matches$win_by_runs!=0,]
closeness<-function(x,y = "gold" ){
data1<-matches1[matches1$winner==x|matches1$loser==x,]
data1[data1$loser==x,"win_by_runs"]<- -data1[data1$loser==x,"win_by_runs"]
ggplot(data1,aes(1:nrow(data1),win_by_runs))+
geom_area(fill=y)+ggtitle(x)+
ylab("Runs")+
xlab("Matches")+
geom_ribbon(aes(ymin=-5, ymax=5),fill="red",alpha=0.4) +
geom_ribbon(aes(ymin=-15, ymax=15),fill="red",alpha=0.1) +
guides(fill=FALSE)+
scale_alpha(guide = 'none')+
coord_cartesian(ylim = c(-100, 100))
}
a<-closeness("Chennai Super Kings")
b<-closeness("Kolkata Knight Riders","purple")
c<-closeness("Sunrisers Hyderabad","orange")
d<-closeness("Mumbai Indians","blue2")
e<-closeness("Royal Challengers Bangalore","red3")
f<-closeness("Delhi Daredevils","firebrick3")
g<-closeness("Rajasthan Royals","blueviolet")
h<-closeness("Kings XI Punjab","salmon")
grid.arrange(a,b,c,e,d,f,g,h,ncol=2)
# Number of Matches played in different cities
# NOTE(review): rm.na=T inside aes() is not a real aesthetic (likely a typo
# for na.rm) and has no effect; NA cities are already filtered out above.
ggplot(matches[which(!is.na(matches$city)),],aes(city,fill= city,rm.na=T)) +
  geom_bar() +
  theme(axis.text.x = element_text(angle = 90, hjust = 1))+
  ylab("Number of Matches Played") +
  guides(fill=FALSE)
# Number of Matches played in different Stadiums
ggplot(matches,aes(venue, rm.na=T)) +
  geom_bar(fill="#0072B2") +
  theme(axis.text.x = element_text(angle = 90, hjust = 1))+
  ylab("Number of Matches Played")
# Advantage of winning toss: did the toss winner also win the match?
matches$toss_match <- ifelse(as.character(matches$toss_winner)==as.character(matches$winner),"Won","Lost")
ggplot(matches[which(!is.na(matches$toss_match)),],aes(toss_match, fill = toss_match))+
  geom_bar()+
  xlab("Toss") +
  ylab("Number of matches won")+
  ggtitle("How much of a advantage is winning the toss")
# Advantage of playing in home city.
# Season 2009 and the 2014-04-16..2014-04-30 window are excluded -- presumably
# the editions/legs played outside India, where no team is "at home"; confirm.
Data<-matches[matches$season!="2009",]
Data$date<- as.Date(Data$date)
Data1<-Data[Data$date < as.Date("2014-04-16") | Data$date > as.Date("2014-04-30"),]
# Map each host city (and, where needed, season) to its home franchise.
Data1$home_team[Data1$city=="Bangalore"]<- "Royal Challengers Bangalore"
Data1$home_team[Data1$city=="Chennai"]<- "Chennai Super Kings"
Data1$home_team[Data1$city=="Delhi"]<- "Delhi Daredevils"
Data1$home_team[Data1$city=="Chandigarh"]<- "Kings XI Punjab"
Data1$home_team[Data1$city=="Jaipur"]<- "Rajasthan Royals"
Data1$home_team[Data1$city=="Mumbai"]<- "Mumbai Indians"
Data1$home_team[Data1$city=="Kolkata"]<- "Kolkata Knight Riders"
Data1$home_team[Data1$city=="Kochi"]<- "Kochi Tuskers Kerala"
Data1$home_team[Data1$city=="Hyderabad" & Data1$season <=2012]<- "Deccan Chargers"
Data1$home_team[Data1$city=="Hyderabad" & Data1$season >2012]<- "Sunrisers Hyderabad"
Data1$home_team[Data1$city=="Ahmedabad"]<- "Rajasthan Royals"
Data1$home_team[Data1$city=="Dharamsala"]<- "Kings XI Punjab"
Data1$home_team[Data1$city=="Visakhapatnam" & Data1$season== 2015]<- "Sunrisers Hyderabad"
Data1$home_team[Data1$city=="Ranchi" & Data1$season== 2013]<- "Kolkata Knight Riders"
Data1$home_team[Data1$city=="Ranchi" & Data1$season > 2013]<- "Chennai Super Kings"
Data1$home_team[Data1$city=="Rajkot" ]<- "Gujarat Lions"
Data1$home_team[Data1$city=="Kanpur" ]<- "Gujarat Lions"
Data1$home_team[Data1$city=="Raipur" ]<- "Delhi Daredevils"
Data1$home_team[Data1$city=="Nagpur" ]<- "Deccan Chargers"
Data1$home_team[Data1$city=="Indore" ]<- "Kochi Tuskers Kerala"
Data1$home_team[Data1$city=="Pune" & Data1$season!= 2016]<- "Pune Warriors"
Data1$home_team[Data1$city=="Pune" & Data1$season== 2016]<- "Rising Pune Supergiants"
# Drop matches in cities with no mapped home team, then tag each match result
# as a Home or Away win for the host franchise.
Data1<-Data1[ which(!is.na(Data1$home_team)),]
Data1$win_host <- ifelse(as.character(Data1$winner)==as.character(Data1$home_team),"Home","Away")
# NOTE(review): labs(aesthetic=...) targets a non-existent aesthetic -- no-op.
ggplot(Data1[which(!is.na(Data1$win_host)),],aes(win_host,fill= win_host))+
  geom_bar()+
  ggtitle("Is home advantage a real thing in IPL?")+
  xlab("Team")+
  ylab("Number of Matches won")+labs(aesthetic="Winner")
# Number of Matches played by each team.
# NOTE(review): table(team2) + table(team1) requires both factors to share the
# same levels in the same order (true when read from the same CSV); confirm.
ggplot(as.data.frame(table(matches$team2) + table(matches$team1)),aes(reorder(Var1,-Freq),Freq,fill = Var1)) +
  geom_bar(stat = "identity")+
  theme(axis.text.x = element_text(angle = 90, hjust = 1))+
  xlab("Teams")+
  ylab("Number of Matches") +guides(fill=FALSE)
# Number of matches won by each team
ggplot(matches,aes(winner)) +
  geom_bar(fill="#0072B2") +
  theme(axis.text.x = element_text(angle = 90, hjust = 1))+
  xlab("Team")+
  ylab("Matches won")
# Win percentage of each team = 100 * matches won / matches played.
matches_won<-as.data.frame(table(matches$winner))
colnames(matches_won)[2]<-"Won"
matches_played<-as.data.frame(table(matches$team2) + table(matches$team1))
colnames(matches_played)[2]<-"Played"
ggplot(left_join(matches_played,matches_won ),aes(reorder(Var1,-Won/Played),Won*100/Played,fill = Var1)) +
  geom_bar(stat = "identity")+
  theme(axis.text.x = element_text(angle = 90, hjust = 1))+
  xlab("Team")+
  ylab("Win Percentage") +
  guides(fill=FALSE)+
  coord_cartesian(ylim = c(0, 100))
# Margin of victories
# 1. Batting First: win_by_runs > 0 means the side batting first won.
ggplot(matches[matches$win_by_runs!=0,],aes(id,win_by_runs,col= winner )) +
  geom_point() +
  ylab("Runs won by ") +
  xlab("Matches won by team batting first")+
  ggtitle("Margin of Victories(Won by team batting first)")+
  scale_y_continuous(breaks=c(0,25,50,75,100))+
  geom_hline(yintercept = mean(matches[matches$win_by_runs!=0,]$win_by_runs),col="blue")
# 2. Bowling First: win_by_wickets > 0 means the chasing side won.
ggplot(matches[matches$win_by_wickets!=0,],aes(id,win_by_wickets,col= winner )) +
  geom_point() +
  ylab("Wickets won by ") +
  xlab("Matches won by team bowling first")+
  ggtitle("Margin of Victories(Won by team bowling first)")+
  scale_y_continuous(breaks=c(2,4,6,8,10))+
  geom_hline(yintercept = mean(matches[matches$win_by_wickets!=0,]$win_by_wickets),col="blue")
# Top Batsmen: career run aggregates, keeping only those above 3000 runs.
df<- deliveries %>%
  group_by(batsman)%>%
  summarise(runs=sum(batsman_runs)) %>%
  arrange(desc(runs)) %>%
  filter(runs > 3000)
df %>%
  ggplot(aes(reorder(batsman,-runs),runs,fill=batsman)) +
  geom_bar(stat = "identity") +
  xlab("Batsman")+
  ylab("Runs")+
  theme(axis.text.x = element_text(angle = 90, hjust = 1))+ xlab("Player")+
  ggtitle("Top Batsmen")+
  guides(fill=F)
# Top Batsmen - - Non- Striker's end: total runs added while at the other end.
deliveries %>%
  group_by(non_striker) %>%
  summarise(Runs= sum(total_runs)) %>%
  top_n(n=10,wt= Runs) %>%
  ggplot(aes(reorder(non_striker,-Runs),Runs,fill=non_striker)) +
  geom_bar(stat="identity")+
  xlab("Players") +
  theme(axis.text.x = element_text(angle = 75, hjust = 1)) +
  guides(fill=F) +
  ggtitle("Top Batsmen - - Non-striker's end")
# Top Bowlers: count of deliveries on which a batsman was dismissed
# (player_dismissed is "" when nobody fell on that ball).
df<-deliveries %>%
  group_by(bowler) %>%
  filter(player_dismissed!="") %>%
  summarise(wickets= length(player_dismissed)) %>%
  top_n(n=10,wt=wickets)
df %>%
  ggplot(aes(reorder(bowler,-wickets),wickets,fill=bowler))+
  geom_bar(stat = "identity") +
  ylab("Wickets")+
  theme(axis.text.x = element_text(angle = 90, hjust = 1))+
  xlab("Player")+
  ggtitle("Top Bowlers")+
  guides(fill=F)
# Number of runs scored in each delivery of the over (balls 7+ = extras, dropped).
df<-deliveries %>%
  group_by(ball) %>%
  summarise(Runs = mean(total_runs)) %>%
  filter(ball<7)
print(df)
df %>%
  ggplot(aes(ball,Runs,fill=ball)) +
  geom_bar(stat = "identity")+
  scale_x_continuous(breaks = c(1,2,3,4,5,6))+
  guides(fill=F) +
  xlab("Ball") +
  ylab("Runs scored") +
  ggtitle("Number of runs scored in each delivery ofover")
# Runs scored in each over: mean runs per ball * 6 = expected runs per over.
df <- deliveries %>%
  group_by(over) %>%
  filter(is_super_over==0) %>%
  summarise(Runs= mean(total_runs)*6)
df %>%
  ggplot(aes(over,Runs,fill=over))+
  geom_bar(stat = "identity")+
  scale_x_continuous(breaks = 1:20)+
  guides(fill=F) +
  xlab("Over") +
  ylab("Runs scored") +
  ggtitle("Number of runs scored in each over of the innings")
# Wickets in each over of the innings.
# BUG FIX: the original summarised length(player_dismissed) over ALL
# deliveries, so "Wickets" was really the number of balls bowled in each over.
# Keep only balls on which a batsman actually fell (player_dismissed != ""),
# the same filter the "Top Bowlers" block above uses.
deliveries %>%
  group_by(over) %>%
  filter(is_super_over==0, player_dismissed!="") %>%
  summarise(Wickets= length(player_dismissed)) %>%
  ggplot(aes(over,Wickets,fill=over))+
  geom_bar(stat = "identity") +
  scale_x_continuous(breaks = 1:20)+
  guides(fill=F) +
  xlab("Over") +
  ylab("Total wickets taken") +
  ggtitle("Wickets in each over of the innings")
# Batsmen with top Strike Rate, restricted to batsmen who faced more than 500
# deliveries (length(total_runs) within the batsman group = balls faced).
deliveries %>%
  group_by(batsman) %>%
  filter(length(total_runs)>500) %>%
  summarise(strike_rate= mean(batsman_runs)*100) %>%
  top_n(n=10,wt=strike_rate) %>%
  ggplot(aes(reorder(batsman,-strike_rate),strike_rate,fill=batsman))+
  geom_bar(stat="identity")+
  xlab("Batsman") +
  ylab("Strike Rate") +
  ggtitle("Batsmen with top strike rate")+
  theme(axis.text.x = element_text(angle = 75, hjust = 1)) +
  guides(fill=F)
# Comparing all-time Top Players:
# 1. V Kohli
# 2. SK Raina
# 3. RG Sharma
# 4. G Gambhir
# How useful have the runs been: per-match runs, faceted by whether the
# batsman's team went on to win or lose that match.
# Rename matches' first column so the join key matches deliveries$match_id.
colnames(matches)[1]<- "match_id"
deliveries %>%
  left_join(matches) %>%
  filter(batsman=="V Kohli"| batsman=="SK Raina" |batsman=="RG Sharma"|batsman=="G Gambhir") %>%
  filter(result=="normal") %>%
  group_by(batsman,match_id,batting_team,winner) %>%
  summarise(Runs=sum(batsman_runs)) %>%
  ggplot(aes(match_id,Runs))+geom_line() +
  geom_smooth() +
  coord_cartesian(ylim = c(0, 120))+
  xlab("Matches") +
  facet_grid(batsman ~ ifelse(as.character(batting_team)==as.character(winner),"Winning Cause","Losing Cause"))
# Maximum runs scored under which bowlers ----
# The original repeated the same summarise + treemap boilerplate four times;
# it is factored into two helpers here. The per-player data frames (Kohli,
# Rohit, Gambhir, Raina) are still created under the same global names.

# Top-50 (bowler, runs) pairs for one batsman (reads global `deliveries`).
top_bowler_runs <- function(player) {
  deliveries %>%
    group_by(batsman, bowler) %>%
    filter(batsman == player) %>%
    summarise(runs = sum(batsman_runs)) %>%
    top_n(n = 50, wt = runs)
}

# Render one batsman's runs-vs-bowlers treemap with a given palette and title.
plot_bowler_treemap <- function(df, pal, main_title) {
  treemap(df,
          index = c("batsman", "bowler"),
          vSize = "runs",
          type = "index",
          palette = pal,
          fontsize.title = 12,
          fontfamily.title = "serif",
          fontfamily.labels = "symbol",
          title = main_title,
          fontface.labels = "bold",
          border.col = "#FFFFFF",
          fontsize.legend = 0, bg.labels = "black", fontcolor.labels = "#FFFFFF",
          aspRatio = 1.1
  )
}

Kohli   <- top_bowler_runs("V Kohli")
Rohit   <- top_bowler_runs("RG Sharma")
Gambhir <- top_bowler_runs("G Gambhir")
Raina   <- top_bowler_runs("SK Raina")

plot_bowler_treemap(Kohli, brewer.pal(7, "Reds"),
                    "Runs by Virat Kohli against different bowlers")
plot_bowler_treemap(Rohit, brewer.pal(7, "Blues"),
                    "Runs by RG Sharma against different bowlers")
plot_bowler_treemap(Gambhir, brewer.pal(4, "Purples"),
                    "Runs by G Gambhir against different bowlers")
plot_bowler_treemap(Raina, brewer.pal(3, "YlOrBr"),
                    "Runs by SK Raina against different bowlers")
# Maximum runs scored against which teams (only pairings with > 100 runs).
df<-deliveries %>%
  filter(batsman=="V Kohli"| batsman=="SK Raina" |batsman=="RG Sharma"|batsman=="G Gambhir") %>%
  group_by(batsman,bowling_team) %>%
  summarise(runs = sum(batsman_runs)) %>%
  filter(runs >100)
treemap(df,
        index=c("batsman","bowling_team"),
        vSize = "runs",
        vColor = "bowling_team",
        type="categorical",
        palette = brewer.pal(12,"Set3"),
        fontsize.title = 15,
        fontfamily.title = "serif",
        fontfamily.labels = "symbol",
        title = "Runs against diff teams",
        aspRatio = 1,
        border.col="#FFFFFF",bg.labels = "#FFFFFF" ,fontcolor.labels= "black",fontsize.legend = 0
)
# With which non-striker have the top players scored (> 100 runs together).
df<-deliveries %>%
  filter(batsman=="V Kohli"| batsman=="SK Raina" |batsman=="RG Sharma"|batsman=="G Gambhir") %>%
  group_by(batsman,non_striker) %>%
  summarise(runs = sum(batsman_runs)) %>%
  filter(runs >100)
treemap(df,
        index=c("batsman","non_striker"),
        vSize = "runs",
        vColor = "batsman",
        type="categorical",
        palette = brewer.pal(6,"Set1"),
        fontsize.title = 15,
        fontfamily.title = "serif",
        fontfamily.labels = "italic",
        title = "Runs with different players at the other end ",
        aspRatio = 1,
        border.col="#FFFFFF",bg.labels = "black" ,fontcolor.labels= "#FFFFFF",fontsize.legend = 0
)
# Dismissal types: how often each top player fell to each dismissal kind.
df<-deliveries %>%
  filter(player_dismissed=="V Kohli"| player_dismissed=="SK Raina" |player_dismissed=="RG Sharma"|player_dismissed=="G Gambhir") %>%
  group_by(player_dismissed,dismissal_kind) %>%
  summarise(type= length(dismissal_kind))
treemap(df,
        index=c("player_dismissed","dismissal_kind"),
        vSize = "type",
        vColor = "dismissal_kind",
        type="categorical",
        palette = brewer.pal(6,"Set2"),
        fontsize.title = 15,
        fontfamily.title = "serif",
        fontfamily.labels = "italic",
        title = "Type of Dismissals ",
        aspRatio = 1,
        border.col="#FFFFFF",bg.labels = "black" ,fontcolor.labels= "#FFFFFF",fontsize.legend = 0
)
# Strike rate in different stages of game
# Strike rate = runs per 100 deliveries, computed per over number (1-20).
# NOTE(review): mean(batsman_runs) counts every delivery row, including
# wides/no-balls -- confirm this is the intended strike-rate definition.
# IDIOM: replaced the chained `batsman == .. | batsman == ..` comparisons
# with a single `%in%` membership test (same rows kept; NA comparisons are
# dropped by filter() in both forms).
deliveries %>%
filter(batsman %in% c("V Kohli", "SK Raina", "RG Sharma", "G Gambhir")) %>%
group_by(batsman,over) %>%
summarise(strike= mean(batsman_runs)*100) %>%
ggplot(aes(over,strike, col=batsman)) +
geom_line(size=2) +
ylab("Strike Rate") +
ggtitle("Strike rate in different stages of the game ") +
scale_x_continuous(breaks = 1:20)
# Season wise comparison - Runs
# NOTE(review): left_join(matches) relies on a shared join column between
# deliveries and matches; matches' first column is only renamed to
# "match_id" later (see "colnames(matches)[1]" below) -- confirm the rename
# already happened earlier in the full script before these joins run.
deliveries %>%
left_join(matches) %>%
filter(batsman=="V Kohli"| batsman=="SK Raina" |batsman=="RG Sharma"|batsman=="G Gambhir") %>%
group_by(batsman,season) %>%
summarise(runs = sum(batsman_runs)) %>%
ggplot(aes(season,runs, col=batsman)) +
geom_line(size= 2) +
ggtitle("Season wise comparision(Runs)") +
scale_x_continuous(breaks = 2008:2016)
# Season wise comparison - Boundaries
# Boundaries = deliveries scored as 4 or 6, counted per batsman per season.
deliveries %>%
left_join(matches) %>%
filter(batsman=="V Kohli"| batsman=="SK Raina" |batsman=="RG Sharma"|batsman=="G Gambhir") %>%
filter(batsman_runs==4|batsman_runs==6) %>%
group_by(batsman,season) %>%
summarise(boundaries= length(batsman_runs)) %>%
ggplot(aes(season,boundaries, col=batsman)) +
geom_line(size= 2) +
ggtitle("Season wise comparision(Boundaries)") +
scale_x_continuous(breaks = 2008:2016)
# Distribution of runs
# Frequency of each per-ball score (0,1,2,3,4,6) per batsman; 5s excluded
# as they are rare/noisy.
deliveries %>%
left_join(matches) %>%
filter(batsman=="V Kohli"| batsman=="SK Raina" |batsman=="RG Sharma"|batsman=="G Gambhir") %>%
group_by(batsman,batsman_runs) %>%
summarise(freq=length(batsman_runs)) %>%
filter(batsman_runs != 5) %>%
ggplot(aes(batsman,freq,fill=as.factor(batsman_runs))) +
geom_bar(stat = "identity",position= "dodge") +
coord_flip() +
ylab("Frequency") +
xlab(" Batsman") +
labs(fill="Type of runs scored")
# Innings Progression
# All four batsmen get the identical ball-by-ball scatter (cumulative runs
# vs cumulative balls faced per innings, coloured by runs off each ball),
# so the pipeline is built once in a helper instead of being copy-pasted
# four times. Titles are passed through verbatim to preserve the original
# plot labels (note the first plot uses the full name "Virat Kohli").
#
# Returns the ggplot object; at the top level of an interactive/Rscript run
# the returned plot auto-prints, matching the original behaviour.
innings_progression_plot <- function(player_name, plot_title) {
  deliveries %>%
    filter(batsman == player_name) %>%
    group_by(match_id) %>%
    # cum_run: running total of runs within each innings (match_id);
    # cum_ball: ball index within the innings
    mutate(cum_run = cumsum(batsman_runs), cum_ball = 1:length(match_id)) %>%
    ggplot(aes(cum_ball, cum_run, col = as.factor(batsman_runs))) +
    geom_point() +
    labs(col = "Type of Runs") +
    xlab("Balls") +
    ylab("Runs") +
    ggtitle(plot_title) +
    coord_cartesian(ylim = c(0, 120))
}
innings_progression_plot("V Kohli", "Innings Progression- Virat Kohli")
innings_progression_plot("G Gambhir", "Innings Progression- G Gambhir")
innings_progression_plot("RG Sharma", "Innings Progression- RG Sharma")
innings_progression_plot("SK Raina", "Innings Progression- SK Raina")
# Inning Progression vs Match Result
# Rename matches' first column so left_join finds the shared key.
# NOTE(review): earlier left_join(matches) calls in this script appear to
# assume this rename already happened -- confirm ordering in the full file.
colnames(matches)[1]<- "match_id"
# Same cumulative-runs scatter as above, but coloured by whether the
# batsman's team won the match, faceted per batsman.
deliveries %>%
left_join(matches) %>%
filter(batsman=="V Kohli"| batsman=="SK Raina" |batsman=="RG Sharma"|batsman=="G Gambhir") %>%
group_by(match_id) %>%
mutate(cum_run= cumsum(batsman_runs),cum_ball=1:length(match_id)) %>%
ggplot(aes(cum_ball,cum_run,col=ifelse(as.character(batting_team)==as.character(winner),"Winning Cause","Losing Cause"))) +
geom_point() +
facet_wrap(~batsman,ncol=2) +
labs(col="Type of Runs") +
xlab("Balls") +
ylab("Runs")+
ggtitle(" Relating Innings Progression with Result of the match")
# Best overall players
# Scatter of wickets vs economy for bowlers with > 500 deliveries bowled.
# Economy = average runs conceded per over (mean runs per ball * 6,
# extras included since total_runs is used).
# NOTE(review): "Wickets" counts every delivery where someone was dismissed,
# which includes run-outs not credited to the bowler -- confirm intended.
deliveries %>%
group_by(bowler) %>%
filter(length(ball) > 500) %>%
summarise(Economy= mean(total_runs)*6, Wickets = length(which(player_dismissed!="")) ) %>%
ggplot(aes(Wickets,Economy,label=bowler,alpha= Wickets/Economy)) +
# better (high-wicket, low-economy) bowlers are drawn more opaque via alpha
geom_text(color="black") +
theme_minimal(base_family = "Ubuntu Condensed")+
theme(legend.position = "none",
panel.grid.major.x = element_blank(),
panel.grid.minor.x = element_blank(),
panel.grid.minor.y = element_blank(),
panel.grid.major.y = element_blank(),
plot.background = element_rect(fill = "#EFF2F4"),
axis.text = element_text(size = 15),
plot.title = element_text(size=16)) +
ggtitle("Economy vs Wickets!",subtitle = "Min balls bowled : 500") +
coord_cartesian(xlim = c(25, 170))
|
e5e1a733b8da5c36eaab62e0226f1173f958337a
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query55_query42_1344/query55_query42_1344.R
|
e62881f84b286769ed8424a92cc0c4846e39f69c
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 71
|
r
|
query55_query42_1344.R
|
0f7d8610a87492ea64e73add5c0bca86 query55_query42_1344.qdimacs 1146 2951
|
96ecddcb211a55df7091d34aceed4060ba01e0c4
|
d2a90abc19ee4e17ff1fd77647fc5999b9e1bb40
|
/dentalAffinities/R/D2.R
|
f061c4363640420f5d7eec857a2d1302f72d0948
|
[] |
no_license
|
pbiecek/dentalAffinities
|
6bf31768eab4568cd832876f4e369a85ea1c7a8a
|
950bc4a9282c7033e48d40a471713fc1a1b7a3c1
|
refs/heads/master
| 2021-06-12T09:08:57.810635
| 2017-02-20T02:59:47
| 2017-02-20T02:59:47
| 81,302,713
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,065
|
r
|
D2.R
|
#' Calculate Mahalanobis D2 Measures
#'
#' Based on Lyle W. Konigsberg scripts (tdistR.zip) http://lylek.ucoz.org/index.html
#' with some additional cleaning and regularisation
#'
#' @param binary_trait_data a data frame with binary data
#' @param deltamin will replace any negative distance
#'
#' @export
calculateD2 <- function(binary_trait_data, deltamin= 0.01) {
# Input layout: columns 1-3 are metadata (id, site, sex); remaining columns
# are binary trait presence/absence scores.
# remove columns with wrong data (only NA or 1)
idx <- c(1:3,which(apply(binary_trait_data[,-(1:3)], 2, function(x) length(unique(na.omit(x)))) > 1) + 3)
binary_trait_data <- binary_trait_data[,idx]
# stop
colnames(binary_trait_data)[1:3] <- c("id", "site", "sex")
X <- binary_trait_data[,-(1:3)]
# drop individuals with no site assignment (site is the grouping variable)
binary_trait_data <- binary_trait_data[!is.na(binary_trait_data$site),]
# Mn = per-site trait sample sizes, Mp = per-site trait proportions
# (first column of each holds the site label).
tmp <- dentalAffinities::get_Mn_Mp(binary_trait_data)
Sites <- tmp[[1]][1]
Mn <- tmp[[1]][-1]
Mp <- tmp[[2]][-1]
# remove traits with 0 observations
idx <- which(!apply(Mn == 0, 2, any))
X <- X[,idx]
Mn <- Mn[,idx]
Mp <- Mp[,idx]
# n0/n1: counts of absence/presence per site and trait
n0 = Mn*Mp
n1 = Mn*(1-Mp)
# correction for 0
# (continuity correction: shift 0.5 so qnorm below never hits 0 or 1)
n0[n1 == 0] = n0[n1 == 0] - 0.5
n1[n1 == 0] = .5
n1[n0 == 0] = n1[n0 == 0] - 0.5
n0[n0 == 0] = .5
# calculate z
# z = probit-transformed trait frequencies per site (threshold values)
z <- apply(n1/(n0+n1), 1:2, qnorm)
rownames(z) = Sites$site
N.sites = nrow(Sites)
N.traits = ncol(X)
n.cases = nrow(X)
# here calculate R
# R accumulates sample-size-weighted tetrachoric correlations between trait
# pairs, pooled over sites; N.R accumulates the corresponding weights.
R = diag(N.traits)
N.R = diag(N.traits)
n.unique=N.traits*(N.traits-1)/2
for(k in 1:N.sites)
{
icount = 0
# sto: trait scores for individuals belonging to site k
sto = X[binary_trait_data[,2] == Sites$site[k],]
for(i in 1:(N.traits-1)){
for(j in (i+1):N.traits){
icount=icount+1
# 2x2 cross-tabulation of traits j and i flattened to a length-4 vector
trait.ij=as.vector(table(sto[,c(j,i)]))
# degenerate table (two or more empty cells, or a trait constant at this
# site): correlation is undefined, treated as 0
if(sum(trait.ij==0)>=2 | length(trait.ij)<4){
r=0
calc.please=F
} else{
calc.please=T
# KDELTA/DELTA implement a +/-0.5 continuity correction when exactly one
# diagonal or off-diagonal cell is empty
KDELTA=1
DELTA=0
if(trait.ij[1]==0 | trait.ij[4]==0) KDELTA=2
if(trait.ij[2]==0 | trait.ij[3]==0) KDELTA=KDELTA+2
if(KDELTA==2) DELTA=.5
if(KDELTA==3) DELTA=-.5
# both diagonal cells empty -> perfect negative association
if(trait.ij[1]==0 & trait.ij[4]==0){
r=-1
calc.please=F
}
# both off-diagonal cells empty -> perfect positive association
if(trait.ij[2]==0 & trait.ij[3]==0){
r=1
calc.please=F
}
trait.ij=trait.ij+DELTA*c(1,-1,-1,1)
}
if(calc.please==T) r = psych::tetrachoric(trait.ij,correct=F)$rho
# weight each pairwise correlation by the number of scored individuals
N.cell = sum(trait.ij)
N.R[i,j] = N.R[i,j] + N.cell
N.R[j,i] = N.R[j,i] + N.cell
R[i,j] = R[i,j] + r * N.cell
R[j,i] = R[j,i] + r * N.cell
}
}
}
# convert weighted sums into weighted means (diagonal stays 1)
R = R/N.R
rownames(R) = colnames(X)
colnames(R) = colnames(X)
# stop calculations of R
# Mahalanobis-type D2 between sites from the centred probit scores,
# using the pooled inter-trait correlation matrix R.
I = diag(N.sites)
o = rep(1,N.sites)
J = o %*% t(o)
w = o/sum(o)
# Delta: site-by-trait deviations of z from the (equal-weight) grand mean
Delta = (I - o %*% t(w)) %*% z
Cp = Delta %*% solve(R) %*% t(Delta)
# pairwise squared distances: d_ab = c_aa + c_bb - 2*c_ab
D2 = (Cp*I) %*% J + J %*% (Cp*I) - 2*Cp
# replace negative values
# (numerical noise can make off-diagonal distances <= 0; floor at deltamin)
D2[D2 <= 0] <- deltamin
rownames(D2) = Sites$site
colnames(D2) = Sites$site
# Return shape mirrors the MMD result list used elsewhere in the package.
list(MMDMatrix = D2, SDMatrix = NULL, SigMatrix = NULL)
}
|
813c80dbe431e59d98d5761824bc07d8d6f3ceff
|
681ea8b254221dd5c89b70c22f0db91b03befc32
|
/explore/03-prep-reported-diversions.R
|
17f30f5025ca698f48ad1abc37ce4beb7fc29a70
|
[
"MIT"
] |
permissive
|
dhesswrce/dwr-wasdet
|
481f123e2da7b169c4debef4f6a654e3b780ad5a
|
03f16732e7967b472279f581247e0c66805cd69d
|
refs/heads/main
| 2023-05-01T05:50:42.628528
| 2021-05-12T21:13:24
| 2021-05-12T21:13:24
| 360,266,689
| 0
| 0
|
MIT
| 2021-04-21T18:21:51
| 2021-04-21T18:21:50
| null |
UTF-8
|
R
| false
| false
| 2,502
|
r
|
03-prep-reported-diversions.R
|
## Load library packages.
# Attach each required package quietly, skipping any already on the search
# path (so re-sourcing this script does not re-attach them). Attachment
# order matches the original: tidyr, dplyr, readr, lubridate.
for (pkg_name in c("tidyr", "dplyr", "readr", "lubridate")) {
  if (!(paste0("package:", pkg_name) %in% search())) {
    suppressMessages(library(pkg_name, character.only = TRUE))
  }
}
## Initialization. ----
# Switches.
use_local_file <- TRUE
# NOTE(review): save_data_gaps is set but never checked below; the
# never_reported CSV is always written. Confirm whether the write should be
# wrapped in `if (save_data_gaps)`.
save_data_gaps <- TRUE
# Load wr_info.Rdata.
# (provides the wr_info table of water-right attributes joined in below)
load("./output/wr_info.RData")
## Load and Process reported diversions flat file. ----
## Load directly from jasperreports when intranet access available.
if(use_local_file) {
file_loc <- "./dwr-flat-files/water_use_report_20201020.zip"
} else {
file_loc <- "http://jasperreports/EwrimsFlatFile/water_use_report.csv"
}
diversions_raw <- readr::read_csv(file_loc)
# Keep only the columns needed downstream, with snake_case names.
diversions <- diversions_raw %>%
dplyr::select(wr_id = APPL_ID,
rep_year = YEAR,
rep_month = MONTH,
amount = AMOUNT,
diversion_type = DIVERSION_TYPE)
# Convert numeric months to names
# (ordered factor so months sort Jan..Dec rather than alphabetically)
diversions <- diversions %>%
dplyr::mutate(rep_month = ordered(month.abb[rep_month], levels = month.abb))
# Limit Reported Diversion scenarios to prior 10 years.
# Rows with diversion_type == "USE" are excluded.
diversions <- diversions %>%
dplyr::filter(diversion_type != "USE",
rep_year %in% seq((as.numeric(format(Sys.Date(), "%Y"))-10),
length.out = 10))
# For each water right id, aggregate diversions by year and month.
diversions <- diversions %>%
dplyr::group_by(wr_id, rep_year, rep_month) %>%
dplyr::summarise(diverted = sum(amount, na.rm = TRUE),
.groups = "drop")
# Add demand type distinguishers.
diversions <- diversions %>%
dplyr::mutate(demand_type = "Reported Diversions",
demand_scenario = paste0("Reported Diversions - ", rep_year))
# Join wr_info to diversions.
# (right_join keeps every aggregated diversion row; wr_info supplies
# attributes such as priority)
diversions <- wr_info %>%
dplyr::right_join(., diversions, by = "wr_id") %>%
arrange(demand_type,
demand_scenario,
priority,
wr_id,
rep_month)
# capture water rights that have no reported diversion data in eWRIMS.
never_reported <- wr_info %>%
dplyr::filter(!wr_id %in% diversions$wr_id)
readr::write_csv(never_reported, "./data-gaps/never_reported.csv")
# Rearrange diversions into list of tibbles by demand scenario.
diversions <- split(diversions,
f = diversions$demand_scenario)
## save diversions for testing other modules.
save(diversions, file = "./output/diversions.RData")
|
22392b5cfc404be7618b85636e6efcb9338eeae0
|
b2b18dc1c271042a2c48463361c7cf17da787f66
|
/man/Read.Data.Rd
|
7ed60b8b3bf99cb5bc4ca27d402dcbdd4a1d512d
|
[] |
no_license
|
vivianaayus/jrich
|
286ad9624872ba7c286af38ea8ddadd1be492bb1
|
14f93b4f4c4439d6e643b2dccd4e0486728a2362
|
refs/heads/master
| 2020-12-25T02:30:28.692004
| 2015-03-16T15:08:42
| 2015-03-16T15:08:42
| 32,331,073
| 0
| 0
| null | 2015-03-16T14:35:48
| 2015-03-16T14:35:48
| null |
UTF-8
|
R
| false
| false
| 279
|
rd
|
Read.Data.Rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{Read.Data}
\alias{Read.Data}
\title{Read distributions as a csv with two columns species and area}
\usage{
Read.Data(data.File)
}
\description{
Read distributions as a csv with two columns species and area
}
|
ec500170ba40f5dc03ffbdf3c8be753430c6f8e0
|
9ec5fb4553164a88bace89c9efd6b847e57dcf71
|
/scripts/f test.R
|
66c056ae651763e56265ad5c96a4e1eaf52a2dac
|
[] |
no_license
|
doerlbh/eye-tracking-orientation-categorization
|
caed0432ad7ed6a783964f8aa4981021b896755c
|
64a0ed3781bec8d1a2263197c7701ee4f195de58
|
refs/heads/master
| 2020-06-19T23:55:32.059509
| 2019-04-03T02:02:23
| 2019-04-03T02:02:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,490
|
r
|
f test.R
|
# This is the first part to run, read stimuli files
# set up working directory for each participant
# NOTE(review): hard-coded absolute path; must be edited per machine.
setwd("/Users/TBB/Desktop/eye-tracking data")
master <- read.csv("master.csv", header = TRUE, stringsAsFactors = FALSE)
colnames(master)
# keep only the measurement/grouping columns needed for the analysis
mas <- master[, c(3, 4, 5, 6, 7, 8, 10, 11, 12, 18, 19, 20, 21)]
## No 443 or 558 patch for every subject, even in original excel spreadsheet
## exclude them from here
mas <- mas[!(mas$Media_ID == 558 | mas$Media_ID == 443), ]
# assign value to blank lines in subject ESCR006 patch 183 and 64
# NOTE(review): `output` is not defined in this script -- presumably created
# by a companion gaze-processing script that must run first; confirm.
test <- as.numeric(output[output[,"MEDIA_ID"] == "183" & output[ ,"subject"] == "ESCR006_all_gaze.csv", c("H", "V", "D", "0")])
mas[mas$Participant == "ESCR006" & mas$Media_ID == 183, c("H..", "V..", "D..", "Fixations..")] <- test
# split mas
# (one data frame per participant; then count remaining NAs in H per person)
ma <- split(mas, mas$Participant)
sapply(ma, function(x)sum(is.na(x$H..)))
# output cleaned dataset
write.csv(mas, file = "mas.csv")
# do manova analysis
# One-way ANOVA of vertical measure (V..) on Level, restricted to
# horizontal-orientation trials via the subsetting inside the formula.
res.mas <- aov(V..[mas$Orientation == 'H'] ~ as.factor(Level)[mas$Orientation == 'H'], data = mas)
summary(res.mas)
# return mean for H/V/D/F in Orientation, Level, Accuracy
# a: measurement columns; b: grouping-factor columns of `mas`.
a <- c('H..', 'V..', 'D..', 'Fixations..')
b <- c('Orientation', 'Level', 'Accuracy')
# level <- list()
# for (i in seq_along(b))
#   level[[i]] <- levels(as.factor(mas[, b[i]]))
# BUG FIX: the original loops used `for (j in length(a))` / `for (i in
# length(b))`, which iterate over the single values 4 and 3, so only the
# last measure/factor pair was ever computed (and each inner pass
# overwrote m[[j]]). seq_along() visits every index, and results are kept
# per factor: m[[j]][[i]] holds group means of measure a[j] by factor b[i].
m <- list()
for (j in seq_along(a)){
  m[[j]] <- list()
  for (i in seq_along(b)){
    m[[j]][[i]] <- unlist(tapply(mas[ ,a[j]], INDEX = mas[ ,b[i]], mean))
  }
  names(m[[j]]) <- b
}
names(m) <- a
|
d7aff55ad1e18d78b4a5c3fd8a149fb7fa62ddac
|
ea18d64407ee0b44cadbe8f756ca078a9c9cc7e4
|
/RandomForest_script_Final_trial4.R
|
8e24dd9617d652cf243b89a5a563f290ed2d8226
|
[] |
no_license
|
A-Why-not-fork-repositories-Good-Luck/Automatic-Detection-of-Building-Damages-following-the-Beirut-Port-Explosion-using-Satellite-Data
|
a9159d512482ed240b1356da23552f4bedabc77a
|
4dcf51f049cc15ab0150042a7bebce380cdf20f4
|
refs/heads/main
| 2023-08-01T06:20:05.526326
| 2021-09-19T08:12:19
| 2021-09-19T08:12:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,787
|
r
|
RandomForest_script_Final_trial4.R
|
#############################################################################
# Code is adapted from:
# https://bitbucket.org/rsbiodiv/randomforestclassification/src/master/rf_classification.R
# The script reads an ESRI Shapefile (defined by the "shapefile" variable) with
# training polygons and then either select all pixels or randomly select a
# user-determined number of samples (defied by the "numsamps" variable) from
# each land cover type. A multilayer image that contains spectral, other
# continuous data or categorical data is also input (defined by the inImage
# variable). For each randomly selected sample the data values for that pixel
# are determined and these data are used to run the Random Forest model.
#
# After building the model the multilayer image is read, and up to three output
# layers (classImage, probImage, threshImage) can be selected for the output image.
# "classImage" classifies all of the pixels.
#
# "probImage" outputs the class probability of the class that got the most votes
# (i.e., the class that was selected for the classImage layer).
#
# "threshImage" is the same as "classImage" except all pixels with a class probability
# of the class that got the most votes below the "probThreshold" parameter are set to 0.
# This is useful to identify pixels with inter-class confusion.
#
# The image is written out (name and location is defined by the "outImage variable)
# using the GeoTIFF format. A variable importance plot is displayed to provide information
# about the influence of each variable. An error rate estimate and confusion matrix are also
# printed to provide information about classification accuracy.
#
# There is an option to assess the quality of the training data. The metric for this
# is the “margin”. The margin of a training point is the proportion of votes for the correct
# class minus maximum proportion of votes for the other classes for that segment. Positive margin
# values represent correct classification, and vice versa. The margin data are written to a
# point ESRI Shapefile so they can be overlaid on the image and training polygons to assess which
# points need to be removed and relabeled in the training data and it can help determine which
# classes needs additional training segments. If this output is not needed you can enter two
# double or single-quotes (“” or '') for the variable outPointsFile.
#
# There is also an option to output a feature space plot using two bands of your choice.
# If a feature space plot is not needed then enter "0" for the variables xBand and/or yBand.
#
# Set the variables below in the "SET VARIABLES HERE" section of the script.
#
# This script was written by Ned Horning [horning@amnh.org]
# Support for writing and maintaining this script comes from The John D. and
# Catherine T. MacArthur Foundation and Google.org.
#
# This script is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software Foundation
# either version 2 of the License, or (at your option) any later version. *
#
#############################################################################
# Load libraries ----
# FIX: install.packages("randomForest") previously ran unconditionally on
# every execution (slow, needs a CRAN mirror/network even when the package
# is already present). Install only what is missing, then attach.
# The attach order below preserves the original order (duplicates removed,
# randomForest attached last as before) so package masking on the search
# path -- e.g. raster masking dplyr verbs -- is unchanged.
required_pkgs <- c("spatstat", "here", "sp", "rgeos", "maptools", "GISTools",
                   "tmap", "sf", "geojson", "geojsonio", "tmaptools",
                   "dplyr", "stringr", "readr", "rgdal", "janitor",
                   "ggplot2", "raster", "fpc", "dbscan", "tidyverse",
                   "tidyr", "fs", "randomForest")
missing_pkgs <- required_pkgs[!vapply(required_pkgs, requireNamespace,
                                      logical(1), quietly = TRUE)]
if (length(missing_pkgs) > 0) {
  install.packages(missing_pkgs)
}
for (pkg in required_pkgs) {
  library(pkg, character.only = TRUE)
}
#
cat("Set variables and start processing\n")
# NOTE(review): hard-coded absolute path; must be edited per machine.
setwd('/Users/saram/Desktop/Dissertation/Data')
#
############################# SET VARIABLES HERE ###################################
# Name and path for the Shapefile (don't need the .shp extension)
shapefile <- 'training_poly_3.shp'
# Approximate number of training samples to be randomly selected for each land cover class
# If numsamps is set to "0" then all pixels in all of the polygons will be used as training samples
numsamps <- 800
# Name of the attribute that holds the integer land cover type identifier
attName <- 'id'
# No-data value for the input image
nd <- -999
# Name and path for the input satellite image
inImage <-'MX_31Jul_5Aug_UN.tif'
# Name and path of the output GeoTiff image
outImageName <- 'rf_output_final_trial4.tif'
# Name and location of the output Shapefile point file that will be created. If this output
# is not needed you can enter two double or single-quotes (“” or '')
# Note that if this file exists the write will fail with the message "Creation of output file failed"
outMarginFile <- 'rf_margin_final_trial4.shp'
# Output classification layer without applying threshold (enter TRUE or FALSE)
classImage <- TRUE
# Output probability image layer (enter TRUE or FALSE)
probImage <- TRUE
# Output classification layer and set pixels with probability less than "probThreshold" to 0 (enter TRUE or FALSE)
threshImage <- TRUE
# Enter threshold probability in percent (values must be between 0 and 100) only used if threshImage=TRUE
probThreshold <- 75
# Layer number (band number) for the X and Y axis of the feature space plot.
# If you do not want to calculate a feature plot enter 0 as the layer number
xBand <- 1
yBand <- 2
#######################################################################################
#
# Start processing
startTime <- Sys.time()
cat("Start time", format(startTime),"\n")
# Read the Shapefile
# NOTE(review): maptools::readShapePoly is deprecated/defunct in recent
# maptools releases; rgdal::readOGR or sf::st_read is the modern
# replacement -- confirm the installed maptools version still provides it.
vec <- readShapePoly(shapefile)
#
# Load the image then flag all no-data values (nd) so they are not processed
satImage <- stack(inImage)
for (b in 1:nlayers(satImage)) { NAvalue(satImage@layers[[b]]) <- nd }
#
# Create vector of unique land cover attribute values
allAtt <- slot(vec, "data")
tabAtt <-table(allAtt[[attName]])
uniqueAtt <-as.numeric(names(tabAtt))
# Check if there are data in uniqueAtt
# (NA here means the attribute column named by attName was not found)
if (is.na(uniqueAtt[1])) {
cat("\n*************************No attributes were found**************************** \n")
stop("Check the attName variable in the variable settings\n", call.=FALSE)
}
# If all pixels in a polygon are to be used process this block
if (numsamps == 0) {
# Create input data from a Shapefile using all training data
cat("Create training data using all pixels in training polygons\n")
predictors <- data.frame()
response <- numeric()
for (x in 1:length(uniqueAtt)) {
# Get the metadata for all polygons for a particular class (based on the uniqueAtt variable)
class_data<- vec[vec[[attName]]==uniqueAtt[x],]
# Extract and combine predictor and response variables for each polygon within a class
# NOTE(review): rbind/c inside the loop grows objects repeatedly (O(n^2));
# acceptable for small training sets, slow for large ones.
for (i in 1:dim(class_data)[1]) {
satValues <- raster::extract(satImage, class_data[i,])
satValues <- as.data.frame(do.call(rbind,satValues))
attributeVector <- rep.int(uniqueAtt[x],nrow(satValues))
predictors <- rbind(predictors, satValues)
response <- c(response, attributeVector)
}
}
trainvals <- cbind(response, predictors)
} else {
# Create input data from a Shapefile by sampling training data
cat("Create training data by sampling", numsamps, "pixels for each class\n")
for (x in 1:length(uniqueAtt)) {
# Get the metadata for all polygons for a particular class (based on the uniqueAtt variable)
class_data<- vec[vec[[attName]]==uniqueAtt[x],]
# Get the area of each polygon for a particular class
areas <- sapply(slot(class_data, "polygons"), slot, "area")
# Calculate the number of samples for each polygon based on the area in proportion to total area for a class
nsamps <- ceiling(numsamps*(areas/sum(areas)))
# Use random sampling to select training points (proportional based on area) from each polygon for a given class
for (i in 1:dim(class_data)[1]) {
xy_class <- spsample(class_data[i,], type="random", n=nsamps[i])
# Add coordinates to create a list of random points for all polygons
if (i == 1) cpts <- xy_class
else cpts <- rbind(cpts, xy_class)
}
# The number of points might not match numsamps exactly.
# (ceiling() above rounds each polygon's quota up)
classpts <- cpts
if (x == 1) {
xy_allClasses<- classpts
} else {
xy_allClasses<- rbind(xy_allClasses, classpts)
}
}
# Get class number for each sample point for response variable
# (spatial overlay of the sample points onto the training polygons)
response <- over(xy_allClasses, vec)[[attName]]
# Get pixel DNs from the image for each sample point
trainvals <- cbind(response, raster::extract(satImage, xy_allClasses))
}
# Test if feature space plot is needed
if (xBand != 0 & yBand != 0) {
#Plot feature space and samples
# Interactive loop: re-draws the two-band feature space plot until the user
# continues ("any other key"), changes bands ("c"), or aborts ("n").
continue <- "c"
while (continue == "c") {
plotImage <- stack(satImage[[xBand]], satImage[[yBand]])
# Get pixel values from the image under each sample point and create a table with
# observed and predicted values
cat("Getting pixel values to create feature space plot\n\n")
# regular (systematic) sample of up to 100000 pixels for the background cloud
featurePlotPoints <- sampleRegular(plotImage,100000 )
# Remove NA values from trainvals table created above
featurePlotPoints <- na.omit(featurePlotPoints)
minBand1 <- min(featurePlotPoints[,1])
maxBand1 <- max(featurePlotPoints[,1])
minBand2 <- min(featurePlotPoints[,2])
maxBand2 <- max(featurePlotPoints[,2])
# NOTE(review): rangeBand1/rangeBand2 are computed but never used below.
rangeBand1 <- maxBand1 - minBand1 + 1
rangeBand2 <- maxBand2 - minBand2 + 1
xAxisLabel <- paste("Layer", xBand, sep=" ")
yAxisLabel <- paste("Layer", yBand, sep=" ")
plot(featurePlotPoints[,1], featurePlotPoints[,2], col="lightgrey", xlab=xAxisLabel, ylab=yAxisLabel)
# overlay the training samples, one colour per class
uniqueValues <- unique(trainvals[,1])
for (v in 1:length(uniqueValues)) {
points(trainvals[which(trainvals[,1]==uniqueValues[v]), xBand+1], trainvals[which(trainvals[,1]==uniqueValues[v]), yBand+1], col=v, pch=20)
}
legend(minBand1, maxBand2, col=1:v, pch=20, title="Classes", legend=as.character(uniqueValues))
continue <- readline(prompt="Type n to stop, c to change feature space bands or any other key to continue with randome forests model creation and prediciton: \n\n")
if (substr(continue, 1,1) == "n") {
stop("Processing stopped at users request \n\n", call.=FALSE)
}
if (substr(continue, 1,1) == "c") {
xBand <- as.numeric(readline(prompt="Enter the band number for the x axis: \n"))
yBand <- as.numeric(readline(prompt="Enter the band number for the y axis: \n"))
}
}
}
# Remove NA values
trainvals <- na.omit(trainvals)
# Check to make sure Shapefile and input image are in the same projection
# (zero rows after extraction usually means the polygons did not overlap
# the image, i.e. mismatched projections)
if (nrow(trainvals) == 0) {
cat("\n*************************No training data found**************************** \n")
stop("It is possible the projection of the Shapefile with training data and input image are different\nCheck projections and run again", call.=FALSE)
}
# Run Random Forest
cat("Calculating random forest object\n")
# Classification forest: response coerced to factor, variable importance kept
randfor <- randomForest(as.factor(response) ~., data=trainvals, importance=TRUE, na.action=na.omit)
# Start predictions
cat("Starting predictions\n")
# Calculate how many bands the output image should have
# (TRUE/FALSE flags sum as 1/0, giving the number of requested layers)
numBands <- classImage + probImage + threshImage
# Calculate the image block size for processing
bs <- blockSize(satImage)
# Create the output raster block
outImage <- brick(satImage, values=FALSE, nl=numBands)
outImage <- writeStart(outImage, filename=outImageName, progress='text', format='GTiff', datatype='INT1U', overwrite=TRUE)
# Loop though each of the image blocks to calculate the output layers selected in the variables section
for (i in 1:bs$n) {
cat("processing block", i, "of", bs$n, "\r")
imageBlock <- getValuesBlock(satImage, row=bs$row[i], nrows=bs$nrows[i])
# predicted class labels for every pixel in this block
predValues <- predict(randfor, imageBlock, type='response')
# factor levels -> numeric class codes
classValues <- as.numeric(levels(predValues))[predValues]
outMatrix <- matrix(nrow=nrow(imageBlock), ncol=0)
if (classImage) {
outMatrix <- cbind(outMatrix, classValues)
}
if (probImage || threshImage) {
# per-class vote fractions; maxProb = winning class probability in percent
predProbs <- as.data.frame(predict(randfor, imageBlock, type='prob'))
maxProb <- round(apply(predProbs, 1, max) * 100)
if (probImage) {
outMatrix <- cbind(outMatrix, maxProb)
}
if (threshImage) {
# like classValues, but pixels below the confidence threshold are zeroed
threshValues <- classValues
threshValues[which(maxProb <= probThreshold)] <- 0
outMatrix <- cbind(outMatrix,threshValues)
}
}
writeValues(outImage, outMatrix, bs$row[i])
}
# Stop writing and close the file
outImage <- writeStop(outImage)
# Plotting variable importance plot
varImpPlot(randfor)
# Print error rate and confusion matrix for this classification
confMatrix <- randfor$confusion
cat("#################################################################################\n")
# BUG FIX: the value is labelled "%" but was printed as a proportion; scale
# by 100. Also select the class-count columns (dropping the trailing
# class.error column) with seq_len() instead of relying on the precedence
# accident `1:ncol(confMatrix)-1`, which evaluates to 0:(n-1).
oobErrorPct <- 100 * (1 - (sum(diag(confMatrix)) / sum(confMatrix[, seq_len(ncol(confMatrix) - 1)])))
cat("OOB error rate estimate\n", oobErrorPct, "%\n\n", sep="")
cat("Confusion matrix\n")
print(randfor$confusion)
cat("\n")
if (outMarginFile != "") {
  if (numsamps == 0) {
    # BUG FIX: when all polygon pixels are used (numsamps == 0) the sampled
    # point object xy_allClasses is never created, so the margin Shapefile
    # cannot be written; previously this crashed with
    # "object 'xy_allClasses' not found". Skip with a warning instead.
    warning("Margin output requires sampled training data (numsamps > 0); skipping ",
            outMarginFile, call. = FALSE)
  } else {
    # Calculate margin (proportion of votes for correct class minus maximum
    # proportion of votes for other classes); positive = correctly classified.
    marginData <- margin(randfor)
    trainingAccuracy <- cbind(marginData[order(marginData)], trainvals[order(marginData),1])
    # Add column names to attributes table
    colnames(trainingAccuracy) <- c("margin", "classNum")
    # Calculate X and Y coordinates for training data points
    xyCoords <- xy_allClasses@coords
    xyCoords <- xyCoords[order(marginData),]
    # Create and write point Shapefile with margin information to help improve training data
    pointVector <- SpatialPointsDataFrame(xyCoords, as.data.frame(trainingAccuracy), coords.nrs = numeric(0), proj4string = satImage@crs)
    writeOGR(pointVector, outMarginFile, "layer", driver="ESRI Shapefile", check_exists=TRUE)
  }
}
# Calculate processing time
timeDiff <- Sys.time() - startTime
cat("\nProcessing time", format(timeDiff), "\n")
# Exploratory tail: inspect per-tree OOB error rates for the fitted forest.
randfor$err.rate
plot(randfor$err.rate)
# NOTE(review): install.packages() calls mid-script run on every execution
# and need network access; consider guarding with requireNamespace().
install.packages("party")
library("party")
# NOTE(review): the ctree/iris lines below are a standalone package demo
# unrelated to the classification above -- confirm they belong here.
x <- ctree(Species ~ ., data=iris)
plot(x, type="simple")
install.packages("stabletree")
install.packages("stablelearner")
install.packages("vip")
#importance variable in barplot
library(vip)
# generate vip plot
vip::vip(randfor, horizontal = FALSE)
#After that, we plot the error rates with various number of trees.
plot(randfor)
# inspect the structure of the fitted randomForest object
attributes(randfor)
randfor
|
80826dc817b6b7e790a1b23822afeb5142554bbf
|
eb944c49fc400a35785673b8e5c2a396c54e020b
|
/Code/Session_7_cluster_stud.R
|
aaa2d6fa8e9b6c90d4b0eb3a729ca1727f65d20f
|
[] |
no_license
|
rbslandau/statistics_multi
|
290064b26e613b2f71fc47ae28fd599c5ec731b6
|
a6f0de239a6f763379f86cc611bd040af611cba0
|
refs/heads/master
| 2022-05-03T01:19:43.731965
| 2022-04-13T20:46:11
| 2022-04-13T20:46:11
| 103,508,951
| 17
| 13
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,522
|
r
|
Session_7_cluster_stud.R
|
###################################################################################
# Script for Session 7 of the course: Applied multivariate Statistics with R      #
# by Ralf B. Schäfer, WS 2018/19                                                  #
# Cluster analysis                                                                #
###################################################################################
# we first set a working directory i.e. a directory where we store all files
# NOTE(review): hard-coded personal path; must be adjusted (or removed)
# before running this script on another machine
setwd("~/Gitprojects/Teaching/Statistics_multi/Code")
# you have to set a path to a working directory on your local machine here
# to simplify the identification of your path, you can use the following function
file.choose()
# and select a file in your desired working directory. Subsequently, copy the path
# without (!!!) the file reference into the setwd function
library(vegan)
# load species abundance data and environmental data (requires internet access)
werra_sp <- read.table("https://raw.githubusercontent.com/rbslandau/statistics_multi/master/Data/River_sp.csv", sep = ';', header = TRUE)
werra_env <- read.table("https://raw.githubusercontent.com/rbslandau/statistics_multi/master/Data/River_env.csv", sep = ';', header=TRUE)
# we load some species data from governmental stream monitoring and
# related grouping information
###########################
#    Data preparation     #
#    Reduce weight of     #
#    dominant taxa        #
###########################
### Transformation required?
# check species maxima (first column holds site identifiers, hence [ ,-1])
apply(werra_sp[ ,-1], 2, max)
# remove species that are absent in all sites
# see script for RDA
# for rationale of following lines
# convert abundances to presence/absence
werra_pa <- decostand(werra_sp[ ,-1], "pa")
# calculate sum per species (= number of sites where the species occurs)
werra_sum <- apply(werra_pa, 2, sum)
sort(werra_sum)
# keep only species that occur in at least 3 sites
# (the condition ! werra_sum < 3 retains columns with werra_sum >= 3)
werra_fin <- werra_sp[ ,-1][ , ! werra_sum <3]
range(apply(werra_fin, 2, max))
# maxima differ strongly
range(apply(werra_fin^0.5, 2, max))
# also after square root transformation
range(apply(werra_fin^0.25, 2, max))
# double square root leads to stronger downweighing of abundant taxa
# is required in this case
werra_sp_t <- werra_fin^0.25
### Create Bray-Curtis distance matrix
werra_dist <- vegdist(werra_sp_t)
# if no distance measure is specified, vegdist uses the Bray Curtis distance
# Be careful, the dist function uses Euclidean as default!
# Note that the distance measure influences the cluster result
werra_clus <- hclust(werra_dist, method="average")
# cluster analysis with average linkage -
# see lecture and R help for other methods available
# we also use Wards method for comparison
werra_clus2 <- hclust(werra_dist, method="ward.D2")
############################################
# Check preservation of initial distances  #
############################################
# we can check the relationship between the cophenetic matrix
# and the initial distance matrix to see how the distances
# between the initial objects are preserved
# distance matrix before clustering
werra_dist
# distance matrix after clustering for average linkage
cophenetic(werra_clus)
# correlation of matrices for average linkage
cor(cophenetic(werra_clus), werra_dist)
# correlation for wards method
cor(cophenetic(werra_clus2), werra_dist)
# correlation is lower
# Compute Stress1 (analogous to NMDS) for average linkage
sqrt(sum((werra_dist - cophenetic(werra_clus))^2) / sum(werra_dist^2) )
# Compute Stress1 (analogous to NMDS) for Wards method
sqrt(sum((werra_dist - cophenetic(werra_clus2))^2) / sum(werra_dist^2) )
# Stress 1 much higher for Wards method
#############################
# Plot cluster dendrograms  #
#############################
# plot results for both methods
par(mfrow= c(1,2), cex = 1.5)
plot(werra_clus, ann = TRUE)
plot(werra_clus2, ann = TRUE)
# group 2-4 is placed in different cluster
# for Wards method
# check changes more elegantly
library(dendextend)
# converting to dendrogram objects as dendextend works with dendrogram objects
werr_clus_dend <- as.dendrogram(werra_clus)
werr_clus_dend2 <- as.dendrogram(werra_clus2)
par(mfrow= c(1,1), cex = 1.5)
tanglegram(werr_clus_dend, werr_clus_dend2, sort = TRUE, margin_outer= 3.5, main_left = "Average linkage", main_right = "Wards method", edge.lwd = 2)
# dashed lines indicate different clustering
# coloured lines connect same objects
###################################
#  Using cluster validity indices #
###################################
# how many groups are optimal?
# internal validation:
# we have no prior information on group/cluster membership
# we compare the results for 2 to 4 groups
# for the average linkage method
# first we visualise the groups
par(mfrow= c(1,1), cex=1.5)
plot(werra_clus, ann=TRUE)
rect.hclust(werra_clus, k = 2, border = 'steelblue')
rect.hclust(werra_clus, k = 3, border = 'darkgreen')
rect.hclust(werra_clus, k = 4, border = 'orange')
# and extract the different grouping vectors
cut_2 <- cutree(werra_clus, k = 2)
cut_3 <- cutree(werra_clus, k = 3)
cut_4 <- cutree(werra_clus, k = 4)
# The plclust function can be employed
# to assign the cluster group to the dendrogram.
# Check help of the function for further options
# NOTE(review): plclust() is defunct in recent R versions; the plot.hclust
# calls below achieve the same effect via the labels argument
plot(werra_clus, labels = cut_2)
plot(werra_clus, labels = cut_4)
# the package factoextra also allows several options for nice dendrograms
library("factoextra")
fviz_dend(werra_clus)
# plot clusters for k = 4
fviz_dend(werra_clus, k = 4, # Cut in four groups
k_colors = c("#2E9FDF", "#00AFBB", "#E7B800", "#FC4E07"),
color_labels_by_k = TRUE, # color labels by groups
rect = TRUE, # Add rectangle around groups
rect_border = c("#2E9FDF", "#00AFBB", "#E7B800", "#FC4E07"),
rect_fill = TRUE, lower_rect = -0.05
)
# see Kassambara A. (2017) Practical guide to cluster analysis in R:
# unsupervised machine learning, Edition 1. STHDA.
# Page 84ff for more information on plot editing and different plot types
# such as phylogenetic plots
# we can also visualise cluster results using nmds
werra_mds <- metaMDS(werra_dist)
# note the limitations of NMDS regarding
# precise representation of distances
#
# Plot NMDS for cluster solution
par(cex=1.5)
ordiplot(werra_mds, type = "n")
points(werra_mds, pch = 16)
# next we add our cluster solution (k = 2)
ordispider(werra_mds, factor(cut_2), label = TRUE)
ordihull(werra_mds, factor(cut_2), lty = "dotted")
# check for 4 groups
ordiplot(werra_mds, type = "n")
points(werra_mds, pch = 16)
# next we add our cluster solution (k = 4)
ordispider(werra_mds, factor(cut_4), label = TRUE)
ordihull(werra_mds, factor(cut_4), lty = "dotted")
# 2 groups looks more convincing
# beside visual checking, several indices have been developed
# to evaluate the quality of the cluster solution
##### computation of internal validation indices ######
# now we calculate several indices for the different groups
library(fpc)
indi_2 <- cluster.stats(werra_dist, cut_2)
indi_3 <- cluster.stats(werra_dist, cut_3)
indi_4 <- cluster.stats(werra_dist, cut_4)
# the function calculates all indices automatically
# note that the R package clusterSim
# allows for the comparison of several
# choices for distance measures and cluster methods
# NbClust is another R package that provides many
# cluster validity indices. It also features automatic
# computation of indices for all clusters
# from minimum to maximum number of clusters defined
indi_2
# range of indices provided
# we restrict the comparison to a few indices
# Calinski and Harabasz index
indi_2$ch
indi_3$ch
indi_4$ch
# the higher the better:
# the highest CH for 2 groups
# average silhouette width
indi_2$avg.silwidth
indi_3$avg.silwidth
indi_4$avg.silwidth
# highest avg. silhouette width for 2 groups
# avg. silwidth closer to 0.2 (no cluster structure)
# than to 0.5 (reasonable cluster structure)
# calculate individual silhouette values s(x)
library(cluster)
sil <- silhouette(cut_2, werra_dist)
print(sil)
# shows that observation 8 seems misclassified
plot(sil)
# plot for individual s(x)
# GAP index
# clusGap() expects a clustering function that returns a list containing a
# "cluster" component, so we wrap hierarchical clustering accordingly.
# d.meas: dissimilarity measure passed to vegan::vegdist (default Bray-Curtis);
# clus.meth: linkage method passed to hclust (default average linkage).
hclusCut <- function(x, k, d.meas = "bray", clus.meth="average" )
{
	# dissimilarity matrix under the chosen measure
	diss <- vegdist(x, method = d.meas)
	# hierarchical clustering, then cut the tree into k groups
	grouping <- cutree(hclust(diss, method = clus.meth), k = k)
	list(cluster = grouping)
}
# GAP function: compares within-cluster dispersion to a reference distribution
clusGap(werra_sp_t, hclusCut, K.max = 5, spaceH0="original")
# the SSQ are actually only for four clusters lower than from a
# reference distribution
# for visualisation of the indices, see also Kassambara 2017:
# chapters 12 to 14
##### computation of stability index (bootstrap) ######
boot_hc1 <- clusterboot(werra_dist, distances=TRUE, clustermethod=disthclustCBI, method = "average", k = 2)
boot_hc2 <- clusterboot(werra_dist, distances=TRUE, clustermethod=disthclustCBI, method = "average", k = 4)
# run for k = 2 and k = 4 from above
print(boot_hc1)
# high stability for both clusters
# "dissolved" indicates no of times the bootstrapped cluster
# is smaller or equal than the value set for dissolve (default = 0.5)
# "recovered" indicates no of times the bootstrapped cluster
# is higher or equal than the value set for recover (default = 0.75)
# plot frequency distribution of bootstrap results
plot(boot_hc1)
print(boot_hc2)
# similar stability for 3 of the 4 clusters
# overall, 2 or 4 clusters could be justified based on the indices
# the data actually comes more or less from two groups:
# upstream and downstream of a salinity discharge point
# we can use this information to compute external validity
##### computation of external validation index: Rand index ######
extindi_2 <- cluster.stats(werra_dist, clustering = cut_2, alt.clustering=as.numeric(werra_env$Position))
extindi_2$corrected.rand
# reasonably high (in terms of Jaccard index)
cut_2
as.numeric(werra_env$Position)
# only 1 case misclassified (it does not matter that 1 and 2 are interchanged)
# however, the upstream/downstream position is not necessarily the "truth"
# the cluster solution can be regarded as a different truth
# in this sense, external validation is not required
extindi_4 <- cluster.stats(werra_dist, clustering = cut_4, alt.clustering=as.numeric(werra_env$Position))
extindi_4$corrected.rand
# index logically much lower, given that external data consists of 2 groups
# visual comparison of external data
# using nmds results from above
# set colors for up-/downstream
cols <- c("darkred", "steelblue")
# quartz() opens a new graphics window on macOS; use x11() on windows
quartz()
ordiplot(werra_mds, type = "n")
points(werra_mds, col = cols[werra_env$Position], pch = 16)
legend("bottomleft", pch = 16,
col = cols,
legend = c('downstream', 'upstream'))
# next we add our cluster solution
ordispider(werra_mds, factor(cut_2), label = TRUE)
ordihull(werra_mds, factor(cut_2), lty = "dotted")
# one point is misclassified! But see comment above:
# upstream/downstream position is not necessarily
# truth that determines ecological similarity
######################################################
#   Extra Section: Don'ts in statistical analysis    #
#   Check cluster analysis with ANOVA                #
######################################################
# Although advocated in some text books,
# you must not use the group vector in a MANOVA/PERMANOVA
# To check for significant differences in the cluster solution
# see warning example below
# translated from Matlab code
# https://gist.github.com/mrkrause/2b315222abd00c902a1d
# This is a warning example that statistical tests on
# results from the cluster analysis are significant even for
# complete random data
# we construct clusters from completely random data
nrep <- 1000
sam_size <- 1000;
# sample size
groups_k <- 5;
p_random <- c(NA)
# construct vector with NA (grown inside the loop below)
p_cluster <- c(NA)
# construct vector with NA
# NOTE(review): random_group and cluster_group$cluster are numeric vectors,
# so aov() treats them as continuous predictors rather than factors — TODO confirm
# this is intended for the demonstration
for(i in 1:nrep)
{
data <- runif(sam_size)
# sample from random uniform distribution
cluster_group <- kmeans(data, groups_k)
# k means clustering
random_group <- sample(1:groups_k, 1000, replace=TRUE)
# assign random groups
p_random[i] <- summary(aov(data ~ random_group))[[1]][["Pr(>F)"]][1]
# p value for random group
p_cluster[i] <- summary(aov(data ~ cluster_group$cluster))[[1]][["Pr(>F)"]][1]
# p value for clusters from random data
}
#
par(mfrow=c(1,2))
hist(p_random, breaks=c(seq(0,1,0.025)), main = "Random data", ylab ="No of runs", xlab="p value from ANOVA")
# proportion of significant results among the random groupings
sum(p_random < 0.05)/1000
# as expected about 5% of random groups are significant
hist(p_cluster, breaks=c(seq(0,1,0.025)), main = "Cluster data", ylab ="No of runs", xlab="p value from ANOVA")
sum(p_cluster < 0.05)/1000
# by contrast, more than 90% of clusters from complete random data are significant
##########################
#   End Extra Section    #
#                        #
##########################
###################
#    Exercise     #
###################
# a) Conduct a cluster analysis for the glass data set.
# You should standardize the environmental variables
# before analysis using the scale() function.
# Compare the results for "complete", "single" and "average" linkage.
# Check the correlations of the cophenetic matrix with the initial distance matrices.
# Evaluate the number of clusters for average linkage using the GAP index for up to k = 20
library(chemometrics)
data(glass)
# b) We have groups for the glass data, which can by accessed through:
data(glass.grp)
glass.grp
# Validate the cluster solution externally, i.e. check the match between the
# given groups and the groups from cluster analysis
# Extract the cluster grouping vector for average linkage
# and check consistency with the given groups using the adjusted Rand index
######################
# k-means Clustering #
######################
library(chemometrics)
data(glass)
sc_glass <- scale(glass)
# scale glass data (zero mean, unit variance per column)
# conduct k means clustering; set.seed makes the random starts reproducible
set.seed(1000)
kclus <- kmeans(sc_glass, centers = 4, iter.max = 1000, nstart = 100)
# n start gives the number of random assignments at the start
# and iter.max the maximum number of iterations.
# Exercise: Change the values by orders of magnitude and check what happens
# compute euclidean distance based nmds
dist_glass <- dist(sc_glass)
glass_mds <- metaMDS(dist_glass)
# Plot NMDS for cluster solution using the colours based on k-means
groups <- levels(factor(kclus$cluster))
ordiplot(glass_mds, type = "n")
cols <- c("steelblue", "darkred", "darkgreen", "pink")
# draw the points of each k-means cluster in its own colour
for(i in seq_along(groups)){
points(glass_mds, select = factor(kclus$cluster) == groups[i], col = cols[i], pch = 16)
}
ordispider(glass_mds, factor(kclus$cluster), label = TRUE)
ordihull(glass_mds, factor(kclus$cluster), lty = "dotted")
# calculate internal validation indices for different ks
# the function cascadeKM is a wrapper for k-means
# and computes the Calinski and Harabasz index
# using the calinski criterion
mult_k_clus <- cascadeKM(sc_glass, inf.gr=2, sup.gr=10, iter=50, criterion="ca")
# runs k means for different numbers of groups
# we reduce the number of iterations to save time
# inf.gr = min k; sup.gr = max k
par(cex=1.5, las=1)
plot(mult_k_clus)
# would suggest 2 groups
# you can access the partitioning into the different clusters with
mult_k_clus$partition[ , 1]
# gives the clustering, i.e. partitioning for two groups
colnames(mult_k_clus$partition)[1]
# gives the name for the partitioning
mult_k_clus$partition[ , 2]
# gives the clustering, i.e. partitioning for three groups
colnames(mult_k_clus$partition)[2]
# gives the name for the partitioning
################
#   Exercise   #
################
# Compare the solutions of 2 and 4 groups
# using two other CVIs (note: use the clustering!) and visualise for 2 and 4 groups
# Which solution would you regard as the most appropriate?
|
211cbad809677e130266c618733f82488e37c6ce
|
4f6f63d0b02c75fc25f426f47da0dc1ddad74428
|
/man/plotArrayAsImage.Rd
|
f07fb3f834d5cb9d7d5e2730966e75a23ed314ae
|
[] |
no_license
|
Deyong-Li/countcolors
|
938b7c3bddadd258eb8a4b58a183e92482fc798d
|
615ecab8f3ddfea0dcb819bd6a99cc249a1bd1a4
|
refs/heads/master
| 2023-05-03T07:21:30.745547
| 2020-04-17T20:34:38
| 2020-04-17T20:34:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 729
|
rd
|
plotArrayAsImage.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization_tools.R
\name{plotArrayAsImage}
\alias{plotArrayAsImage}
\title{Plot a 3D array as an RGB image}
\usage{
plotArrayAsImage(rgb.array, main = "")
}
\arguments{
\item{rgb.array}{3D RGB array with R, G, and B channels (pixel rows x pixel
columns x color channels) to be plotted in the plot window as an actual
image.}
\item{main}{Optional title to display for image.}
}
\description{
Plots a 3D array as an sRGB image in the plot window.
}
\examples{
# Read in image
flowers <- jpeg::readJPEG(system.file("extdata", "flowers.jpg", package =
"countcolors"))
# Plot
plotArrayAsImage(flowers, main = "flowers!")
}
|
8204990925da99fb31b1bc656bc4cc3a0616e328
|
fc0f81494141a700b6228f23ef6fb30887a4d552
|
/man/cons_comp_analysis.Rd
|
e1085ae1c567838ecb464c7604e206ec1c86ae20
|
[] |
no_license
|
kthorstmann/simcom
|
3bed4e626c41193791116ec70931746f5b7c985b
|
0d4eb0faffde29a84b8103a6d6c5e9820039af18
|
refs/heads/master
| 2021-07-05T17:44:01.587052
| 2017-09-26T15:56:00
| 2017-09-26T15:56:00
| 102,983,513
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 996
|
rd
|
cons_comp_analysis.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simcom_functions.R
\name{cons_comp_analysis}
\alias{cons_comp_analysis}
\title{Run Components Analysis}
\usage{
cons_comp_analysis(data, outcome_state = "b_state", pred_trait = c("P",
"E"), pred_state = c("s_state"), single = FALSE, id = "id")
}
\arguments{
\item{data}{The data frame to analyze}
\item{outcome_state}{The outcome or dependent variable that is predicted (the average level-1 variable)}
\item{pred_trait}{The predictor variables on level-2}
\item{pred_state}{The predictor variable on level-1}
\item{id}{The id/group variable of the participants}
}
\value{
Returns a list with three elements:
\itemize{
\item{1}{The results of the components analysis}
\item{2}{The explained variance per state}
\item{3}{The results of the dominance analyses}
}
}
\description{
Run Components Analysis
}
\examples{
simulated_data <- sim_data()
data <- simulated_data
data <- cons_comp_analysis(simulated_data)
}
|
87e2f324c6957eda19f3992153992d16dc3c7438
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/reutils/examples/espell.Rd.R
|
79ccba286e135e25570fb581191fe54b6f419aad
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 156
|
r
|
espell.Rd.R
|
library(reutils)
### Name: espell
### Title: espell - retrieving spelling suggestions
### Aliases: espell
### ** Examples
# Query NCBI's espell utility for spelling suggestions on a misspelled term
# (requires internet access)
e <- espell("Chlamidia")
e
|
6f7631428d28f56f0eb338a5dadc9075e765ff61
|
09257aaa16f79444f0f56abf1e94f0aade6b69bd
|
/R/tree.constraints.R
|
428b7d75ae6c03e099d32ba4e73287a37ab31272
|
[] |
no_license
|
aaronpeikert/semtree
|
1023f61be1e63258cc3d45388c5c69861f54e176
|
bd92e8522a62026b9ac5050c0b8f5a7ff58e9cb7
|
refs/heads/master
| 2020-06-12T09:07:51.598586
| 2019-06-25T09:41:24
| 2019-06-25T09:41:24
| 194,253,753
| 1
| 0
| null | 2019-06-28T10:17:18
| 2019-06-28T10:17:18
| null |
UTF-8
|
R
| false
| false
| 311
|
r
|
tree.constraints.R
|
# Bundle up to three optional constraint specifications into a classed list.
# NULL arguments are dropped from the result, mirroring the behaviour of
# assigning NULL to a list element: only constraints that were actually
# supplied appear as elements of the returned "semtree.constraints" object.
semtree.constraints <- function(local.invariance=NULL, global.invariance=NULL, focus.parameters=NULL)
{
	spec <- list(
		local.invariance = local.invariance,
		global.invariance = global.invariance,
		focus.parameters = focus.parameters
	)
	# drop the entries that were left at their NULL default
	spec <- Filter(Negate(is.null), spec)
	class(spec) <- "semtree.constraints"
	spec
}
|
17fbd0f1dbba0bf34ebe7bd08795dfb36f1e09f8
|
450d1e2f3f661fb725f5dd86c243967d825ccaf4
|
/RCalcs_3_46_252.R
|
20205562974feffed8f42845d4b328674d6fd690
|
[] |
no_license
|
wli289/R
|
5a18f85ca34c06adb87a422740d2fa3736702041
|
2b327e387784f35a113857e67c63e298d8889601
|
refs/heads/master
| 2021-05-14T05:25:19.510529
| 2018-01-04T06:01:48
| 2018-01-04T06:01:48
| 116,221,227
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,836
|
r
|
RCalcs_3_46_252.R
|
#################################################################################################
# Worked R calculations for Society of Actuaries Exam C problems 3, 46 and 252
# (kernel density estimation and Kaplan-Meier / Nelson-Aalen survival estimates)
#################################################################################################
# SoA Exam C # 3
# NOTE(review): "data" shadows the utils::data function for the rest of the script
data <- c(2,3,3,3,7) # c(.) means "concatenate"
plot(density(data)) # visualize the results, "density" calculates the kernel density function
# calculate the kernel density estimate at 2.5
density(data, kernel = "triangular", bw=2/sqrt(6),from=2.5,to=2.5, n=1)
# in R, "bw" is the standard deviation of the kernel
# for example, specifying a uniform (or "rectangular") over [-1,1], the variance
# is (1- (-1))^2/12 = 4/12 = 1/3, so if we want b=2, specify bw=2/sqrt(3)
# check that the standard deviation for the triangular kernel is 1/sqrt(6)
# So, if we want b=2, specify bw=2/sqrt(6)
# Just for fun, try out these examples
plot(density(data, bw=.33), main="", xlab="x") # Change the bandwidth
plot(density(data, kernel = "triangular"), main="", xlab="x") # Change the kernel
#################################################################################################
# SoA Exam C # 46
library(survival)
data <- c(2,3,3,5,5,6,7,7,9,10)
status <- c(1,1,1,1,0,1,1,0,1,0) # 1 = event observed, 0 = right-censored
# outer parentheses around an assignment also print the assigned value
(SurvObject <- Surv(data, status,type="right")) # create a survival object
(surv.fit <- survfit(SurvObject ~ 1,type="kaplan-meier")) # calculates the survival curve
rbind(surv.fit$time, surv.fit$surv) # gives the curve for these time points
# This problem wants the survival curve at 8. Because the product-limit estimate is constant
# between observations, the value at 8 is found from the curve at 7, which is 0.36.
# Just for fun, you can visualize the curve
plot(survfit(SurvObject ~ 1, type="kaplan-meier", conf.int=FALSE))
plot(survfit(SurvObject ~ 1, type="kaplan-meier", conf.type="plain"))
# Try doing it with the "fleming-harrington"-Nelson-Aalen estimate of the cumulative hazard
(survFH.fit <- survfit(SurvObject ~ 1,type="fleming-harrington")) # calculates the survival curve
rbind(survFH.fit$time, survFH.fit$surv) # gives the curve for these time points
#################################################################################################
# SoA Exam C # 252
library(survival)
data <- c(4,4,5,5,5,8,10,10,12,15)
status <- c(1,1,0,0,0,1,0,0,1,1)
(SurvObject <- Surv(data, status,type="right")) # create a survival object
(surv.fit <- survfit(SurvObject ~ 1,type="kaplan-meier")) # calculates the survival curve
# Greenwood variance: surv.fit$std.err is on the log scale, so multiply by S(t)
Greenwood.Var <- (surv.fit$surv*surv.fit$std.err)^2
rbind(surv.fit$time, surv.fit$surv,Greenwood.Var) # gives the curve for these time points
# Just for fun, here is the "fleming-harrington"-Nelson-Aalen estimate of the cumulative hazard
(survFH.fit <- survfit(SurvObject ~ 1,type="fleming-harrington")) # calculates the survival curve
rbind(survFH.fit$time, survFH.fit$surv)
|
76ba11b473fa35b1382161e923446c7ba4a2777d
|
30e4c96b6aace7090bfdf3d7bbdef47c6883afd9
|
/R/BioGeoBEARS_stratified_v1.R
|
5d7f633976f5bdbdff01e636a79b713dc30c0804
|
[] |
no_license
|
nmatzke/BioGeoBEARS
|
623a7efbccab4835004100993fcfd2e9d36a39d6
|
27b2533c2e2cc47aab6db9645e19f12593a80771
|
refs/heads/master
| 2023-07-13T21:47:48.512098
| 2023-06-26T23:01:19
| 2023-06-26T23:01:19
| 9,406,671
| 42
| 20
| null | 2018-10-11T23:57:19
| 2013-04-13T00:56:24
|
R
|
UTF-8
|
R
| false
| false
| 218,435
|
r
|
BioGeoBEARS_stratified_v1.R
|
# require("ape")
# require("rexpokit")
# require("cladoRcpp")
#######################################################
# Do a stratified or other constrained analysis
#######################################################
# Based on:
# /_examples/changing_geog_v1.R
#######################################################
# section_the_tree
#######################################################
#' Section a tree for stratified analysis
#'
#' A utility function for stratified analysis. Sections the tree into a series of strata.
#' Each stratum may have one or more subtrees (APE phylo3 objects, *WITH* root edges) and/or
#' branch segments (which are just represented as numeric values, indicating the length of the sub-branch,
#' i.e. the time-width of the stratum, if the branch crosses the whole stratum.
#'
#' @param inputs The list of inputs for stratified analysis
#' @param make_master_table If desired, make an \code{inputs$master_table} containing the
#' correspondance between the original tree and the sectioned pieces.
#' @param plot_pieces If \code{TRUE}, plot the tree chunks (but not isolated branch segments) as they are created.
#' @param cut_fossils If \code{TRUE}, the program is stopped if there are fossils, i.e. tips older than 0.001 my (default). Default FALSE, as ideally, users should
#' use code{\link[ape]{drop.tip}} or an external program to clip fossils out of the tree. PLEASE NOTE that several times I have experienced miserable long nights
#' due, apparently, to \code{\link[ape]{drop.tip}} producing weird tree structures, resulting in weird Newick files, without me realizing it. The solution is usually to
#' open the Newick file in something like \code{FigTree}, resort the branches, and save to a new Newick file.
#' Fossils have now been implemented in stratified analysis; this was complicated, as it involves inserting new branches in chopped trees.
#' @param fossils_older_than Tips that are older than \code{fossils_older_than} will be marked as \code{TRUE} in a column called \code{fossils}.
#' This is not currently set to 0, because Newick files can have slight precision issues etc. that mean not all tips quite come to zero. You
#' can attempt to fix this with \code{\link{extend_tips_to_ultrametricize}} (but make sure you do not inappropriately average in fossils!!).
#' @param min_dist_between_node_and_stratum_line An error check is run, if any nodes are
#' closer to a stratum boundary than this line, an error is thrown. The easiest
#' solution is to change the date of your stratum boundary line slightly.
#' @param remove_root_edge Default TRUE, which means the root edge will be removed;
#' chainsaw2 function will not work if it is present.
#' @return \code{inputs} with \code{inputs$tree_sections_list} added.
#' @export
#' @seealso \code{\link{prt}}, \code{\link{chainsaw2}}, \code{\link[ape]{drop.tip}}
#' @note Go BEARS!
#' @author Nicholas J. Matzke \email{matzke@@berkeley.edu}
#' @references
#' \url{http://phylo.wikidot.com/matzke-2013-international-biogeography-society-poster}
#' @bibliography /Dropbox/_njm/__packages/BioGeoBEARS_setup/BioGeoBEARS_refs.bib
#' @cite Matzke_2012_IBS
#' @examples
#' test=1
section_the_tree <- function(inputs, make_master_table=FALSE, plot_pieces=TRUE, cut_fossils=FALSE, fossils_older_than=0.001, min_dist_between_node_and_stratum_line=0.00001, remove_root_edge=TRUE, save_phys_before_they_are_chopped=FALSE)
{
# Purpose: slice the input tree at each stratum boundary listed in
# inputs$timeperiods, working from the youngest boundary down towards the
# root. Each stratum yields a "chainsaw_result": a list of pieces, where a
# piece is either a subtree (ape phylo object) or an isolated sub-branch
# (stored as a numeric length). The pieces feed time-stratified likelihood
# calculations.
#
# Returns: 'inputs' with $tree_sections_list appended; plus $master_table
# (if make_master_table=TRUE) linking every piece/node back to the original
# tree, and $phys_before_they_are_chopped (if
# save_phys_before_they_are_chopped=TRUE).
#
# NOTE: the block below is a string literal (a scratchpad of example inputs
# used during interactive debugging); it is never executed.
runjunk='
make_master_table=TRUE; plot_pieces=FALSE; cut_fossils=TRUE; fossils_older_than=0.6;
inputs=BioGeoBEARS_run_object
make_master_table=TRUE
plot_pieces=TRUE
cut_fossils=TRUE
fossils_older_than=0.1
min_dist_between_node_and_stratum_line=0.00001
# testing:
orig_timeperiods = c(0.5, 1.9, 3.7, 5.1)
save_phys_before_they_are_chopped = FALSE
' # END runjunk
# trstr = "((((((((P_hawaiiensis_WaikamoiL1:0.9656850499,P_mauiensis_Eke:0.9656850499):0.7086257935,(P_fauriei2:1.230218511,P_hathewayi_1:1.230218511):0.4440923324):0.1767115552,(P_kaduana_PuuKukuiAS:1.851022399,P_mauiensis_PepeAS:1.851022399):0.0008897862802):0.3347375986,P_kaduana_HawaiiLoa:2.185759997):0.302349378,(P_greenwelliae07:1.131363255,P_greenwelliae907:1.131363255):1.35674612):1.689170274,((((P_mariniana_MauiNui:1.994011054,P_hawaiiensis_Makaopuhi:1.994011054):0.7328279804,P_mariniana_Oahu:2.726839034):0.2574151709,P_mariniana_Kokee2:2.984254205):0.4601084855,P_wawraeDL7428:3.444362691):0.732916959):0.7345185743,(P_grandiflora_Kal2:2.479300491,P_hobdyi_Kuia:2.479300491):2.432497733):0.2873119899,((P_hexandra_K1:2.363984189,P_hexandra_M:2.363984189):0.4630447802,P_hexandra_Oahu:2.826939991):2.372081244);"
# tr = read.tree(file="", text=trstr)
# END runjunk
# Optionally record the (progressively chopped-down) tree before each cut,
# for later inspection/debugging.
if (save_phys_before_they_are_chopped == TRUE)
{
phys_before_they_are_chopped = list()
pcount = 0
}
# Fixing nodes, for marginal local optimum ancestral state reconstruction, is COMPLICATED when you are
# chopping up an APE tree. Somehow we would have to keep track of which node. So, save this for later.
# Keep the original boundaries; 'timeperiods' is mutated (shifted) as the
# tree is chopped down, so it is always relative to the current tree top.
orig_timeperiods = inputs$timeperiods
timeperiods = orig_timeperiods
#original_tree = read.tree(inputs$trfn)
original_tree = check_trfn(trfn=inputs$trfn)
# Remove root edge
# A root.edge on the input tree crashes chainsaw2(), so strip it (with a
# warning) unless the caller opted out.
if (remove_root_edge == TRUE)
{
if ("root.edge" %in% names(original_tree))
{
txt = paste0("WARNING in chainsaw2: input tree had a 'root.edge', which crashes chainsaw2. Setting original_tree$root.edge=NULL.")
cat("\n\n")
cat(txt)
cat("\n\n")
warning(txt)
original_tree$root.edge = NULL
} # END if ("root.edge" %in% names(original_tree))
} # END if (remove_root_edge == TRUE)
phy_as_it_is_chopped_down = original_tree
# Make the tree table for the original tree
orig_tr_table = prt(original_tree, printflag=FALSE, get_tipnames=TRUE, fossils_older_than=fossils_older_than)
times_older_than_root_node_TF = orig_timeperiods > max(orig_tr_table$node_ht)
times_younger_than_root_node_TF = orig_timeperiods < max(orig_tr_table$node_ht)
# Error check
# Exactly ONE stratum boundary may lie below (older than) the root node.
if (sum(times_older_than_root_node_TF) >= 2)
{
txt = paste0("STOP ERROR in section_the_tree(): the timeperiods file can have ONLY ONE time older than the bottom node in your tree. Use the function prt() to get a table of node ages for your tree. The oldest node age in your tree is: ", max(orig_tr_table$node_ht), ". The times in your timeperiods file are: ", paste(orig_timeperiods, collapse=" ", sep=""), ".")
cat("\n\n")
cat(txt)
cat("\n\n")
stop(txt)
} # END if (sum(times_older_than_root_node_TF) >= 2)
# Error check
# ...and there must be at least one boundary older than the root.
if (sum(times_younger_than_root_node_TF) == 0)
{
txt = paste0("STOP ERROR in section_the_tree(): the timeperiods file *HAS* to have an oldest time that is older than the bottom node in your tree. Use the function prt() to get a table of node ages for your tree. The oldest node age in your tree is: ", max(orig_tr_table$node_ht), ". The times in your timeperiods file are: ", paste(orig_timeperiods, collapse=" ", sep=""), ".")
cat("\n\n")
cat(txt)
cat("\n\n")
stop(txt)
} # END if (sum(times_older_than_root_node_TF) >= 2)
#######################################################
# Check that only ONE time is older than the root node of the tree
#######################################################
#######################################################
# Check for the case where node(s) exactly match stratum boundaries
# This would be BAD, so disallow it.
#######################################################
for (i in 1:length(orig_timeperiods))
{
TF = orig_tr_table$time_bp == orig_timeperiods[i]
if (sum(TF) > 0)
{
errortxt = paste("\n\nERROR in section_the_tree(): your tree has ", sum(TF), " nodes with date ", orig_timeperiods[i], ".\nThis is a problem because you have a timeperiod boundary of: ", orig_timeperiods[i], "\nThe function doesn't know how to section a tree exactly at a node boundary.", "\nTo fix: change the timeperiod date, or edit the tree so that all nodes are more than ", min_dist_between_node_and_stratum_line, " time units from a timeperiod boundary\n(specified by 'min_dist_between_node_and_stratum_line', default min_dist_between_node_and_stratum_line=", min_dist_between_node_and_stratum_line, ").", "\n\nIf it makes you feel better, there is no way your dating of either phylogenetic or geological events is all that precise anyway.", sep="")
cat(errortxt)
cat("\n\nNodes with this problem:\n\n")
print(orig_tr_table[TF,])
stop("\n\nStopping on error.\n\n")
}
# Or, check for nodes too near to boundaries
# (near-zero branch segments would destabilize the likelihood calcs)
diffs = abs(orig_tr_table$time_bp - orig_timeperiods[i])
TF = diffs < min_dist_between_node_and_stratum_line
if (sum(TF) > 0)
{
errortxt = paste("\n\nERROR in section_the_tree(): your tree has ", sum(TF), " nodes with a date too close to your timeperiod boundary of: ", orig_timeperiods[i], ".\nThis is a problem because very short branches may cause issues with likelihood calculations, ancestral state estimation, and stochastic mapping.", "\nSee e.g. the min_branchlength option of calc_loglike_sp().", "\nTo fix: change the timeperiod date, or edit the tree so that all nodes are more than ", min_dist_between_node_and_stratum_line, " time units from a timeperiod boundary\n(specified by 'min_dist_between_node_and_stratum_line', default min_dist_between_node_and_stratum_line=", min_dist_between_node_and_stratum_line, ").", "\n\nIf it makes you feel better, there is no way your dating of either phylogenetic or geological events is all that precise anyway.", sep="")
cat(errortxt)
cat("\n\nNodes with this problem:\n\n")
print(orig_tr_table[TF,])
stop("\n\nStopping on error.\n\n")
}
}
# Identify fossils
# A "fossil" tip is any tip whose age (time_bp) is >= fossils_older_than.
tipnums = 1:length(original_tree$tip.label)
fossils_TF = orig_tr_table$time_bp[tipnums] >= fossils_older_than
numfossils = sum(fossils_TF)
fossil_names = original_tree$tip.label[fossils_TF]
if (numfossils > 0)
{
if (cut_fossils == TRUE)
{
# Stop the analysis so that the user may cut the fossils.
stoptxt = cat("\n\nFATAL ERROR in section_the_tree(): Your tree has ", numfossils, " fossil tips older than ", fossils_older_than, " my!\n",
"But you have not turned on fossils by setting 'cut_fossils=FALSE' in section_the_tree().\n",
"Fossil tipnames listed below:\n", sep="")
cat(stoptxt)
print(fossil_names)
# Warn about drop.tip
cat("\n\nAlso: PLEASE NOTE that several times I have experienced miserable long nights due, apparently, to drop.tip producing weird tree structures, resulting in weird Newick files, without me realizing it. The solution is usually to open the Newick file in something like FigTree, resort the branches, and save to a new Newick file.\n\n")
stop(stoptxt)
# Unexecuted string literal: example of how a user would drop the
# fossils themselves.
junk='
tr_nofossils = drop.tip(original_tree, fossil_names)
write.tree(tr_nofossils, file="venerid_tree_for_biogeog_v1.newick")
'
} else {
# The simplest approach to INCLUDING fossils is to artificially extend the branchlengths
warntxt = cat("\n\nWARNING: Your tree has ", numfossils, " fossil tips older than fossils_older_than=", fossils_older_than, " my!\n",
"If you actually have that many fossil tips, then everything is fine, and you can ignore this warning. If not, make sure that all fossils are older than whatever you set 'fossils_older_than' to be. If you do *not* have any fossils, then you are probably using an undated tree. This is a Very Bad Idea in general, please see 'BioGeoBEARS Mistakes To Avoid' at PhyloWiki.\n",
"(default: fossils_older_than=0.6)\n",
"Fossil tipnames listed below:\n", sep="")
warning(warntxt)
cat(warntxt)
cat(paste(fossil_names, collapse="\n", sep=""))
cat("\n\n")
# This will extend ALL tips up to time_bp=0 my. Keep track of true tip age through orig_tr_table$fossils and orig_tr_table$time_bp
phy_as_it_is_chopped_down = extend_tips_to_ultrametricize(obj=phy_as_it_is_chopped_down, age_of_root=0, tips_end_at_this_date=NA)
}
}
# Make a master table of how the pieces correspond to the original tree!
if (make_master_table == TRUE)
{
master_table = NULL
}
if (plot_pieces == TRUE)
{
plot(phy_as_it_is_chopped_down)
#abline(v=timeperiods)
axisPhylo()
}
# CHECK THIS FUNCTION
#phy_as_it_is_chopped_down$edge.length = phy_as_it_is_chopped_down$edge.length + 0.0001
tree_sections_list = NULL
tnum = 0
# Trivial case: 0 or 1 timeperiods means no cutting at all -- the "section"
# is just the whole tree, wrapped up as a chainsaw_result.
if (length(timeperiods) <= 1)
{
chainsaw_result = list()
chainsaw_result$tree_to_chainsaw = phy_as_it_is_chopped_down
chainsaw_result$return_pieces_list[[1]] = phy_as_it_is_chopped_down
# Merge THEN split THEN sort!!
# Make sure to sort the names before merging
tmp_sorted_names_merge = paste(phy_as_it_is_chopped_down$tip.label, collapse=",", sep="")
tmp_sorted_names_split = strsplit(x=tmp_sorted_names_merge, split=",")[[1]]
chainsaw_result$return_pieces_basenames[[1]] = paste(sort(tmp_sorted_names_split), collapse=",", sep="")
attr(chainsaw_result, "class") = "chainsaw_result"
tree_sections_list[[1]] = chainsaw_result
} else {
# Instead of using these column names, which might change:
# c(1,4:8,10)
# ...use col headings (and add fossils)
table_colnames = c("node", "node.type", "parent_br", "edge.length", "ancestor", "daughter_nds", "time_bp", "fossils", "label")
SUBtable_colnames = paste("SUB", table_colnames, sep="")
# Put the tips into the master condlikes table (important if we have ambiguous tips)
if (make_master_table == TRUE)
{
# First, put in the original tree tips
orig_tips_table = orig_tr_table[1:length(original_tree$tip.label), ]
subtree_table = orig_tips_table
names(subtree_table) = paste("SUB", names(subtree_table), sep="")
# Get the relative timepoint
# (original tips get dummy stratum/piece identifiers: stratum 0,
# piececlass "orig_tip")
stratum = 0
reltimept = 0
time_bot = 0
time_top = 0
piecenum = 0
piececlass = "orig_tip"
subtree_table = cbind(stratum, time_top, time_bot, reltimept, piecenum, piececlass, subtree_table[,SUBtable_colnames])
subtree_table$SUBnode.type = "orig_tip"
tmp_join_table = cbind(orig_tips_table[, table_colnames], subtree_table)
tmp_join_table
master_table = rbind(master_table, tmp_join_table)
}
# Main loop: one iteration per stratum, youngest first. Each iteration
# chops the current tree at the next boundary and records the pieces.
for (i in 1:(length(timeperiods)))
#for (i in 1:3))
{
# Label the stratum
stratum = i
cat("\n", i, "- top: ", orig_timeperiods[i]-timeperiods[i], ", bot: ", orig_timeperiods[i], ", rel_bot: ", timeperiods[i], "\n", sep="")
# Chainsaw the top off the tree
# 'timepoint' is the depth of the cut RELATIVE to the current
# (already-chopped) tree top.
if (i == 1)
{
timepoint = timeperiods[i] - 0
} else {
timepoint = timeperiods[i]# - timeperiods[i-1]
}
# Update timepoints so you are subtracting the right amount!!!!!!!!
timeperiods = timeperiods - timepoint
timeperiods
if (save_phys_before_they_are_chopped == TRUE)
{
phys_before_they_are_chopped[[(pcount=pcount+1)]] = phy_as_it_is_chopped_down
}
# Check if you are in the last timeperiod
if (i < length(timeperiods))
{
# Otherwise, CHAINSAW the sucker!
chainsaw_result = chainsaw2(phy_as_it_is_chopped_down, timepoint=timepoint, return_pieces=TRUE)
#print(chainsaw_result)
} else {
# If it's the last piece, just use the remaining leftover tree chunk
chainsaw_result = list()
chainsaw_result$tree_to_chainsaw = phy_as_it_is_chopped_down
chainsaw_result$return_pieces_list[[1]] = phy_as_it_is_chopped_down
# Merge THEN split THEN sort!!
# This may not be necessary; but what the heck.
# Make sure to sort the names before merging
tmp_sorted_names_merge = paste(phy_as_it_is_chopped_down$tip.label, collapse=",", sep="")
tmp_sorted_names_split = strsplit(x=tmp_sorted_names_merge, split=",")[[1]]
chainsaw_result$return_pieces_basenames[[1]] = paste(sort(tmp_sorted_names_split), collapse=",", sep="")
attr(chainsaw_result, "class") = "chainsaw_result"
}
# Store the chainsaw result (initial: store again if you change chainsaw_result)
tree_sections_list[[(tnum=tnum+1)]] = chainsaw_result
# Make a master table of how the pieces correspond to the original tree!
if (make_master_table == TRUE)
{
# Update the corresponding table
tipnames_above_cutpoints = unlist(chainsaw_result$return_pieces_basenames)
tipnames_above_cutpoints
# Find the position of this subbranch (its top node) in the overall tree
# (matching is done on sorted comma-joined tip-name strings, which
# uniquely identify a node)
pos_of_1st_in_2nd = match(tipnames_above_cutpoints, orig_tr_table$tipnames)
pos_of_1st_in_2nd
classes_of_pieces = sapply(X=chainsaw_result$return_pieces_list, FUN=class)
classes_of_pieces[classes_of_pieces == "numeric"] = "subbranch"
classes_of_pieces[classes_of_pieces == "phylo"] = "subtree"
# Get the tree structure as the tree is chopped down
#print(i)
phy_chopped_down_table = prt(phy_as_it_is_chopped_down, printflag=FALSE, get_tipnames=TRUE, fossils_older_than=fossils_older_than)
# re-sort the tipnames
for (rownum in 1:nrow(phy_chopped_down_table))
{
temp_tipnames = phy_chopped_down_table$tipnames[rownum]
words = strsplit(temp_tipnames, split=",")[[1]]
words = sort(words)
phy_chopped_down_table$tipnames[rownum] = paste(words, collapse=",", sep="")
}
# Get the relative timepoint
reltimept = timepoint
time_bot = orig_timeperiods[i]
time_top = time_bot - reltimept
# Accumulate the rows of the table
# Go through the pieces
for (p in 1:length(classes_of_pieces))
{
# For subtrees, get all the corresponding node info
if (classes_of_pieces[p] == "subtree")
{
# Get the nodenums in the subtree that's been removed
tmp_subtree = chainsaw_result$return_pieces_list[[p]]
subtree_table = prt(tmp_subtree, printflag=FALSE, get_tipnames=TRUE, fossils_older_than=fossils_older_than)
# re-sort the tipnames
for (rownum in 1:nrow(subtree_table))
{
temp_tipnames = subtree_table$tipnames[rownum]
words = strsplit(temp_tipnames, split=",")[[1]]
words = sort(words)
subtree_table$tipnames[rownum] = paste(words, collapse=",", sep="")
}
names(subtree_table) = paste("SUB", names(subtree_table), sep="")
subtree_table
# Identify the corresponding nodes
tree_piece_nodenums = subtree_table$SUBnode
tiplabels_for_each_node_in_tree_piece = subtree_table$SUBtipnames
pos_of_1st_in_2nd = match(tiplabels_for_each_node_in_tree_piece, orig_tr_table$tipnames)
pos_of_1st_in_2nd
# Add the pieces identifiers
piecenum = p
piececlass = classes_of_pieces[p]
subtree_table = cbind(stratum, time_top, time_bot, reltimept, piecenum, piececlass, subtree_table[,SUBtable_colnames])
subtree_table
tmp_join_table = cbind(orig_tr_table[pos_of_1st_in_2nd, table_colnames], subtree_table)
tmp_join_table
################################################
# 2016-02-29 bug fix
# Fossil branches on sub-trees were not being cut down appropriately,
# at least for hook nodes
################################################
# Identify tips that are fossils *in* the subtree:
# these branches need to be cut down further
actual_heights_below_bin_top = tmp_join_table$time_bp - tmp_join_table$time_top
actual_height_lower_than_bin_top_TF = actual_heights_below_bin_top > 1e-10
subtree_tip_TF = tmp_join_table$SUBnode.type == "tip"
fossil_in_subtree_TF = (actual_height_lower_than_bin_top_TF + subtree_tip_TF) == 2
# Declare them fossils WITHIN the subtree
tmp_join_table$SUBfossils[fossil_in_subtree_TF] = TRUE
tmp_join_table$SUBtime_bp[fossil_in_subtree_TF] = actual_heights_below_bin_top[fossil_in_subtree_TF]
# Adjust the edge lengths in the table, and in the subtree
# table
new_subtree_edge_lengths = tmp_join_table$SUBedge.length[fossil_in_subtree_TF]
tmp_join_table$SUBedge.length[fossil_in_subtree_TF] = tmp_join_table$SUBedge.length[fossil_in_subtree_TF] - actual_heights_below_bin_top[fossil_in_subtree_TF]
# subtree
# tmp_subtree = chainsaw_result$return_pieces_list[[p]]
tmp_subtree2 = tmp_subtree
#print(tmp_subtree2$edge.length)
# Remove the root node edge length, which will NOT be in the
# tree object's list of edges
subtree_tipnums_to_change = tmp_join_table$SUBnode[fossil_in_subtree_TF]
# Match the subtree tipnums to the subtree's edge table
edge_table_rownums_to_change = match(x=subtree_tipnums_to_change, table=tmp_subtree2$edge[,2])
tmp_subtree2$edge.length[edge_table_rownums_to_change] = tmp_join_table$SUBedge.length[fossil_in_subtree_TF]
#print(tmp_subtree2$edge.length)
#print(tmp_join_table)
#print(write.tree(tmp_subtree2, file=""))
#plot(tmp_subtree2)
#axisPhylo()
chainsaw_result$return_pieces_list[[p]] = tmp_subtree2
# Store the chainsaw result (again: store again if you change chainsaw_result)
tree_sections_list[[tnum]] = chainsaw_result
################################################
# END 2016-02-29 bug fix
################################################
# NA check
if (is.na(tmp_join_table[1,1]) == TRUE)
{
stoptxt = "\n\nFATAL ERROR #1 produced in section_the_tree(): NAs in tmp_join_table.\n\n"
cat(stoptxt)
print("i")
print(i)
print("p")
print(p)
print(tmp_join_table)
stop(stoptxt)
}
# END subtree
# END if (classes_of_pieces[p] == "subtree")
} else {
# START sub-branch
# For sub-branches, just add 1 row
# Get the nodenums in the subtree that's been removed
tmp_subbranch = chainsaw_result$return_pieces_list[[p]]
# subtree_table = prt(phy_as_it_is_chopped_down, printflag=FALSE, get_tipnames=TRUE)
# names(subtree_table) = paste("SUB", names(subtree_table), sep="")
# subtree_table
# Identify the corresponding nodes
tree_piece_nodenums = 1
tmp_basenames = chainsaw_result$return_pieces_basenames[[p]]
# This may not be necessary; but what the heck.
tmp_basenames2 = paste(tmp_basenames, collapse=",", sep="")
tmp_basenames3 = strsplit(x=tmp_basenames2, split=",")[[1]]
tiplabels_for_each_node_in_tree_piece = paste(sort(tmp_basenames3), collapse=",", sep="")
pos_of_1st_in_2nd = match(tiplabels_for_each_node_in_tree_piece, phy_chopped_down_table$tipnames)
pos_of_1st_in_2nd
# Use the chopped-down-tree to reference isolated branches (may not matter)
subtree_table = phy_chopped_down_table
names(subtree_table) = paste("SUB", names(subtree_table), sep="")
subtree_table
# Add the pieces identifiers
piecenum = p
piececlass = classes_of_pieces[p]
subtree_table = cbind(stratum, time_top, time_bot, reltimept, piecenum, piececlass, subtree_table[pos_of_1st_in_2nd, SUBtable_colnames])
# Find the reference to the master tree
pos_of_1st_in_2nd = match(tiplabels_for_each_node_in_tree_piece, orig_tr_table$tipnames)
pos_of_1st_in_2nd
tmp_join_table = cbind(orig_tr_table[pos_of_1st_in_2nd, table_colnames], subtree_table)
tmp_join_table
# NA check
if (is.na(tmp_join_table[1,1]) == TRUE)
{
stoptxt = "\n\nFATAL ERROR #2 produced in section_the_tree(): NAs in tmp_join_table.\n\n"
cat(stoptxt)
print("i")
print(i)
print("p")
print(p)
print(tmp_join_table)
print(tiplabels_for_each_node_in_tree_piece)
#print(orig_tr_table$tipnames)
print(pos_of_1st_in_2nd)
stop(stoptxt)
}
} # END if (classes_of_pieces[p] == "subtree")
master_table = rbind(master_table, tmp_join_table)
} # END for (p in 1:length(classes_of_pieces))
} # END if (make_master_table == TRUE)
# Convey the tree to the next round of chopping
phy_as_it_is_chopped_down = chainsaw_result$tree_to_chainsaw
if (plot_pieces == TRUE)
{
plot(phy_as_it_is_chopped_down)
#axisPhylo2(side = 1, roundlabels=TRUE, minage=timeperiods[i]
axisPhylo()
}
}
}
if (save_phys_before_they_are_chopped == TRUE)
{
inputs$phys_before_they_are_chopped = phys_before_they_are_chopped
}
# Append to inputs and return
inputs$tree_sections_list = tree_sections_list
# Also append the master table
# (NOTE(review): if make_master_table=FALSE and length(timeperiods) > 1,
# 'master_table' is never assigned -- this line would then error; presumably
# callers always use make_master_table=TRUE in that situation. TODO confirm.)
inputs$master_table = master_table
return(inputs)
} # END section_the_tree <- function(inputs, make_master_table=FALSE, plot_pieces=TRUE, cut_fossils=FALSE, fossils_older_than=0.001, min_dist_between_node_and_stratum_line=0.00001, remove_root_edge=TRUE, save_phys_before_they_are_chopped=FALSE)
#######################################################
# chainsaw2
#######################################################
#' Saw a tree off at a particular time before present
#'
#' This function chops a tree like a hedge-trimmer, cutting straight across at a particular timepoint.
#' The pieces are returned, as is the leftover tree, with branches shortened appropriately. Pieces
#' that are mini-trees are returned as ape objects, whereas single branches are just lengths.
#'
#' This function is used during stratification, but could have other uses as well.
#'
#' @param tr An ape phylo object.
#' @param timepoint The time at which the tree should be "chopped".
#' @param return_pieces Default TRUE, which means pieces should be returned
#' @param remove_root_edge Default TRUE, which means the root edge will be removed;
#' chainsaw2 function will not work if it is present.
#' @return \code{chainsaw_result} (a list object with the pieces) or \code{tree_to_chainsaw}, just the leftover tree
#' @export
#' @seealso \code{\link{section_the_tree}}
#' @note Go BEARS!
#' @author Nicholas J. Matzke \email{matzke@@berkeley.edu}
#' @references
#' \url{http://phylo.wikidot.com/matzke-2013-international-biogeography-society-poster}
#' @bibliography /Dropbox/_njm/__packages/BioGeoBEARS_setup/BioGeoBEARS_refs.bib
#' @cite Matzke_2012_IBS
#' @examples
#' test=1
chainsaw2 <- function(tr, timepoint=10, return_pieces=TRUE, remove_root_edge=TRUE)
{
# Take a tree and saw it off evenly across a certain timepoint.
# This removes any tips above the timepoint, and replaces them
# with a single tip representing the lineage crossing
# the timepoint (with a new tip name).
#
# The new tip's name is the sorted, comma-joined list of the tip labels it
# subtends, which lets callers (e.g. section_the_tree) match pieces back to
# nodes of the original tree. When return_pieces=TRUE, a 'chainsaw_result'
# list is returned containing:
#   $tree_to_chainsaw          -- the leftover (chopped-down) tree
#   $return_pieces_list        -- the removed pieces: ape phylo subtrees,
#                                 or bare numeric branch lengths for single
#                                 branches crossing the cut
#   $return_pieces_basenames   -- the sorted tip-name string for each piece
# Otherwise just the leftover tree is returned.
#
# NOTE: the block below is a string literal (debugging scratchpad), never
# executed.
defaults='
phy_as_it_is_chopped_down
timepoint=timepoint
return_pieces=TRUE
'
# Remove root edge
# (a root.edge would throw off the branch-length bookkeeping below)
if (remove_root_edge == TRUE)
{
if ("root.edge" %in% names(tr))
{
txt = paste0("WARNING in chainsaw2: input tree had a 'root.edge', which crashes chainsaw2. Setting tr$root.edge=NULL.")
cat("\n\n")
cat(txt)
cat("\n\n")
warning(txt)
tr$root.edge = NULL
} # END if ("root.edge" %in% names(tr))
} # END if (remove_root_edge == TRUE)
# Get the tree in a table
tr_table = prt(tr, printflag=FALSE, get_tipnames=FALSE)
tr_table
# Find the tips that are less than 10 my old and drop them
# (the "10 my" in these comments is just the historical example value;
# the actual cut depth is 'timepoint')
TF_exists_more_recently_than_10mya = tr_table$time_bp < timepoint
# Get the corresponding labels
labels_for_tips_existing_more_recently_than_10mya = tr_table$label[ TF_exists_more_recently_than_10mya == TRUE ]
###########################################
# Draft chainsaw function
###########################################
# loop through the branches that cross 10 mya
# get a list of the edge start/stops in the phylogeny's edges
edge_times_bp = get_edge_times_before_present(tr)
# which of these branches cross 10 mya (or whatever timepoint)?
# (an edge crosses the cut if it starts below it and ends at/above it)
edges_start_earlier_than_10mya = edge_times_bp[, 1] > timepoint
edges_end_later_than_10mya = edge_times_bp[, 2] <= timepoint
edges_to_chainsaw = edges_start_earlier_than_10mya + edges_end_later_than_10mya == 2
# then, for each of these edges, figure out how many tips exist descending from it
# these are the nodes ABOVE the cutoff line
nodes_to_chainsaw = tr$edge[, 2][edges_to_chainsaw]
# Take only internal nodes (? why ?)
numtips = length(tr$tip.label)
#nodes_to_chainsaw = nodes_to_chainsaw[nodes_to_chainsaw > numtips]
# create a copy of the tree to chainsaw
tree_to_chainsaw = tr
if (return_pieces == TRUE)
{
return_pieces_list = as.list(rep(NA, length(nodes_to_chainsaw)))
return_pieces_basenames = as.list(rep(NA, length(nodes_to_chainsaw)))
chopTable = NULL
}
chainsaw_table = NULL
for (i in 1:length(nodes_to_chainsaw))
{
# If this is a tip node on the current tree, shorten the branch rather than cut it off
if (nodes_to_chainsaw[i] <= numtips)
{
# Here, chainsaw is cutting an internal node, so extract the sectioned branch before you cut it down
# (the cutting happens after the forloop)
# (This is easy, it is just the length of the timeslab;
# which you should UPDATE as you move down the tree
if (return_pieces == TRUE)
{
# Record the length of the branch section, and the name of that tip
# (which is also the name of that base)
return_pieces_list[[i]] = timepoint
tmp_tipname = tr$tip.label[nodes_to_chainsaw[i]]
return_pieces_basenames[[i]] = tmp_tipname
}
# You don't have to do anything else, the chopping of single branches is
# covered after the forloop
#cat("\ni=", i, " ntips=", length(tree_to_chainsaw$tip.label), sep="")
} else {
# Here, it's an internal node, so extract the subtree before you drop it
tmp_subtree = extract.clade(tr, nodes_to_chainsaw[i])
#plot(tmp_subtree, root.edge=TRUE)
# Also, record the branchlength below this node
branchlength_below_subtree_LCA_node = timepoint - get_max_height_tree(tmp_subtree)
# Add this to the bottom of the subtree
tmp_subtree$root.edge = branchlength_below_subtree_LCA_node
#plot(tmp_subtree, root.edge=TRUE)
# Record the piece, if desired
if (return_pieces == TRUE)
{
# Record the length of the branch section, and the name of that tip
# (which is also the name of that base)
return_pieces_list[[i]] = tmp_subtree
# Merge THEN split THEN sort!!
tmp_labels_merge = paste(tmp_subtree$tip.label, collapse=",", sep="")
tmp_labels_split = strsplit(tmp_labels_merge, split=",")[[1]]
new_labels = sort(tmp_labels_split)
basename_after_cutting = paste(new_labels, collapse=",", sep="")
return_pieces_basenames[[i]] = basename_after_cutting
}
#print(tmp_subtree$tip.label)
tmp_number_of_tips = length(tmp_subtree$tip.label)
#print(tmp_number_of_tips)
# Collapse the subtree's clade in the leftover tree down to one tip:
# keep one tip, rename it with the merged name, and drop the rest.
# number of tips to drop = (numtips -1)
numtips_to_drop = tmp_number_of_tips - 1
# tips_to_drop
tmp_labels = tmp_subtree$tip.label
labels_to_drop = tmp_labels[1:numtips_to_drop]
ordered_labels_to_make_into_new_name = sort(tmp_labels)
name_new_tip = paste(ordered_labels_to_make_into_new_name, collapse=",", sep="")
# new label
label_kept_num = length(tmp_labels)
label_kept = tmp_labels[label_kept_num]
#new_label = paste("CA_", label_kept, "+", numtips_to_drop, "_tips", sep="")
new_label = name_new_tip
tree_to_chainsaw$tip.label[tree_to_chainsaw$tip.label == label_kept] = new_label
# chop off e.g. 2 of the 3 tips
tree_to_chainsaw = drop.tip(tree_to_chainsaw, labels_to_drop)
#cat("\ni=", i, " ntips=", length(tree_to_chainsaw$tip.label), sep="")
} # end else
} # end for loop
#plot(tree_to_chainsaw)
#axisPhylo()
# Now shorten every remaining tip branch so the leftover tree's tips all
# sit exactly at the cut line ('adjustment' is negative or zero).
tree_to_chainsaw_table = prt(tree_to_chainsaw, printflag=FALSE)
tree_to_chainsaw_table_tips_TF_time_bp_LT_10my = tree_to_chainsaw_table$time_bp < timepoint
tmp_edge_lengths = tree_to_chainsaw_table$edge.length[tree_to_chainsaw_table_tips_TF_time_bp_LT_10my]
times_bp_for_edges_to_chainsaw = tree_to_chainsaw_table$time_bp[tree_to_chainsaw_table_tips_TF_time_bp_LT_10my]
adjustment = times_bp_for_edges_to_chainsaw - timepoint
revised_tmp_edge_lengths = tmp_edge_lengths + adjustment
tree_to_chainsaw_table$edge.length[tree_to_chainsaw_table_tips_TF_time_bp_LT_10my] = revised_tmp_edge_lengths
# revised
# Write the adjusted edge lengths from the table back into the phylo
# object's edge.length slots.
ordered_nodenames = get_nodenums(tree_to_chainsaw)
parent_branches = get_indices_where_list1_occurs_in_list2(ordered_nodenames, tree_to_chainsaw$edge[,2])
# NJM 2014-12-11: this assumes your tree has NO root edge length;
# I'll put in a check for this in this function.
NA_false = is.not.na(tree_to_chainsaw_table$edge.length)
tree_to_chainsaw$edge.length[parent_branches[NA_false]] = tree_to_chainsaw_table$edge.length[NA_false]
#######################################################
# Error check
#######################################################
# Negative branch lengths after the cut indicate an internal bookkeeping
# failure; fail loudly with advice rather than return a broken tree.
tmp_trtable = prt(tree_to_chainsaw, printflag=FALSE)
brlens = tmp_trtable$edge.length
TF = brlens <= 0
TF[is.na(TF)] = FALSE
if (sum(TF) > 0)
{
nodenums = (1:nrow(tmp_trtable))[TF]
nodenums
txt = paste0("STOP ERROR in chainsaw2(): the post-chainsaw tree had ", sum(TF), " negative branchlengths. READ THE FOLLOWING ERROR MESSAGE SLOWLY AND CAREFULLY AND YOU MAY FIND A SOLUTION. This error seems to sometimes occur with large cuts on trees with many fossil tips (i.e., non-contemporaneous tips). I'm not sure what causes the bug, except that chainsaw-ing an APE phylo object is quite complex, and it is even more complex for a tree with many non-contemporaneous tips. Imagine a phylogeny made of cardboard, then cutting it at various timepoints, then keeping track of all of the pieces. Anyway, until I fix it, your best bet is to chainsaw2() in stages, using smaller cuts than the one that caused the error. Or if you are using BioGeoBEARS and doing a time-stratified analysis, create extra time-strata (perhaps repeating the same settings for several time bins), so that the usage of chainsaw2() within section_the_tree() does not cause an error.")
cat("\n\n")
cat(txt)
cat("\n\n")
stop(txt)
} # END if (sum(TF) > 0)
if (return_pieces == TRUE)
{
chainsaw_result = NULL
chainsaw_result$tree_to_chainsaw = tree_to_chainsaw
chainsaw_result$return_pieces_list = return_pieces_list
chainsaw_result$return_pieces_basenames = return_pieces_basenames
class(chainsaw_result) = "chainsaw_result"
return(chainsaw_result)
} else {
return(tree_to_chainsaw)
}
}
#######################################################
# calc_loglike_sp_stratified:
#######################################################
#' Calculate log-likelihood with a transition matrix and speciation events, and with stratification
#'
#' This function is the stratified version of \code{\link{calc_loglike_sp}}.
#'
#' @param tip_condlikes_of_data_on_each_state A numeric matrix with rows representing tips, and columns representing states/geographic ranges. The cells
#' give the likelihood of the observation data under the assumption that the tip has that state; typically this means that the known geographic range gets a
#' '1' and all other states get a 0.
#' @param phy A phylogeny object. The function converts it to pruningwise order.
#' @param Qmat A Q transition matrix representing the along-branch model for the evolution of geographic range, using parameters \emph{d} (dispersal/range expansion),
#' \emph{e} (extinction/range contraction/local extirpation), and perhaps others (e.g. distance). This matrix can be input in either dense or sparse (COO) format,
#' as specified by \code{input_is_COO}.
#' @param spPmat Default is \code{NULL}; users should usually use \code{spPmat_inputs}. \code{spPmat} is A numeric matrix representing the probability of each
#' ancestor range-->(Left range, Right range) transition at cladogenesis events. There are
#' different ways to represent this matrix. In the simplest representation, this is just a rectangular matrix with numstates rows (representing the ancestral
#' states) and numstates^2 columns (representing all possible descendant pairs). Use of this type of matrix is specified by \code{cppSpMethod=1}. It is calculated
#' from a textual speciation matrix (typically \code{spmat} in the code) via \code{\link{symbolic_to_relprob_matrix_sp}}. However, this matrix gets huge and
#' slow for large numbers of states/ranges. \code{cppSpMethod=2} and \code{cppSpMethod=3} implement successively more efficient and faster
#' representation and processing of this matrix in COO-like formats. See \code{\link[cladoRcpp]{rcpp_calc_anclikes_sp_COOprobs}} for the \code{cppSpMethod=2}
#' method, and \code{\link[cladoRcpp]{rcpp_calc_anclikes_sp_COOweights_faster}} for the \code{cppSpMethod=3} method (the fastest).
#' @param min_branchlength Nodes with branches below this branchlength will not be treated as cladogenesis events; instead, they will be treated as
#' if an OTU had been sampled from an anagenetic lineage, i.e. as if you had a direct ancestor. This is useful for putting fossils into the biogeography analysis,
#' when you have fossil species that range through time. (Note: the proper way to obtain such trees, given that most phylogenetic methods force all OTUs to be tips
#' rather than direct ancestors, is another question subject to active research. However, one method might be to just set a branch-length cutoff, and treat any
#' branches sufficiently small as direct ancestors.)
#' @param return_what What should be returned to the user? Options are "loglike" (the log-likelihood of the data under the tree, model, and model parameters),
#' "nodelikes" (the scaled conditional likelihoods at the nodes), "rootprobs" (the relative probability of the geographic ranges/states at the root), or "all"
#' (all of the above in a list). Typically the user will only want to return "loglike" while doing ML optimization, but then return "all" once the ML parameter
#' values have been found.
#' @param probs_of_states_at_root The prior probability of the states/geographic ranges at the root. The default, \code{NULL}, effectively means an equal probability
#' for each state (this is also what \code{LAGRANGE} assumes; and running with NULL will reproduce exactly the \code{LAGRANGE} parameter inferences and
#' log-likelihood).
#' @param rootedge Should the root edge be included in the calculation (i.e., calculate to the bottom of the root), if a root edge is present? Default \code{FALSE}.
#' @param sparse Should sparse matrix exponentiation be performed? This should be faster for very large matrices (> 100-200 states), however, the calculations
#' appear to be less accurate. The function will transform a dense matrix to COO format (see \code{\link[rexpokit]{mat2coo}}) if necessary according to
#' the \code{input_is_COO} parameter.
#' @param printlevel If >= 1, various amounts of intermediate output will be printed to screen. Note: Intermediate outputs from C++ and FORTRAN functions have been
#' commented out, to meet CRAN guidelines.
#' @param use_cpp Should the C++ routines from \code{\link[cladoRcpp]{cladoRcpp}} be used to speed up calculations? Default \code{TRUE}.
#' @param input_is_COO Is the input Q matrix a sparse, COO-formatted matrix (\code{TRUE}) or a standard dense matrix (\code{FALSE}). Default \code{FALSE}.
#' @param spPmat_inputs A list of parameters so that \code{spPmat} (the speciation transition probability matrix) can be calculated on-the-fly, according
#' to the method in \code{cppSpMethod}. See example.
#' @param cppSpMethod Three C++ methods from cladoRcpp for calculating and using the cladogenesis probability matrix. 1 is slowest but easiest to understand; 3 is fastest.
#' If \code{spPmat_inputs} is given, the program will generate the appropriate spPmat on-the-fly, and the user does not have to input the full \code{spPmat} manually.
#' @param cluster_already_open If the user wants to distribute the matrix exponentiation calculations from all the branches across a number of processors/nodes on
#' a cluster, specify the cluster here. E.g. \code{cluster_already_open = makeCluster(rep("localhost",num_cores_to_use), type = "SOCK")}. Note: this will work on
#' most platforms, including Macs running R from command line, but will NOT work on Macs running the R GUI \code{R.app}, because parallel processing functions like
#' \code{makeCluster} from e.g. \code{library(parallel)} for some reason crash R.app. The program runs a check for R.app and will just run on 1 node if found.
#' Default is \code{NULL}, which means running on a single processor.
#' @param calc_ancprobs Should ancestral state estimation be performed (adds an uppass at the end)?
#' @param include_null_range Does the state space include the null range? Default \code{TRUE}.
#' @param fixnode If the state at a particular node is going to be fixed (e.g. for ML marginal ancestral states), give the node number.
#' (Trial implementation for stratified analysis.)
#' @param fixlikes The state likelihoods to be used at the fixed node. I.e. 1 for the fixed state, and 0 for the others.
#' (Trial implementation for stratified analysis.)
#' @param inputs A list of inputs containing the dispersal matrix for each time period, etc.
#' @param allareas A list of all the areas in the total analysis
#' @param all_states_list A list of all the states in the total analysis (0-based coding - ?)
#' @param return_condlikes_table If \code{TRUE}, return the table of ALL conditional likelihood results, including at branch subsections
#' (only some should be used in calculating the final log-likelihood of the geography range data on the tree!)
#' @param calc_TTL_loglike_from_condlikes_table If TRUE, force making of the condlikes table, and use it to calculate the log-likelihood
#' (default=TRUE; matches LAGRANGE).
#' @return grand_total_likelihood The total log-likelihood of the data on the tree (default). Or, if
#' \code{return_condlikes_table==TRUE}, the function returns \code{calc_loglike_sp_stratified_results}, with
#' \code{calc_loglike_sp_stratified_results$condlikes_table} and \code{calc_loglike_sp_stratified_results$grand_total_likelihood}
#' as list items. This can be useful for debugging stratified analyses, which have a lot of extra book-keeping that is easy to mess up.
#' @export
#' @seealso \code{\link{calc_loglike_sp}}, \code{\link[cladoRcpp]{rcpp_calc_anclikes_sp}}, \code{\link[cladoRcpp]{rcpp_calc_anclikes_sp_COOprobs}},
#' \code{\link[cladoRcpp]{rcpp_calc_anclikes_sp_COOweights_faster}}, \code{\link[rexpokit]{mat2coo}},
#' \code{\link{rcpp_calc_anclikes_sp_COOweights_faster}}
#' @bibliography /Dropbox/_njm/__packages/cladoRcpp_setup/cladoRcpp_refs.bib
#' @cite Matzke_2012_IBS
#' @cite ReeSmith2008
#' @cite Landis_Matzke_etal_2013_BayArea
#' @note Go BEARS!
#' @note (COO = Coordinate list format for a matrix; see \url{http://en.wikipedia.org/wiki/Sparse_matrix#Coordinate_list_.28COO.29})
#' @author Nicholas Matzke \email{matzke@@berkeley.edu}
#' @examples
#' testval=1
#'
calc_loglike_sp_stratified <- function(tip_condlikes_of_data_on_each_state, phy, Qmat=NULL, spPmat=NULL, min_branchlength=0.000001, return_what="loglike", probs_of_states_at_root=NULL, rootedge=TRUE, sparse=FALSE, printlevel=0, use_cpp=TRUE, input_is_COO=FALSE, spPmat_inputs=NULL, cppSpMethod=3, cluster_already_open=NULL, calc_ancprobs=FALSE, include_null_range=TRUE, fixnode=NULL, fixlikes=NULL, inputs=inputs, allareas=allareas, all_states_list=all_states_list, return_condlikes_table=FALSE, calc_TTL_loglike_from_condlikes_table=TRUE)
{
defaults='
Qmat=NULL; spPmat=NULL; min_branchlength=0.000001; return_what="loglike"; probs_of_states_at_root=NULL; rootedge=FALSE; sparse=FALSE; printlevel=1; use_cpp=TRUE; input_is_COO=FALSE; spPmat_inputs=NULL; cppSpMethod=3; cluster_already_open=NULL; calc_ancprobs=FALSE; include_null_range=TRUE; fixnode=NULL; fixlikes=NULL; inputs=inputs; allareas=allareas; all_states_list=all_states_list; return_condlikes_table=FALSE; calc_TTL_loglike_from_condlikes_table=TRUE
'
defaults='
maxareas = 4
include_null_range = TRUE
phy = read.tree(inputs$trfn)
tipranges = getranges_from_LagrangePHYLIP(lgdata_fn=np(inputs$geogfn))
tip_condlikes_of_data_on_each_state = tipranges_to_tip_condlikes_of_data_on_each_state(tipranges, phy, maxareas=maxareas, include_null_range=include_null_range)
allareas = getareas_from_tipranges_object(tipranges)
all_states_list = rcpp_areas_list_to_states_list(areas=allareas, include_null_range=TRUE, maxareas=maxareas)
tmpres = calc_loglike_sp_stratified(tip_condlikes_of_data_on_each_state, phy, Qmat=NULL, spPmat=NULL, min_branchlength=0.000001, return_what="all", probs_of_states_at_root=NULL, rootedge=TRUE, sparse=FALSE, printlevel=0, use_cpp=TRUE, input_is_COO=FALSE, spPmat_inputs=NULL, cppSpMethod=3, cluster_already_open=NULL, calc_ancprobs=FALSE, include_null_range=TRUE, fixnode=NULL, fixlikes=NULL, inputs=inputs, allareas=allareas, all_states_list=all_states_list, return_condlikes_table=FALSE)
tmpres
min_branchlength=0.000001
include_null_range=TRUE
printlevel=0
cppSpMethod=3
return_condlikes_table=TRUE
calc_TTL_loglike_from_condlikes_table=TRUE
calc_ancprobs=TRUE
'
defaults='
tmpres = calc_loglike_sp_stratified(tip_condlikes_of_data_on_each_state, phy, Qmat=NULL, spPmat=NULL, min_branchlength=0.000001, return_what="all", probs_of_states_at_root=NULL, rootedge=TRUE, sparse=FALSE, printlevel=0, use_cpp=TRUE, input_is_COO=FALSE, spPmat_inputs=NULL, cppSpMethod=3, cluster_already_open=NULL, calc_ancprobs=TRUE, include_null_range=TRUE, fixnode=NULL, fixlikes=NULL, inputs=inputs, allareas=allareas, all_states_list=all_states_list, return_condlikes_table=TRUE, calc_TTL_loglike_from_condlikes_table=TRUE)
'
# defaults='
# # STANDARD DEBUGGING HERE
# tip_condlikes_of_data_on_each_state=tip_condlikes_of_data_on_each_state; phy=phy; Qmat=NULL; spPmat=NULL; min_branchlength=0.000001; return_what="loglike"; probs_of_states_at_root=NULL; rootedge=TRUE; sparse=FALSE; printlevel=0; use_cpp=TRUE; input_is_COO=FALSE; spPmat_inputs=NULL; cppSpMethod=3; cluster_already_open=NULL; calc_ancprobs=FALSE; include_null_range=TRUE; fixnode=fixnode; fixlikes=fixlikes; inputs=BioGeoBEARS_run_object; allareas=areas; all_states_list=states_list; return_condlikes_table=TRUE; calc_TTL_loglike_from_condlikes_table=TRUE;
# ' # end junk
# START OF FUNCTION
BioGeoBEARS_run_object = inputs
if (is.null(inputs$printlevel))
{
inputs$printlevel = 0
}
printlevel = inputs$printlevel
# Is this a traits-based analysis?
traitTF = is.null(BioGeoBEARS_run_object$trait) == FALSE
if (traitTF == TRUE)
{
trait_Pmat_txt = BioGeoBEARS_run_object$trait_Pmat_txt
num_trait_states = ncol(trait_Pmat_txt)
} # END if (traitTF == TRUE)
# Initialize m
m = NULL
# Initialize jts_matrix, matrix of t12, t23, etc., during a j event
jts_matrix = NULL
# Put the parameters into the BioGeoBEARS_model_object, so that they can be universally read out
# into any function
BioGeoBEARS_model_object = BioGeoBEARS_run_object$BioGeoBEARS_model_object
#print(params)
#print(BioGeoBEARS_model_object)
######################################################
# 2016-03-23_NJM: adding rescaling
# (unscale params, if they were used before)
######################################################
if (BioGeoBEARS_run_object$rescale_params == TRUE)
{
BioGeoBEARS_model_object@params_table = unscale_BGB_params(scaled_params_table=BioGeoBEARS_model_object@params_table)
BioGeoBEARS_run_object$BioGeoBEARS_model_object@params_table = BioGeoBEARS_model_object@params_table
}
# Update linked parameters
BioGeoBEARS_model_object = calc_linked_params_BioGeoBEARS_model_object(BioGeoBEARS_model_object)
# Update to the run object, just to be SURE
BioGeoBEARS_run_object$BioGeoBEARS_model_object = BioGeoBEARS_model_object
inputs$BioGeoBEARS_model_object = BioGeoBEARS_model_object
#######################################################
# Error check on fixnode / fixlikes
#######################################################
if (!is.null(fixnode))
{
if (( is.null(dim(fixlikes)) == TRUE) && (length(fixnode)==1))
{
pass_fixlikes = TRUE
} else {
if ( (dim(fixlikes)[1]) == length(fixnode) )
{
pass_fixlikes = TRUE
# Another error check: Multiple nodes in 'fixnode' MUST be sorted in increasing order
if ( (all(c(order(fixnode) == 1:length(fixnode)))) == TRUE)
{
pass_fixlikes = TRUE
} else {
pass_fixlikes = FALSE
error_msg = "ERROR in calc_loglike_sp_stratified(): \n Multiple nodes in 'fixnode' MUST be sorted in increasing order.\n"
cat(error_msg)
stop(error_msg)
}
} else {
pass_fixlikes = FALSE
error_msg = "ERROR in calc_loglike_sp_stratified(): Either:\n (1) fixnode must be a single node number, and fixlikes must be a vector, or\n (2) fixlikes like must be a matrix with the # of rows equal to length(fixnode).\n"
cat(error_msg)
stop(error_msg)
} # end 2nd if()
} # end 1st if()
}
if ((return_condlikes_table == TRUE) || (calc_TTL_loglike_from_condlikes_table == TRUE))
{
names_in_inputs = names(inputs) # can't use exists() on list items; reasons explained here:
# http://stackoverflow.com/questions/7719741/how-to-test-if-list-element-exists
if ( ("master_table" %in% names_in_inputs) == TRUE)
{
# Old
#condlikes_table = matrix(data=0, nrow=nrow(inputs$master_table), ncol=length(all_states_list))
# When traits are possible
#condlikes_table = matrix(data=0, nrow=nrow(inputs$master_table), ncol=numstates_geogtrait)
condlikes_table = matrix(data=0, nrow=nrow(inputs$master_table), ncol=ncol(tip_condlikes_of_data_on_each_state))
# Put in the conditional likelihoods at the tips
tmprownums = nrow(tip_condlikes_of_data_on_each_state)
condlikes_table[1:tmprownums, ] = tip_condlikes_of_data_on_each_state
if (calc_ancprobs == TRUE)
{
if (traitTF == FALSE)
{
relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS_TABLE = matrix(data=0, nrow=nrow(inputs$master_table), ncol=length(all_states_list))
} else {
relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS_TABLE = matrix(data=0, nrow=nrow(inputs$master_table), ncol=length(all_states_list)*num_trait_states)
} # END if (traitTF == FALSE)
}
} else {
cat("\n\nWARNING: in 'calc_loglike_sp_stratified()', you set 'return_condlikes_table=TRUE'\n
and/or calc_TTL_loglike_from_condlikes_table=TRUE, but this requires that\n
'inputs$master_table' be available from the 'section_the_tree()' function. Try
\nrunning 'inputs=section_the_tree(inputs, make_master_table=TRUE).\n", sep="")
cat("\nAs a result, we are setting return_condlikes_table=FALSE\n\n", sep="")
return_condlikes_table=FALSE
}
}
# Get the timeperiods; if 1 time period, run once; if multiple, run
if (is.null(inputs$timeperiods) || length(inputs$timeperiods) == 1)
{
num_timeperiods = 1
} else {
# Multiple timeperiods
timeperiods = inputs$timeperiods
num_timeperiods = length(timeperiods)
}
# All areas in the total analysis
allareas=allareas
allareas_list = seq(0, length(allareas)-1, 1) # 0-base indexes
# This is used on the uppass -- it might change, if we re-write this to have changing # of areas within the stratum
areas = allareas_list
# All states in the total analysis (after e.g. limitation on total # of areas)
all_states_list=all_states_list
# Other variables
# sparse should probably be false for ancestral states/downpass/uppass considerations
BioGeoBEARS_model_object = inputs$BioGeoBEARS_model_object
force_sparse = sparse
#######################################################
# Set up the starting probabilities etc.
#######################################################
# Starting tip_relative_probs_of_each_state
#current_condlikes_row = 0
tip_relative_probs_of_each_state = tip_condlikes_of_data_on_each_state / rowSums(tip_condlikes_of_data_on_each_state)
tip_relative_probs_of_each_state
current_tip_relative_probs_of_each_state = tip_relative_probs_of_each_state
current_condlikes_row = nrow(current_tip_relative_probs_of_each_state)
# matrix to hold all of the relative probabilities; Making this purposely too big
numnodes = phy$Nnode + length(phy$tip.label)
all_relative_probs_of_each_state = matrix(0, ncol=ncol(tip_condlikes_of_data_on_each_state), nrow=(numnodes*length(timeperiods)))
all_condlikes_of_each_state = matrix(0, ncol=ncol(tip_condlikes_of_data_on_each_state), nrow=(numnodes*length(timeperiods)))
all_relative_probs_of_each_state[1:current_condlikes_row, ] = current_tip_relative_probs_of_each_state
all_condlikes_of_each_state[1:current_condlikes_row, ] = current_tip_relative_probs_of_each_state
#######################################################
# Take the original tree and scale the branchlengths by b (branch-length exponent)
# b=0, all branches=1; b=1, all branches normal
#######################################################
previous_timepoint = 0
original_phy = phy
# This is pointless in a stratified analysis
# b_branch_length_exponent = inputs$BioGeoBEARS_model_object@params_table["b", "est"]
# Branch-length exponent (must be applied *after* tree has been sectioned!)
#original_phy$edge.length = original_phy$edge.length ^ b_branch_length_exponent
phy_as_it_is_chopped_down = original_phy
#tiplikes_to_delete = list()
########################################################
# DOWNPASS through the tree pieces
########################################################
for (i in 1:num_timeperiods)
{
#i=1
#cat("\ni=",i, sep="")
# Set the dispersal and extinction rate
d = BioGeoBEARS_model_object@params_table["d","est"]
e = BioGeoBEARS_model_object@params_table["e","est"]
a = BioGeoBEARS_model_object@params_table["a","est"]
#######################################################
# CONVERT NULL RANGE FROM "_" TO NA -- CRUCIAL, CAUSES CRASH OTHERWISE!!
#######################################################
# if (include_null_range == TRUE)
# {
# TF = all_states_list == "_"
# all_states_list[TF] = NA
# } else {
# TF = all_states_list == "_"
# all_states_list[TF] = NA
# }
#######################################################
# Cut down the number of areas, by what is allowed
# (it would be more efficient to do this once during setup,
# but probably no biggie)
#######################################################
# states_to_use_TF: states to use in Qmat, speciation models, etc.
# states_allowed_TF: use this to zero out impossible ancestral states according to
# areas_allowed matrix
#######################################################
# Should we modify the list of allowed states?
# default: no areas_allowed or areas_adjacency constraints
user_specified_constraints_on_states_list_TF = FALSE
states_allowed_TF1 = rep(TRUE, length(all_states_list))
states_allowed_TF2 = rep(TRUE, length(all_states_list))
states_allowed_TF3 = rep(TRUE, length(all_states_list))
if ( (is.null(inputs$list_of_areas_allowed_mats) == FALSE))
{
user_specified_constraints_on_states_list_TF = TRUE
}
if ( (is.null(inputs$list_of_areas_adjacency_mats) == FALSE))
{
user_specified_constraints_on_states_list_TF = TRUE
}
if ( (is.null(inputs$lists_of_states_lists_0based) == FALSE))
{
user_specified_constraints_on_states_list_TF = TRUE
}
# Get TF for whether each state in the master list is
# turned on in this time period.
# (then edit Qmat etc.)
if (user_specified_constraints_on_states_list_TF == TRUE)
{
# Check that lists_of_states_lists_0based has been specified
if ( is.null(inputs$lists_of_states_lists_0based) == TRUE )
{
errortxt = paste0("STOP ERROR in calc_loglike_sp_stratified(): User has specified areas_allowed or area_adjacency constraints, but 'lists_of_states_lists_0based' has not been added to the BioGeoBEARS_run_object.")
cat("\n\n")
cat(errortxt)
cat("\n\n")
stop(errortxt)
}
# Areas allowed in this time bin
if ( (is.null(inputs$list_of_areas_allowed_mats) == FALSE))
{
areas_allowed_mat = inputs$list_of_areas_allowed_mats[[i]]
states_allowed_TF1 = sapply(X=all_states_list, FUN=check_if_state_is_allowed, areas_allowed_mat)
#states_to_use_TF = all_states_list %in% tmp_states_list
if (include_null_range == TRUE)
{
states_allowed_TF1[1] = TRUE
}
# NO; use all areas for this
# states_to_use_TF = states_allowed_TF
} # END if ( (is.null(inputs$list_of_areas_allowed_mats) == FALSE))
# Areas adjacency
if ( (is.null(inputs$list_of_areas_adjacency_mats) == FALSE))
{
areas_adjacency_mat = inputs$list_of_areas_adjacency_mats[[i]]
states_allowed_TF2 = sapply(X=all_states_list, FUN=check_if_state_is_allowed_by_adjacency, areas_adjacency_mat)
#states_to_use_TF = all_states_list %in% tmp_states_list
if (include_null_range == TRUE)
{
states_allowed_TF2[1] = TRUE
}
# NO; use all areas for this
# states_to_use_TF = states_allowed_TF
} # END if ( (is.null(inputs$list_of_areas_adjacency_mats) == FALSE))
# Manual list of allowed states
if ( (is.null(inputs$lists_of_states_lists_0based) == FALSE))
{
states_allowed_TF3 = all_states_list %in% inputs$lists_of_states_lists_0based[[i]]
if (include_null_range == TRUE)
{
states_allowed_TF3[1] = TRUE
}
} # END if ( (is.null(inputs$lists_of_states_lists_0based) == FALSE))
# Combine the 3 (areas_allowed, areas_adjacency, lists_of_states_lists_0based)
states_allowed_TF = ((states_allowed_TF1 + states_allowed_TF2 + states_allowed_TF3) == 3)
# CHANGE the inputs here, so that it can be used easily in BSM
inputs$lists_of_states_lists_0based[[i]] = all_states_list[states_allowed_TF]
} else {
# Otherwise,
# make no change
pass = 1
#states_list = states_list
states_allowed_TF = rep(TRUE, length(all_states_list))
} # END if (user_specified_constraints_on_states_list_TF == TRUE)
# Use this for regular calculations (Qmat, speciation models, etc.)
states_to_use_TF = rep(TRUE, length(all_states_list))
#print(states_to_use_TF)
#print(states_allowed_TF)
#####################################################
# Make the dedf matrix for this time period
#####################################################
# If there is a distance matrix, use the first one
# (non-stratified analysis, here)
# If there is a distance matrix, use the first one
# (non-stratified analysis, here)
if ( (is.null(inputs$list_of_distances_mats) == FALSE))
{
distances_mat = inputs$list_of_distances_mats[[i]]
} else {
# Default is all areas effectively equidistant
distances_mat = matrix(1, nrow=length(areas), ncol=length(areas))
}
# Get the exponent on distance, apply to distances matrix
x = BioGeoBEARS_model_object@params_table["x","est"]
dispersal_multipliers_matrix = distances_mat ^ x
# Environmental distances
if ( (is.null(inputs$list_of_envdistances_mats) == FALSE))
{
envdistances_mat = inputs$list_of_envdistances_mats[[i]]
} else {
# Default is all areas effectively equidistant
envdistances_mat = matrix(1, nrow=length(areas), ncol=length(areas))
}
# Get the exponent on environmental distance, apply to distances matrix
n = BioGeoBEARS_model_object@params_table["n","est"]
dispersal_multipliers_matrix = dispersal_multipliers_matrix * envdistances_mat^n
# Apply manual dispersal multipliers, if any
# If there is a manual dispersal multipliers matrix, use the first one
# (non-stratified analysis, here)
if ( (is.null(inputs$list_of_dispersal_multipliers_mats) == FALSE))
{
manual_dispersal_multipliers_matrix = as.matrix(inputs$list_of_dispersal_multipliers_mats[[i]])
} else {
# Default is all areas effectively equidistant
manual_dispersal_multipliers_matrix = matrix(1, nrow=length(areas), ncol=length(areas))
}
# Get the exponent on manual dispersal multipliers
w = BioGeoBEARS_model_object@params_table["w","est"]
#print("manual_dispersal_multipliers_matrix ^ w")
#print(manual_dispersal_multipliers_matrix ^ w)
# Apply element-wise
dispersal_multipliers_matrix = dispersal_multipliers_matrix * manual_dispersal_multipliers_matrix ^ w
#######################################################
# multiply parameter d by dispersal_multipliers_matrix
#######################################################
dmat_times_d = dispersal_multipliers_matrix * matrix(d, nrow=length(areas), ncol=length(areas))
amat = dispersal_multipliers_matrix * matrix(a, nrow=length(areas), ncol=length(areas))
#######################################################
#######################################################
# Do area-dependence and extinction multipliers list
#######################################################
#######################################################
if ( (is.null(inputs$list_of_area_of_areas) == FALSE))
{
area_of_areas = inputs$list_of_area_of_areas[[i]]
} else {
# Default is all areas effectively equidistant
area_of_areas = rep(1, length(areas))
}
# Get the exponent on extinction, apply to extinction modifiers
u = BioGeoBEARS_model_object@params_table["u","est"]
extinction_modifier_list = area_of_areas ^ (1 * u)
# Apply to extinction rate
elist = extinction_modifier_list * rep(e, length(areas))
# Calculate the Q matrix
# if (is.null(Qmat))
# {
# 2014 version
#Qmat_tmp = rcpp_states_list_to_DEmat(areas_list=allareas_list, states_list=all_states_list[states_to_use_TF],
#dmat=dmat_times_d, elist=elist, amat=amat, include_null_range=TRUE, normalize_TF=TRUE, makeCOO_TF=force_sparse)
# 2015 version
# Qmat_tmp = rcpp_states_list_to_DEmat(areas_list=allareas_list, states_list=all_states_list[states_allowed_TF], dmat=dmat_times_d, elist=elist, amat=amat, include_null_range=include_null_range, normalize_TF=TRUE, makeCOO_TF=force_sparse)
# 2018 version
if (traitTF == FALSE)
{
Qmat_tmp = rcpp_states_list_to_DEmat(areas_list=allareas_list, states_list=all_states_list[states_allowed_TF], dmat=dmat_times_d, elist=elist, amat=amat, include_null_range=include_null_range, normalize_TF=TRUE, makeCOO_TF=force_sparse)
#print(dim(Qmat_tmp))
# } else {
# # If Qmat is pre-specified
# Qmat_tmp = Qmat
# }
} # END if (traitTF == FALSE)
# Analysis with a trait modifying dispersal rate
if (traitTF == TRUE)
{
num_geog_states = length(all_states_list[states_allowed_TF])
numstates_geogtrait = num_trait_states * num_geog_states
# print("states_allowed_TF")
# print(states_allowed_TF)
# print("num_geog_states")
# print(num_geog_states)
# print("num_trait_states")
# print(num_trait_states)
# print("numstates_geogtrait")
# print(numstates_geogtrait)
# Get the modified Qmatrix (traits + geog)
tmpres = modify_Qmat_with_trait(Qmat=NULL, BioGeoBEARS_run_object, numstates_geogtrait=numstates_geogtrait, areas_list=allareas_list, states_list=all_states_list[states_allowed_TF], dispersal_multipliers_matrix=dispersal_multipliers_matrix, elist=elist, force_sparse=force_sparse)
Qmat_tmp = tmpres$Qmat
m = tmpres$m
# If the trait can change during jump events
if (is.null(BioGeoBEARS_run_object$jts_txt_matrix) == FALSE)
{
jts_txt_matrix = BioGeoBEARS_run_object$jts_txt_matrix
jts_matrix = matrix(data=0, nrow=nrow(jts_txt_matrix), ncol=ncol(jts_txt_matrix))
TF_matrix = matrix(data=TRUE, nrow=nrow(jts_txt_matrix), ncol=ncol(jts_txt_matrix))
diag(TF_matrix) = FALSE
jts_txt_params = c(jts_txt_matrix[TF_matrix])
jts_txt_params
# Populate the numeric jts_matrix
for (jts_i in 1:nrow(jts_txt_matrix))
{
diag_val = 1
for (jts_j in 1:ncol(jts_txt_matrix))
{
if (jts_i == jts_j)
{
next()
}
jts_txt = jts_txt_matrix[jts_i,jts_j]
newval = as.numeric(BioGeoBEARS_model_object@params_table[jts_txt, "est"])
jts_matrix[jts_i,jts_j] = newval
diag_val = 1-newval
}
# Populate the diagonal
jts_matrix[jts_i,jts_i] = diag_val
} # END for (jts_i in 1:nrow(jts_txt_matrix))
} # END if (is.null(BioGeoBEARS_run_object$jts_txt_matrix) == FALSE)
} else {
num_geog_states = length(all_states_list[states_allowed_TF])
numstates_geogtrait = num_geog_states
} # END if (traitTF == TRUE)
if (force_sparse == TRUE)
{
# Convert the COO-formatted trait+geog matrix to CRS format for kexpmv
tmpQmat_in_REXPOKIT_coo_fmt = Qmat_tmp
# Make a CRS-formatted matrix, for kexpmv
# DO THE TRANSPOSE HERE, trait+geog matrices assembled transposed
tmpQmat_in_kexpmv_crs_fmt = coo2crs(
ia=tmpQmat_in_REXPOKIT_coo_fmt[,"ia"],
ja=tmpQmat_in_REXPOKIT_coo_fmt[,"ja"],
a =tmpQmat_in_REXPOKIT_coo_fmt[,"a"],
n=numstates_geogtrait, transpose_needed=FALSE)
Qmat_tmp = tmpQmat_in_REXPOKIT_coo_fmt
} # END if (force_sparse == TRUE)
# Now. IF you have a subtree structure, you need to run this with a cladogenesis matrix,
# through calc_loglike_sp(), like normal.
# If there's just one tree, store it in the object
if (is.null(inputs$timeperiods) || length(inputs$timeperiods) == 1)
{
#tr = read.tree(inputs$trfn)
tr = check_trfn(trfn=inputs$trfn)
tree_to_chainsaw = NULL
tree_to_chainsaw[[1]] = tr
return_pieces_list = NULL
return_pieces_list[[1]] = tr
return_pieces_basenames = NULL
# Merge THEN split THEN sort!!
tmp_labels_merge = paste(tr$tip.label, collapse=",", sep="")
tmp_labels_split = strsplit(tmp_labels_merge, split=",")[[1]]
return_pieces_basenames[[1]] = paste(sort(tmp_labels_split), collapse=",", sep="")
chainsaw_object = list()
chainsaw_object$tree_to_chainsaw = tree_to_chainsaw
chainsaw_object$return_pieces_list = return_pieces_list
chainsaw_object$return_pieces_basenames = return_pieces_basenames
attr(chainsaw_object, "class") = "chainsaw_result"
inputs$tree_sections_list[[1]] = chainsaw_object
} # END if (is.null(inputs$timeperiods) || length(inputs$timeperiods) == 1)
# OK, if you have a tree here, do that
# if not, exp the branch
#######################################################
# Cladogenic model
#######################################################
spPmat_inputs = get_spPmat_inputs_from_BGB(BioGeoBEARS_run_object=BioGeoBEARS_run_object, states_list=all_states_list[states_allowed_TF], dispersal_multipliers_matrix=dispersal_multipliers_matrix)
########################################################
# DOWNPASS through the tree pieces
#######################################################
# Go through the tree pieces
#######################################################
chainsaw_result = inputs$tree_sections_list[[i]]
# You will need the new tip likelihoods of the new tree:
current_tip_relative_probs_of_each_state
# Old
#new_tip_likelihoods = matrix(0, nrow=length(chainsaw_result$return_pieces_list), ncol=length(all_states_list))
# When traits are possible
new_tip_likelihoods = matrix(0, nrow=length(chainsaw_result$return_pieces_list), ncol=ncol(current_tip_relative_probs_of_each_state))
# Error check for traits model
if (traitTF == TRUE)
{
# DOWNPASS definition of states_allowed_TF with traits
wTrait_states_allowed_TF = c(rep(states_allowed_TF, times=num_trait_states))
#print("wTrait_states_allowed_TF:")
#print(wTrait_states_allowed_TF)
if (sum(wTrait_states_allowed_TF) != numstates_geogtrait)
{
txt = paste0("STOP ERROR in calc_loglike_sp_stratified(): sum(wTrait_states_allowed_TF)=", sum(wTrait_states_allowed_TF), ", and numstates_geogtrait=", numstates_geogtrait, ". They must be equal to proceed.")
cat("\n\n")
cat(txt)
cat("\n\n")
stop(txt)
} # END if (ncol(current_tip_relative_probs_of_each_state) != numstates_geogtrait)
}
for (jj in 1:length(chainsaw_result$return_pieces_list))
{
#cat("i=", i, " jj=",jj, "\n", sep="")
treepiece = chainsaw_result$return_pieces_list[[jj]]
treepiece
############################################
# DOWNPASS -- process just a branch section (an edge)
############################################
if (is.numeric(treepiece))
{
do_exponentiation = TRUE # default
# Check for fossil
# If you are storing ALL of the conditional likelihoods that were calculated
if ((return_condlikes_table == TRUE) || (calc_TTL_loglike_from_condlikes_table == TRUE))
{
# Find the row in the master_table
TF1 = inputs$master_table$stratum == i
TF2 = inputs$master_table$piecenum == jj
TF3 = inputs$master_table$piececlass == "subbranch"
TF = (TF1 + TF2 + TF3) == 3
this_row_of_master_table_is_being_used = TF
# Find the row
rownum = (1:nrow(condlikes_table))[TF]
tmp_master_table_row = inputs$master_table[rownum, ]
# Error check
if (nrow(tmp_master_table_row) != 1)
{
stoptxt = paste("\n\nFATAL ERROR in stratified loglike calculation at i=", i, "; jj=", jj, "; ", 'inputs$master_table$piececlass == "subbranch"',
"\nnrow(tmp_master_table_row) should =1 but instead =", nrow(tmp_master_table_row), "\n", sep="")
stop(stoptxt)
}
# Now check if it's a fossil that appears in this time bin
master_tip_time_bp = tmp_master_table_row$time_bp
time_top = tmp_master_table_row$time_top
time_bot = tmp_master_table_row$time_bot
is_fossil = tmp_master_table_row$fossils
# THIS (FOSSIL_HASNT_OCCURRED_YET) MUST GO *BEFORE* THE 'FOSSIL_TIP_DOES_OCCUR_IN_BIN' IF/THEN
# If this is TRUE, this fossil hasn't occurred yet, and you are looking at the "phantom limb".
# In this case, DON'T do matrix exponentiation, just copy the likelihoods down!!
if (( master_tip_time_bp > time_top) && (is.na(is_fossil) == FALSE) && (is_fossil == TRUE))
{
do_exponentiation = FALSE
}
# THIS (FOSSIL_TIP_DOES_OCCUR_IN_BIN) MUST GO *AFTER* THE 'FOSSIL_HASNT_OCCURRED_YET' IF/THEN
# If this is TRUE, there's a match and the fossil tip appears in this time period
# (THIS IS CRUCIAL TO GETTING STRATIFICATION TO WORK -- YOU NEED THE is_fossil==TRUE ADDED!!)
if ( (master_tip_time_bp >= time_top) && (master_tip_time_bp < time_bot) && (is.na(is_fossil) == FALSE) && (is_fossil == TRUE))
{
# Shorten the branchlength by master_tip_time_bp-time_top
amount_to_shorten_by = master_tip_time_bp-time_top
treepiece = treepiece - amount_to_shorten_by
do_exponentiation = TRUE
}
# 2016-02-24
# Also, DON'T do exponentiation if the branch length in the master branch
# is a direct ancestor, i.e., less than min_branchlength
if (tmp_master_table_row$edge.length < min_branchlength)
{
#print("It's a direct ancestor, so DON'T do matrix exponentiation!")
# It's a direct ancestor, so DON'T do matrix exponentiation
do_exponentiation = FALSE
}
# If FALSE, you're below all this and hopefully don't care
} # END if ((return_condlikes_table == TRUE) || (calc_TTL_loglike_from_condlikes_table == TRUE))
tipname = chainsaw_result$return_pieces_basenames[[jj]]
tip_TF = phy_as_it_is_chopped_down$tip.label == tipname
# 22 rather than 17
# 2019-06-21_NJM error:
# Error in current_tip_relative_probs_of_each_state[tip_TF, states_to_use_TF] :
# (subscript) logical subscript too long
# This error was due to tips with age of 0.07019192 or 0.02674141 time_bp, with
# time-slices of 0.01, 0.012
# print("2019-06-21_NJM error:")
# print(tip_TF)
# print(length(tip_TF))
# print(states_to_use_TF)
# print(length(states_to_use_TF))
# print(current_tip_relative_probs_of_each_state)
# print(dim(current_tip_relative_probs_of_each_state))
relative_probs_of_each_state_at_the_tip_of_this_branch = current_tip_relative_probs_of_each_state[tip_TF, states_to_use_TF]
# if (sum(tip_TF) > 2)
# {
# relative_probs_of_each_state_at_the_tip_of_this_branch = current_tip_relative_probs_of_each_state[tip_TF, states_to_use_TF]
# } else {
# relative_probs_of_each_state_at_the_tip_of_this_branch_tmp = current_tip_relative_probs_of_each_state
# relative_probs_of_each_state_at_the_tip_of_this_branch = relative_probs_of_each_state_at_the_tip_of_this_branch_tmp[states_to_use_TF]
# relative_probs_of_each_state_at_the_tip_of_this_branch = matrix(data=relative_probs_of_each_state_at_the_tip_of_this_branch, nrow=1)
# }
if (do_exponentiation == TRUE)
{
# DENSE MATRIX EXPONENTIATION DOWN ONE BRANCH
if (force_sparse == FALSE)
{
# t = treepiece
independent_likelihoods_at_branch_section_bottom = expokit_dgpadm_Qmat2(times=treepiece, Qmat=Qmat_tmp, transpose_needed=TRUE)
#independent_likelihoods_at_branch_section_bottom = expokit_dgpadm_Qmat(Qmat=Qmat_tmp, t=treepiece, transpose_needed=FALSE)
if (traitTF == FALSE)
{
# 2014 version
#conditional_likelihoods_at_branch_section_bottom = matrix(independent_likelihoods_at_branch_section_bottom %*% relative_probs_of_each_state_at_the_tip_of_this_branch, nrow=1)
# 2015 version
tmp_conditional_likelihoods_at_branch_section_bottom = matrix(independent_likelihoods_at_branch_section_bottom %*% relative_probs_of_each_state_at_the_tip_of_this_branch[states_allowed_TF], nrow=1)
} else {
tmp_conditional_likelihoods_at_branch_section_bottom = matrix(independent_likelihoods_at_branch_section_bottom %*% relative_probs_of_each_state_at_the_tip_of_this_branch[wTrait_states_allowed_TF], nrow=1)
}
} else {
# SPARSE MATRIX EXPONENTIATION DOWN ONE BRANCH
# CHECK THAT IT'S IN COO FORMAT
# Sanity checks: a COO-formatted sparse Q matrix must be a 3-column
# data.frame (row index, column index, value).
# Use is.data.frame() rather than class(x) != "data.frame":
# class() can return a vector of length > 1 (e.g. for a tibble),
# which makes the if() condition error under R >= 4.2.
if (!is.data.frame(Qmat_tmp))
	{
	txt = paste0("ERROR: calc_loglike_sp_stratified is attempting to use a sparse COO-formated Q matrix, to calculated the likelihoods down one branch segment, but you provided a Qmat not in data.frame form")
	cat("\n\n")
	cat(txt)
	cat("\n\n")
	print("class(Qmat_tmp)")
	print(class(Qmat_tmp) )
	print("Qmat_tmp")
	print(Qmat_tmp)
	stop(txt)
	}

# COO format requires exactly 3 columns
if (ncol(Qmat_tmp) != 3)
	{
	txt = paste0("ERROR: calc_loglike_sp_stratified is attempting to use a sparse COO-formated Q matrix, to calculated the likelihoods down one branch segment, but you provided a Qmat that doesn't have 3 columns")
	cat("\n\n")
	cat(txt)
	cat("\n\n")
	print("class(Qmat_tmp)")
	print(class(Qmat_tmp) )
	print("Qmat_tmp")
	print(Qmat_tmp)
	stop(txt)
	}
coo_n = numstates_geogtrait
anorm = 1
#print(relative_probs_of_each_state_at_the_tip_of_this_branch[states_allowed_TF])
if (traitTF == FALSE)
{
try_result_segment = try (
kexpmv::expokit_dgexpv(mat=tmpQmat_in_kexpmv_crs_fmt, t=treepiece, vector=relative_probs_of_each_state_at_the_tip_of_this_branch[states_allowed_TF], transpose_needed=TRUE, transform_to_crs=FALSE, crs_n=numstates_geogtrait, anorm=NULL, mxstep=10000, tol=as.numeric(1e-10))
)
} else {
try_result_segment = try (
kexpmv::expokit_dgexpv(mat=tmpQmat_in_kexpmv_crs_fmt, t=treepiece, vector=relative_probs_of_each_state_at_the_tip_of_this_branch[wTrait_states_allowed_TF], transpose_needed=TRUE, transform_to_crs=FALSE, crs_n=numstates_geogtrait, anorm=NULL, mxstep=10000, tol=as.numeric(1e-10))
)
}
if (printlevel >=1)
{
txt = "S"
cat(txt)
}
# Error check
# Error check: kexpmv::expokit_dgexpv() can fail (e.g. NaNs from
# disallowed transitions, or extreme parameter values proposed during
# ML search). Trap the try() error here.
# (inherits() is the robust way to test for "try-error"; class(x)=="..."
# fails when the class attribute has length > 1.)
if (inherits(try_result_segment, "try-error"))
	{
	cat("\n\ntry-error on kexpmv::expokit_dgexpv():\n\n")
	cat("i=", i, "\n")
	cat("t=treepiece==", treepiece, "\n")
	print(tmpQmat_in_kexpmv_crs_fmt)
	print(phy2)
	print(relative_probs_of_each_state_at_branch_top_AT_node_DOWNPASS)
	print(relative_probs_of_each_state_at_branch_top_AT_node_DOWNPASS[left_desc_nodenum,])
	print(coo_n)
	print(anorm)
	print("BioGeoBEARS_model_object")
	print(BioGeoBEARS_model_object)
	stoptxt = "\n\nStopping on error in sparse exponentiation downpass (treepiece, aka a branch segment): NaNs produced in likelihood calculation. This may mean your transition matrix disallows necessary transitions. E.g., if your ranges are 'A' and 'B', and your model is DEC, then allowing range 'AB' as a possible state is required, so that you can get from 'A' to 'B' via 'AB' as the intermediate. Alternatively, NaNs can be produced sometimes if your Maximum Likelihood (ML) search proposes weird parameter values (such as a negative rate or weight) or a parameter so small that required transitions have a probability that machine precision rounds to zero or negative. Sometimes this seems to occur because optim, optimx, etc. propose parameters slightly outside the user-specified upper and lower (min/max) boundaries for some reason. One solution is often to narrow the min/max limits. \n\nAnother solution: To have this error report an extremely low log-likelihood,, set BioGeoBEARS_run_object$on_NaN_error to something like -1e50.\n\n"

	# If the user gave no fallback log-likelihood, this is fatal
	if (is.null(on_NaN_error))
		{
		stop(stoptxt)
		}
	print("print(on_NaN_error):")
	print(on_NaN_error)

	# Otherwise, return the user-specified penalty log-likelihood
	# (only valid when the caller just wants the loglike)
	if ( (is.numeric(on_NaN_error)) && (return_what == "loglike") )
		{
		warning(paste0("\n\nWarning on error in sparse exponentiation downpass (treepiece, aka a branch segment): NaNs produced in likelihood calculation. This may mean your transition matrix disallows necessary transitions. E.g., if your ranges are 'A' and 'B', and your model is DEC, then allowing range 'AB' as a possible state is required, so that you can get from 'A' to 'B' via 'AB' as the intermediate. Alternatively, NaNs can be produced sometimes if your Maximum Likelihood (ML) search proposes weird parameter values (such as a negative rate or weight) or a parameter so small that required transitions have a probability that machine precision rounds to zero or negative. Sometimes this seems to occur because optim, optimx, etc. propose parameters slightly outside the user-specified upper and lower (min/max) boundaries for some reason. One solution is often to narrow the min/max limits. \n\nYou are using another solution: Normally, this would be a stop error, but you specified that BioGeoBEARS_run_object$on_NaN_error=", on_NaN_error, "\n\n"))
		return(on_NaN_error)
		} else {
		stop(stoptxt)
		}
	} # END if (inherits(try_result_segment, "try-error"))

# If all checks are survived, get the downpass probabilities
tmp_conditional_likelihoods_at_branch_section_bottom = c(try_result_segment$output_probs)

# Clamp tiny negative values (numerical error from the Krylov matrix
# exponentiation) to zero.
# BUGFIX: the original line assigned the whole vector into the
# negative subset (x[x<0] = x), which recycles garbage values;
# the intent is to zero out the negatives.
tmp_conditional_likelihoods_at_branch_section_bottom[tmp_conditional_likelihoods_at_branch_section_bottom < 0] = 0
} # END if (force_sparse == FALSE)
if (traitTF == FALSE)
{
# save to full matrix
conditional_likelihoods_at_branch_section_bottom = matrix(0, nrow=1, ncol=length(relative_probs_of_each_state_at_the_tip_of_this_branch))
conditional_likelihoods_at_branch_section_bottom[states_allowed_TF] = tmp_conditional_likelihoods_at_branch_section_bottom
} else {
# save to full matrix
conditional_likelihoods_at_branch_section_bottom = matrix(0, nrow=1, ncol=length(relative_probs_of_each_state_at_the_tip_of_this_branch))
conditional_likelihoods_at_branch_section_bottom[wTrait_states_allowed_TF] = tmp_conditional_likelihoods_at_branch_section_bottom
}
# Zero out impossible states according to
# areas_allowed/areas_adjacency
# keep from 2014 just to double-check
conditional_likelihoods_at_branch_section_bottom[states_allowed_TF==FALSE] = 0
#print("conditional_likelihoods_at_branch_section_bottom #1a:")
#print(conditional_likelihoods_at_branch_section_bottom)
} else {
# Copying the tip likelihoods down
conditional_likelihoods_at_branch_section_bottom = matrix(relative_probs_of_each_state_at_the_tip_of_this_branch, nrow=1)
#print("conditional_likelihoods_at_branch_section_bottom #1b:")
#print(conditional_likelihoods_at_branch_section_bottom)
} # END if (do_exponentiation == TRUE)
# Test forward exponentiation instead...NO
# independent_likelihoods_at_branch_section_bottom = expokit_dgpadm_Qmat2(times=treepiece, Qmat=Qmat_tmp, transpose_needed=FALSE)
# #independent_likelihoods_at_branch_section_bottom = expokit_dgpadm_Qmat(Qmat=Qmat_tmp, t=treepiece, transpose_needed=FALSE)
#
#
# conditional_likelihoods_at_branch_section_bottom = matrix(independent_likelihoods_at_branch_section_bottom %*% relative_probs_of_each_state_at_the_tip_of_this_branch, nrow=1)
# if (include_null_range == TRUE)
# conditional_likelihoods_at_branch_section_bottom[1] = 0
#
# Also, store the conditional likelihoods for all nodes in this subtree
chainsaw_result$conditional_likelihoods_for_nodes_plus_bottom_in_this_section[[jj]] = conditional_likelihoods_at_branch_section_bottom
# (THIS IS CRUCIAL TO GETTING STRATIFICATION TO WORK -- YOU NEED THE is_fossil==TRUE ADDED!!)
# (these don't seem essential, whether divided or not, in stratified analysis; what matters is what
# goes into condlikes_table)
# Relative probabilities -- just the new tip
chainsaw_result$relative_probs_of_each_state_at_bottom_of_root_branch[[jj]] = conditional_likelihoods_at_branch_section_bottom / sum(conditional_likelihoods_at_branch_section_bottom)
# Relative probabilities -- all nodes plus branch bottom (just branch bottom, here)
chainsaw_result$relative_probabilities_for_nodes_plus_bottom_in_this_section[[jj]] = conditional_likelihoods_at_branch_section_bottom / sum(conditional_likelihoods_at_branch_section_bottom)
#print("conditional_likelihoods_at_branch_section_bottom #2:")
#print(conditional_likelihoods_at_branch_section_bottom)
#print("sum(conditional_likelihoods_at_branch_section_bottom)")
#print(sum(conditional_likelihoods_at_branch_section_bottom))
#print("conditional_likelihoods_at_branch_section_bottom / sum(conditional_likelihoods_at_branch_section_bottom)")
#print(conditional_likelihoods_at_branch_section_bottom / sum(conditional_likelihoods_at_branch_section_bottom))
# If you are storing ALL of the conditional likelihoods that were calculated
if ((return_condlikes_table == TRUE) || (calc_TTL_loglike_from_condlikes_table == TRUE))
{
# Find the row in the big conditional likelihoods table
TF1 = inputs$master_table$stratum == i
TF2 = inputs$master_table$piecenum == jj
# 2017-04-06 fix: do BOTH subbranch and orig tip, so that we STORE the
# downpass probabilities at branch bottoms, for the original tips
# This should help with stochastic mapping...
TF3 = inputs$master_table$piececlass == "subbranch"
TF4 = inputs$master_table$piececlass == "orig_tip"
TF5 = (TF3 + TF4) == 1
TF = (TF1 + TF2 + TF5) == 3
rownum = (1:nrow(condlikes_table))[TF]
condlikes_table[rownum, ] = conditional_likelihoods_at_branch_section_bottom
# Also store the subbranch downpass relative probabilities at the bottom of each branch
if (calc_ancprobs == TRUE)
{
# Also store the subbranch downpass relative probabalities at the bottom of each branch
relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS_TABLE[rownum, ] = conditional_likelihoods_at_branch_section_bottom / sum(conditional_likelihoods_at_branch_section_bottom)
} # END if (calc_ancprobs == TRUE)
} # END if ((return_condlikes_table == TRUE) || (calc_TTL_loglike_from_condlikes_table == TRUE))
############################################
# END if (is.numeric(treepiece))
# It's just a branch section
############################################
} else {
############################################
# DOWNPASS -- on a subtree
############################################
############################################
# Otherwise, treepiece is a subtree
############################################
tmp_subtree = treepiece
# Check for fossils
# If you are storing ALL of the conditional likelihoods that were calculated
if ((return_condlikes_table == TRUE) || (calc_TTL_loglike_from_condlikes_table == TRUE))
{
tmp_subtree_tipnums = 1:length(tmp_subtree$tip.label)
for (iter in 1:length(tmp_subtree_tipnums))
{
# Find the row in the master table corresponding to this subtree_tip
subtree_tip = tmp_subtree_tipnums[iter]
TF1 = inputs$master_table$stratum == i
TF2 = inputs$master_table$piecenum == jj
TF3 = inputs$master_table$piececlass == "subtree"
TF4 = inputs$master_table$SUBnode == subtree_tip
TF = (TF1 + TF2 + TF3 + TF4) == 4
this_row_of_master_table_is_being_used = TF
# Find the row
rownum = (1:nrow(inputs$master_table))[TF]
tmp_master_table_row = inputs$master_table[rownum, ]
# Error check
if (nrow(tmp_master_table_row) != 1)
{
stoptxt = paste("\n\nFATAL ERROR in stratified loglike calculation at i=", i, "; jj=", jj, "; ",
'inputs$master_table$piececlass == "subtree"', "; subtree_tip=", subtree_tip,
"\nnrow(tmp_master_table_row) should =1 but instead =", nrow(tmp_master_table_row), "\n", sep="")
stop(stoptxt)
}
# Now check if it's a fossil that appears in this time bin
master_tip_time_bp = tmp_master_table_row$time_bp
time_top = tmp_master_table_row$time_top
time_bot = tmp_master_table_row$time_bot
is_fossil = tmp_master_table_row$fossils
# If this is TRUE, there's a match and the fossil tip appears in this time period
if ( (master_tip_time_bp >= time_top) && (master_tip_time_bp < time_bot) && (is_fossil == TRUE))
{
# Shorten the branchlength by master_tip_time_bp-time_top
amount_to_shorten_by = master_tip_time_bp-time_top
# Find the branch of the subtree!
tmp2_edgeTF = tmp_subtree$edge[,2] == subtree_tip
tmp2_edgenum = (1:nrow(tmp_subtree$edge))[tmp2_edgeTF]
# Edit the length of the branch on this subtree tip
# 2016-02-29 -- this adjustment now happens during section_the_tree()
#
#tmp_subtree$edge.length[tmp2_edgenum] = tmp_subtree$edge.length[tmp2_edgenum] - amount_to_shorten_by
# do_exponentiation = TRUE # not needed here
}
} # end forloop through subtree tips
} # End fossils check
# That should be it, everything else works as normal
# Except, do up-pass also
# Get the names of the tips in this subtree
tipnames = tmp_subtree$tip.label
# Use the tipnames to get the conditional likelihoods at these tips
tips_for_subtree_TF = phy_as_it_is_chopped_down$tip.label %in% tipnames
if (traitTF == FALSE)
{
# 2014 version
#subtree_tip_relative_probs_of_each_state = current_tip_relative_probs_of_each_state[tips_for_subtree_TF,states_to_use_TF]
# 2015 version
subtree_tip_relative_probs_of_each_state = current_tip_relative_probs_of_each_state[tips_for_subtree_TF,][,states_allowed_TF]
} else {
subtree_tip_relative_probs_of_each_state = current_tip_relative_probs_of_each_state[tips_for_subtree_TF,][,wTrait_states_allowed_TF]
}
# 2016-05-28_bug_fix
# Fix this error, e.g. when DEC* model + areas_allowed means that
# ranges_list = NULL, Kauai is just
# ranges_list = Kauai
# This means that:
# subtree_tip_relative_probs_of_each_state
# and thus
# tip_condlikes_of_data_on_each_state
# ...are just a list of numbers, not a matrix, thus
# rowSums fails in calc_loglike_sp() in that time-stratum.
#
#
# This was the error message:
#
# Error in rowSums(tip_condlikes_of_data_on_each_state) :
# 'x' must be an array of at least two dimensions
# Calls: bears_optim_run ... calc_loglike_sp_stratified -> calc_loglike_sp -> rowSums
#
# If there is only 1 geographic state...
if (sum(states_allowed_TF) == 1)
{
if (traitTF == FALSE)
{
subtree_tip_relative_probs_of_each_state = matrix(data=subtree_tip_relative_probs_of_each_state, ncol=1)
} else {
# If there's a trait, there are at least 2 geographic states
subtree_tip_relative_probs_of_each_state = matrix(data=subtree_tip_relative_probs_of_each_state, ncol=sum(wTrait_states_allowed_TF))
} # END if (traitTF == FALSE)
} # END if (sum(states_allowed_TF) == 1)
# DOWNPASS: check if this subtree contains fixed internal node(s) on the master tree
# Match the master fixnodes to the fixnodes in *JUST* this subtree
# We will then pass these fixnodes to the subtree loglike calculation
# First, we need to get the master node number, iff it's internal
#
tmp_fixnode = NULL # Default
tmp_fixlikes = NULL # Default
if ((!is.null(fixnode)) && (length(fixnode) > 0))
{
# Check for multiple fixnodes
if (length(fixnode) > 1)
{
# If there are multiple fixnodes,
# Get the matching nodes in this subtree
TF1 = inputs$master_table$stratum == i
TF2 = inputs$master_table$piecenum == jj
TF3 = inputs$master_table$piececlass == "subtree"
TF = ((TF1 + TF2 + TF3) == 3)
tmprows = inputs$master_table[TF,]
# Get the fixnodes found in this subtree
fixnodes_in_subtree_TF = fixnode %in% tmprows$node
# *IF* the subtree contains fixnodes, do this stuff
# otherwise, set to NULL
if (sum(fixnodes_in_subtree_TF) > 0)
{
#master_nodes_in_fixnode_TF = inputs$master_table$node %in% fixnode
#master_nodes_in_fixnode
#TF = (anc == fixnode) # old
# we do not use temporary_fixnode, since we need the fixnodes in the subtree numbering (tmprow$SUBnode)
temporary_fixnodes = fixnode[fixnodes_in_subtree_TF]
if (traitTF == FALSE)
{
# But we will use these
# 2016-03-15_old
#temporary_fixlikes = fixlikes[fixnodes_in_subtree_TF,]
# 2016-03-15_new by Torsten
temporary_fixlikes = fixlikes[fixnodes_in_subtree_TF,states_allowed_TF]
} else {
temporary_fixlikes = fixlikes[fixnodes_in_subtree_TF,wTrait_states_allowed_TF]
}
# The subtree nodenums corresponding to the subset temporary_fixnodes
# NOTE! THIS SUBSET THING WILL ONLY WORK IF THE NODES ARE SORTED IN ORDER FROM THE START
subtree_rows_in_fixnodes_TF = tmprows$node %in% fixnode
subtree_fixnode_master_nodenums = tmprows$node[subtree_rows_in_fixnodes_TF]
subtree_fixnode_nums = tmprows$SUBnode[subtree_rows_in_fixnodes_TF]
# We have to order these subtree fixnodes, and order the subtree fixlikes the same way
order_subtree_fixnode_nums = order(subtree_fixnode_nums)
subtree_fixnode_nums = subtree_fixnode_nums[order_subtree_fixnode_nums]
# Only reorder if there are 2 or more rows, i.e. if it's a matrix not a vector
if (length(order_subtree_fixnode_nums) > 1)
{
temporary_fixlikes = temporary_fixlikes[order_subtree_fixnode_nums, ]
}
} else {
# If *NO* fixnodes in subtree:
temporary_fixnodes = NULL
subtree_fixnode_master_nodenums = NULL
subtree_fixnode_nums = NULL
temporary_fixlikes = NULL
} # end if (sum(fixnodes_in_subtree_TF) > 0)
# Check if we're in the right stratum / piece / piececlass
# (have account for possible multiple rows)
TF1 = unique(tmprows$stratum) == i
TF2 = unique(tmprows$piecenum) == jj
TF3 = unique(tmprows$piececlass) == "subtree"
TF = ((TF1 + TF2 + TF3) == 3)
if (TF == TRUE)
{
#txt = paste("Master tree node ", fixnode, " matched to i=", i, "; jj=", jj, "; piececlass=", piececlass, "; subtree subnode=", tmprow$SUBnode, sep="")
#print(txt)
#print(fixlikes)
# Determine the number of the subnode in the subtree
if (length(subtree_fixnode_nums) == 0)
{
subtree_fixnode_nums = NULL
temporary_fixlikes = NULL
}
tmp_fixnode = subtree_fixnode_nums
tmp_fixlikes = temporary_fixlikes
} else {
tmp_fixnode = NULL
tmp_fixlikes = NULL
} # end if (TF == TRUE)
} else {
# Only 1 fixnode
temporary_fixnode = fixnode
temporary_fixlikes = c(fixlikes)
# e.g.
# fixnode=20
TF1 = inputs$master_table$node == temporary_fixnode
TF2 = inputs$master_table$SUBnode.type == "root"
TF3 = inputs$master_table$SUBnode.type == "internal"
TF = ((TF1 + TF2 + TF3) == 2)
tmprow = inputs$master_table[TF,]
# Check if we're in the right stratum / piece / piececlass
TF1 = tmprow$stratum == i
TF2 = tmprow$piecenum == jj
TF3 = tmprow$piececlass == "subtree"
TF = ((TF1 + TF2 + TF3) == 3)
if (TF == TRUE)
{
#txt = paste("Master tree node ", fixnode, " matched to i=", i, "; jj=", jj, "; piececlass=", piececlass, "; subtree subnode=", tmprow$SUBnode, sep="")
#print(txt)
#print(fixlikes)
# cat("\n\n")
# print(fixnode)
# print(temporary_fixnode)
# print(tmprow$SUBnode)
# print(temporary_fixlikes)
# Determine the number of the subnode in the subtree
tmp_fixnode = tmprow$SUBnode
# 2016-03-15_old
#tmp_fixlikes = temporary_fixlikes
# 2016-03-15_new by Torsten
tmp_fixlikes = temporary_fixlikes[states_allowed_TF]
# end if (TF == TRUE)
} else {
tmp_fixnode = NULL
tmp_fixlikes = NULL
}
} # end if (length(fixnode) > 1)
} # end if (!is.null(fixnode))
# Calculate the likelihoods for this subtree
#prt(tmp_subtree)
#print("subtree_tip_relative_probs_of_each_state")
#print(subtree_tip_relative_probs_of_each_state)
#print("min_branchlength")
#print(min_branchlength)
# print("subtree_tip_relative_probs_of_each_state:")
# print(subtree_tip_relative_probs_of_each_state)
# print("spPmat_inputs:")
# print(spPmat_inputs)
# print("Qmat_tmp:")
# print(Qmat_tmp)
calc_loglike_sp_results = calc_loglike_sp(
tip_condlikes_of_data_on_each_state=subtree_tip_relative_probs_of_each_state,
phy=tmp_subtree,
Qmat=Qmat_tmp,
spPmat=NULL,
min_branchlength=min_branchlength,
return_what="all",
probs_of_states_at_root=NULL,
rootedge=TRUE,
sparse=force_sparse,
printlevel=printlevel,
use_cpp=TRUE,
input_is_COO=force_sparse,
spPmat_inputs=spPmat_inputs,
cppSpMethod=cppSpMethod,
cluster_already_open=cluster_already_open,
calc_ancprobs=calc_ancprobs, # If TRUE, get e.g. relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS
include_null_range=include_null_range,
fixnode=tmp_fixnode,
fixlikes=tmp_fixlikes,
stratified=TRUE, # This makes calc_loglike_sp skip UPPASS probs, which are irrelevant inside stratified analyses
# 2014:states_allowed_TF=states_allowed_TF
states_allowed_TF=rep(TRUE, times=ncol(subtree_tip_relative_probs_of_each_state)),
m=m,
jts_matrix=jts_matrix,
BioGeoBEARS_model_object=BioGeoBEARS_model_object,
on_NaN_error=BioGeoBEARS_run_object$on_NaN_error
)
# Slot these likelihoods into a bigger object, if needed due to
# lists_of_states_lists_0based
if (!is.null(inputs$lists_of_states_lists_0based))
{
#print("Here!!")
#print(calc_loglike_sp_results)
#print("Here!!")
names_of_calc_loglike_sp_results_objects = names(calc_loglike_sp_results)
for (name_i in 1:length(calc_loglike_sp_results))
{
# If it's a matrix, slot it inside a new matrix
oldmat = calc_loglike_sp_results[[name_i]]
TF1 = names_of_calc_loglike_sp_results_objects[name_i] == "relative_probs_of_each_state_at_branch_top_AT_node_DOWNPASS"
TF2 = names_of_calc_loglike_sp_results_objects[name_i] == "condlikes_of_each_state"
TF3 = names_of_calc_loglike_sp_results_objects[name_i] == "relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS"
TF4 = names_of_calc_loglike_sp_results_objects[name_i] == "relative_probs_of_each_state_at_bottom_of_root_branch"
if (TF1 || TF2 || TF3)
{
# Old
if (traitTF == FALSE)
{
newmat = matrix(0, nrow=nrow(oldmat), ncol=length(states_allowed_TF))
newmat[,states_allowed_TF] = oldmat
}
# New, with traits possible
if (traitTF == TRUE)
{
full_matrix_ncols = length(states_allowed_TF) * num_trait_states
newmat = matrix(0, nrow=nrow(oldmat), ncol=full_matrix_ncols)
wTrait_states_allowed_TF = c(rep(states_allowed_TF, times=num_trait_states))
newmat[,wTrait_states_allowed_TF] = oldmat
}
calc_loglike_sp_results[[name_i]] = newmat
#print("oldmat")
#print(oldmat)
#print("newmat")
#print(newmat)
} # END if (is.matrix(oldmat))
if (TF4)
{
# Old
if (traitTF == FALSE)
{
newmat = matrix(0, nrow=1, ncol=length(states_allowed_TF))
newmat[,states_allowed_TF] = oldmat
}
# New, with traits possible
if (traitTF == TRUE)
{
full_matrix_ncols = length(states_allowed_TF) * num_trait_states
newmat = matrix(0, nrow=1, ncol=full_matrix_ncols)
wTrait_states_allowed_TF = c(rep(states_allowed_TF, times=num_trait_states))
newmat[,wTrait_states_allowed_TF] = oldmat
}
calc_loglike_sp_results[[name_i]] = newmat
#print("oldmat")
#print(oldmat)
#print("newmat")
#print(newmat)
} # END if (is.matrix(oldmat))
} # END for (name_i in 1:length(calc_loglike_sp_results))
} # END if (!is.null(inputs$lists_of_states_lists_0based))
#chainsaw_result$conditional_likelihoods_at_branch_section_bottom[[jj]] =
# Also, store the conditional likelihoods for all nodes in this subtree
# MINUS THE TIPS OF THE SUBTREE, THESE ARE ALREADY IN THERE
tmp_tipnums = 1:length(tipnames)
#tmp_tr_table = prt(tmp_subtree, printflag=FALSE, get_tipnames=FALSE)
# If you are storing ALL of the conditional likelihoods that were calculated
if ((return_condlikes_table == TRUE) || (calc_TTL_loglike_from_condlikes_table == TRUE))
{
for (rownum in 1:nrow(calc_loglike_sp_results$condlikes_of_each_state))
{
tmp_condlikes = calc_loglike_sp_results$condlikes_of_each_state[rownum,]
subtree_node = rownum
TF1 = inputs$master_table$stratum == i
TF2 = inputs$master_table$piecenum == jj
TF3 = inputs$master_table$piececlass == "subtree"
TF4 = inputs$master_table$SUBnode == subtree_node
TF = (TF1 + TF2 + TF3 + TF4) == 4
condlikes_table_rownum = (1:nrow(condlikes_table))[TF]
condlikes_table[condlikes_table_rownum, ] = tmp_condlikes
if (calc_ancprobs == TRUE)
{
# Store the state probabilities at the branch bottoms below nodes
# NOTE: calc_loglikes_sp() returns NA for the bottom of the root branch, and
# stores that instead in
if (rownum <= nrow(calc_loglike_sp_results$relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS))
{
# Check if you are the subtree root or not
if (inputs$master_table$SUBnode.type[condlikes_table_rownum] != "root")
{
# Get relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS
# For subtree tip and internal nodes
tmp = calc_loglike_sp_results$relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS[rownum,]
relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS_TABLE[condlikes_table_rownum,] = tmp
} else {
# For subtree root node
tmp = calc_loglike_sp_results$relative_probs_of_each_state_at_bottom_of_root_branch
relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS_TABLE[condlikes_table_rownum,] = tmp
}
} # END check of subtree internal/tip vs. subtree root
# Store the state probabilities at the branch bottom below the root node of the subtree
#if (rownum == nrow(calc_loglike_sp_results$relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS))
# {
# Get relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS
#tmp = NA
# Save the relative probabilities of each state at the BOTTOM of the branch
# BELOW the subtree root
# tmp = calc_loglike_sp_results$relative_probs_of_each_state_at_bottom_of_root_branch
# relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS_TABLE[condlikes_table_rownum,] = tmp
#
# cat("\n\n")
# print(i)
# print(jj)
# print(rownum)
# print(calc_loglike_sp_results$relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS)
# print(relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS_TABLE[condlikes_table_rownum,])
# print(condlikes_table_rownum)
# }
} # END if (calc_ancprobs == TRUE)
} # END for (rownum in 1:nrow(calc_loglike_sp_results$condlikes_of_each_state))
} # END if ((return_condlikes_table == TRUE) || (calc_TTL_loglike_from_condlikes_table == TRUE))
chainsaw_result$conditional_likelihoods_for_nodes_plus_bottom_in_this_section[[jj]] = matrix(data=calc_loglike_sp_results$condlikes_of_each_state[-tmp_tipnums, ], ncol=ncol(calc_loglike_sp_results$condlikes_of_each_state))
# Matrix of tip likelihoods to delete so you don't repeat using them in the total
# loglike
#tiplikes_to_delete[[jj]] = calc_loglike_sp_results$condlikes_of_each_state[tmp_tipnums, ]
# Relative probabilities -- all nodes plus branch bottom (just branch bottom, here)
chainsaw_result$relative_probabilities_for_nodes_plus_bottom_in_this_section[[jj]] = calc_loglike_sp_results$relative_probs_of_each_state_at_branch_top_AT_node_DOWNPASS[-tmp_tipnums, ]
# Relative probabilities -- just the new tip
chainsaw_result$relative_probs_of_each_state_at_bottom_of_root_branch[[jj]] = calc_loglike_sp_results$relative_probs_of_each_state_at_bottom_of_root_branch
# ONLY for the nodes in original tree, store the condlikes
# Add these to the overall list of conditional likelihoods
numrows_to_add = nrow(chainsaw_result$conditional_likelihoods_for_nodes_plus_bottom_in_this_section[[jj]])
# also remove rootedge prob (fixbug)
rownum_for_bottom_of_root = nrow(chainsaw_result$conditional_likelihoods_for_nodes_plus_bottom_in_this_section[[jj]])
startrow = current_condlikes_row + 1
endrow = current_condlikes_row + numrows_to_add
all_relative_probs_of_each_state[startrow:endrow, states_to_use_TF] = chainsaw_result$relative_probabilities_for_nodes_plus_bottom_in_this_section[[jj]]
all_condlikes_of_each_state[startrow:endrow, states_to_use_TF] = chainsaw_result$conditional_likelihoods_for_nodes_plus_bottom_in_this_section[[jj]]
# fixbug, except for root
#if (i != num_timeperiods)
# {
#all_condlikes_of_each_state[endrow, states_to_use_TF] = matrix(data=0, nrow=1, ncol=sum(states_to_use_TF))
# }
current_condlikes_row = current_condlikes_row + numrows_to_add
} # End if/then on branch vs. subtree
# Also, store the relative probabilities for the new tip
#new_tip_likelihoods[jj, states_to_use_TF] = chainsaw_result$relative_probs_of_each_state_at_bottom_of_root_branch[[jj]]
new_tip_likelihoods[jj, states_to_use_TF] = chainsaw_result$conditional_likelihoods_for_nodes_plus_bottom_in_this_section[[jj]][nrow(chainsaw_result$conditional_likelihoods_for_nodes_plus_bottom_in_this_section[[jj]]), ]
# Add these to the overall list of conditional likelihoods
# numrows_to_add = nrow(chainsaw_result$conditional_likelihoods_for_nodes_plus_bottom_in_this_section[[jj]])
#
# startrow = current_condlikes_row + 1
# endrow = current_condlikes_row + numrows_to_add
# all_relative_probs_of_each_state[startrow:endrow, states_to_use_TF] = chainsaw_result$relative_probabilities_for_nodes_plus_bottom_in_this_section[[jj]]
#
# all_condlikes_of_each_state[startrow:endrow, states_to_use_TF] = chainsaw_result$conditional_likelihoods_for_nodes_plus_bottom_in_this_section[[jj]]
#print(chainsaw_result$conditional_likelihoods_for_nodes_plus_bottom_in_this_section[[jj]])
#print(log(sum(chainsaw_result$conditional_likelihoods_for_nodes_plus_bottom_in_this_section[[jj]])))
#rowSums(all_condlikes_of_each_state) != 0
#tmp_all_condlikes_of_each_state = all_condlikes_of_each_state[rowSums(all_condlikes_of_each_state) != 0,]
#currLnL = sum(log(rowSums(tmp_all_condlikes_of_each_state)))
#cat("i=", i, "; jj=", jj, "; currLnL=", currLnL, "\n")
# current_condlikes_row = current_condlikes_row + numrows_to_add
} # End loop through jj tree pieces WITHIN a stratum
# Update for the next loop
# Tip likelihoods
current_tip_relative_probs_of_each_state = new_tip_likelihoods
# Store previous round
#old_phy_as_it_is_chopped_down = phy_as_it_is_chopped_down
#old_chainsaw_result = chainsaw_result
#old_new_tip_likelihoods = new_tip_likelihoods
# Convey the tree to the next iteration
phy_as_it_is_chopped_down = chainsaw_result$tree_to_chainsaw
} # END for (i in 1:num_timeperiods)
# END loop through i strata
##################################################################
##################################################################
##################################################################
# ENDING DOWNPASS
##################################################################
##################################################################
##################################################################
# Remove rows that have not been filled (till zero)
# A row that is all-zero was never written during the downpass (e.g.
# preallocated rows for tree pieces that did not produce condlikes);
# keep only rows with at least one nonzero conditional likelihood.
all_condlikes_of_each_state_zero_TF = all_condlikes_of_each_state == 0
all_condlikes_of_each_state_nonzero_TF = all_condlikes_of_each_state_zero_TF == FALSE
rows_that_are_NOT_numeric_zeros_TF = rowSums(all_condlikes_of_each_state_nonzero_TF) >= 1

#rowSums(all_condlikes_of_each_state) != 0
final_all_condlikes_of_each_state = all_condlikes_of_each_state[rows_that_are_NOT_numeric_zeros_TF,]

#rowSums(all_relative_probs_of_each_state) != 0
# Apply the same row filter to the relative probabilities so the two
# tables stay row-aligned.
all_relative_probs_of_each_state = all_relative_probs_of_each_state[rows_that_are_NOT_numeric_zeros_TF,]
#all_relative_probs_of_each_state

# Note: LAGRANGE uses rootedge = TRUE
# This is not the source of the bug...
#rootedge=FALSE
# Grand total log-likelihood = sum over nodes of log(sum of condlikes).
# With rootedge==FALSE, the last row (the bottom of the root branch)
# is excluded from the sum.
if (rootedge == TRUE)
	{
	grand_total_likelihood = sum(log(rowSums(final_all_condlikes_of_each_state)))
	grand_total_likelihood
	} else {
	# Skip the last row
	grand_total_likelihood = sum(log(rowSums(final_all_condlikes_of_each_state[-nrow(final_all_condlikes_of_each_state),])))
	grand_total_likelihood
	}
#rootedge=TRUE
# Check for NA -- this can be caused by e.g. dispersal matrix constraints of 0 causing NAs
# in the calculation
# Guard against NA log-likelihood -- e.g. caused by dispersal-matrix
# constraints of 0 producing NAs during the calculation.
if (is.na(grand_total_likelihood))
	{
	# Identify which rows of the relative-probabilities table went NA,
	# to report them in the error message
	TF = is.na(all_relative_probs_of_each_state[,1])
	tmpr = (1:nrow(all_relative_probs_of_each_state))[TF]
	# BUGFIX: 'collpase' was a misspelling of paste()'s 'collapse'
	# argument; the typo caused "," to be pasted as an extra element
	# instead of joining the row numbers into one string.
	stoptxt1 = paste("\n\nFATAL ERROR IN calc_loglike_sp_stratified(). grand_total_likelihood=NA.\n",
	"These rows of 'all_relative_probs_of_each_state' had NAs:\n",
	paste(tmpr, collapse=","), "\n",
	"\n",
	"One possible cause of this: your dispersal matrix may be too restrictive; try changing\n",
	"e.g. the 0 values to e.g. 0.0000001. Good luck!", sep="")
	if (printlevel > 0)
		{
		cat(stoptxt1)
		}

	# If the user supplied no fallback value, stop with the error;
	# otherwise return the user-specified penalty log-likelihood.
	if (is.null(BioGeoBEARS_run_object$on_NaN_error))
		{
		stop(stoptxt1)
		} else {
		grand_total_likelihood = BioGeoBEARS_run_object$on_NaN_error
		}
	}
# Optionally recompute the total log-likelihood directly from the condlikes
# table, restricted to rows that correspond to nodes of the ORIGINAL (master)
# tree; this reproduces the standard LAGRANGE result.
if (calc_TTL_loglike_from_condlikes_table == TRUE)
{
#print ("HEY!")
# Standard LAGRANGE result (exactly)
TF2 = inputs$master_table$SUBnode.type == "internal" # internal nodes in subtrees are also internal nodes in the master tree
TF3 = inputs$master_table$SUBnode.type == "orig_tip" # These are the original tips likelihoods; doesn't matter for unambiguous tips, but
# DOES matter if there is a detection model.
TF4 = inputs$master_table$SUBnode.type == "root" # root nodes in subtrees are also internal nodes in the master tree
# The three categories are mutually exclusive, so ==1 means "any of them"
TF234 = (TF2 + TF3 + TF4) == 1
sum(TF234)
#TF5 = inputs$master_table$piececlass == "subbranch"
#TF234 = (TF2 + TF3 + TF4 + TF5) == 1
TF = TF234 == 1
nodes_in_original_tree = inputs$master_table[TF,]
# Reorder the selected rows into original-tree node-number order
node_order_original = order(nodes_in_original_tree$node)
# Get some output matrices
condlikes_of_each_state = condlikes_table[TF,][node_order_original,]
computed_likelihoods_at_each_node = rowSums(condlikes_of_each_state)
grand_total_likelihood = sum(log(computed_likelihoods_at_each_node))
if (calc_ancprobs == TRUE)
{
# Downpass relprobs at the branch bottoms BELOW the nodes (just above speciation events)
# Can't use "root" nodes, they have no DOWNPASS BELOW NODE stored
TF2 = inputs$master_table$SUBnode.type == "internal"
# NOTE(review): this filter uses "tip" whereas the block above used "orig_tip";
# presumably both values occur in master_table$SUBnode.type -- confirm against
# the table-construction code (not visible here).
TF3 = inputs$master_table$SUBnode.type == "tip" # These are the original tips likelihoods; doesn't matter for unambiguous tips, but
# DOES matter if there is a detection model.
TF4 = inputs$master_table$piececlass == "subtree" # Take only subtree tips, or internal nodes; these should have the DOWNPASS below nodes stored
TF234 = (TF2 + TF3 + TF4) == 2 # ==2, because we need BOTH subtree and subtree-internal-node or subtree-tip-node
sum(TF234)
TF = TF234 == 1
nodes_in_original_tree = inputs$master_table[TF,]
node_order_original = order(nodes_in_original_tree$node)
# Normalize each row so states sum to 1 (relative probabilities)
tmptable = relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS_TABLE[TF,][node_order_original,]
relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS = tmptable / rowSums(tmptable)
# This leaves out the master tree root row, so add that in
# (the root has no branch below it, hence an NA row at its position)
root_row = rep(NA, times=ncol(relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS))
tmpmat1 = relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS[1:length(original_phy$tip.label), ]
tmpmat3_rows = (length(original_phy$tip.label)+1):nrow(relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS)
tmpmat3 = relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS[tmpmat3_rows, ]
relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS = rbind(tmpmat1, root_row, tmpmat3)
# Relative probability of states at nodes, at the branch tops, ON THE DOWNPASS
# (needs to be recalculated, to be in the right order)
relative_probs_of_each_state_at_branch_top_AT_node_DOWNPASS = NULL
#tmpmat = matrix(data=computed_likelihoods_at_each_node, ncol=1)
tmptable = condlikes_of_each_state / rowSums(condlikes_of_each_state)
relative_probs_of_each_state_at_branch_top_AT_node_DOWNPASS = tmptable
# Get the relative probabilities at the root
anc_row_of_master_table_TF = inputs$master_table$node.type=="root"
anc_node_original_tree = inputs$master_table$node[anc_row_of_master_table_TF]
anc_node_original_tree
# Just always use the root node, not anything below it!
# These become the starting probabilities for the UPPASS below.
starting_probs = relative_probs_of_each_state_at_branch_top_AT_node_DOWNPASS[anc_node_original_tree, ]
} # END if (calc_ancprobs == TRUE)
} # END if (calc_TTL_loglike_from_condlikes_table == TRUE)
#######################################################
# Now do UPPASS for internal nodes
#######################################################
# Marginal ancestral state estimation requires a second, root-to-tips pass.
# Here we allocate the UPPASS probability tables and seed the root node.
if (calc_ancprobs == TRUE)
{
cat("\nUppass started for (STRATIFIED) marginal ancestral states estimation!\n", sep="")
#######################################################
#######################################################
# THIS IS AN UPPASS FROM THE TIPS TO THE ROOT
#######################################################
#######################################################
# Setup matrices
# One row per node (tips + internals) of the ORIGINAL tree; same number of
# state columns as the DOWNPASS tables computed above.
numrows_for_UPPASS = original_phy$Nnode + length(original_phy$tip.label)
relative_probs_of_each_state_at_branch_top_AT_node_UPPASS = matrix(data=0, nrow=numrows_for_UPPASS, ncol=ncol(relative_probs_of_each_state_at_branch_top_AT_node_DOWNPASS))
relative_probs_of_each_state_at_branch_bottom_below_node_UPPASS = matrix(data=0, nrow=numrows_for_UPPASS, ncol=ncol(relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS))
# Vist edges in reverse order from the downpass
# This would only work on un-stratified trees
#edges_to_visit_uppass = seq(from=(num_internal_nodes*2), by=-2, length.out=num_internal_nodes)
# Get the starting probabilities at the root
# THIS ASSUMES THE STARTING PROBS ARE AT THE ROOT NODE, NOT SOME STUPID BRANCH BELOW THE ROOT NODE
starting_probs
# Put this starting prob into the root node
# NOTE: the root row is seeded with a FLAT (uniform) distribution here,
# not with 'starting_probs' itself.
relative_probs_of_each_state_at_branch_top_AT_node_UPPASS[anc_node_original_tree,] = 1/length(starting_probs)
#print("starting_probs")
#print(starting_probs)
#print(relative_probs_of_each_state_at_branch_top_AT_node_UPPASS[(anc_node_original_tree-3):(anc_node_original_tree+3),])
#print(dim(relative_probs_of_each_state_at_branch_top_AT_node_UPPASS))
# Go through strata in REVERSE order
#print("num_timeperiods")
#print(num_timeperiods)
#print("inputs$list_of_areas_adjacency_mats")
#print(inputs$list_of_areas_adjacency_mats)
# Loop over strata from OLDEST (num_timeperiods) to YOUNGEST (1), i.e. the
# uppass visits time periods from the root toward the tips.
for (i in num_timeperiods:1)
{
#######################################################
# Cut down the number of areas, by what is allowed
# (it would be more efficient to do this once during setup,
# but probably no biggie)
#######################################################
# states_to_use_TF: states to use in Qmat, speciation models, etc.
# states_allowed_TF: use this to zero out impossible ancestral
# states according to areas_allowed matrix/areas_adjacency matrix
#
# Should we modify the list of allowed states?
# default: no areas_allowed or areas_adjacency constraints
user_specified_constraints_on_states_list_TF = FALSE
states_allowed_TF1 = rep(TRUE, length(all_states_list))
states_allowed_TF2 = rep(TRUE, length(all_states_list))
states_allowed_TF3 = rep(TRUE, length(all_states_list))
# Any of the three constraint inputs triggers the constrained branch below
if ( (is.null(inputs$list_of_areas_allowed_mats) == FALSE))
{
user_specified_constraints_on_states_list_TF = TRUE
}
if ( (is.null(inputs$list_of_areas_adjacency_mats) == FALSE))
{
user_specified_constraints_on_states_list_TF = TRUE
}
if ( (is.null(inputs$lists_of_states_lists_0based) == FALSE))
{
user_specified_constraints_on_states_list_TF = TRUE
}
if (user_specified_constraints_on_states_list_TF == TRUE)
{
# Areas allowed
# TF1: states allowed under this stratum's areas_allowed matrix
if ( (is.null(inputs$list_of_areas_allowed_mats) == FALSE))
{
areas_allowed_mat = inputs$list_of_areas_allowed_mats[[i]]
cat("\ni=", i, "\n", sep="")
cat("areas_allowed_mat: ", sep="")
print(areas_allowed_mat)
states_allowed_TF1 = sapply(X=all_states_list, FUN=check_if_state_is_allowed, areas_allowed_mat)
#states_to_use_TF = all_states_list %in% tmp_states_list
# The null range (first state) is always kept when it is in the state space
if (include_null_range == TRUE)
{
states_allowed_TF1[1] = TRUE
}
# NO; use all areas for this
# states_to_use_TF = states_allowed_TF
} # END if ( (is.null(inputs$list_of_areas_allowed_mats) == FALSE))
# Areas adjacency
# TF2: states allowed under this stratum's areas_adjacency matrix
if ( (is.null(inputs$list_of_areas_adjacency_mats) == FALSE))
{
areas_adjacency_mat = inputs$list_of_areas_adjacency_mats[[i]]
states_allowed_TF2 = sapply(X=all_states_list, FUN=check_if_state_is_allowed_by_adjacency, areas_adjacency_mat)
#states_to_use_TF = all_states_list %in% tmp_states_list
if (include_null_range == TRUE)
{
states_allowed_TF2[1] = TRUE
}
# NO; use all areas for this
# states_to_use_TF = states_allowed_TF
} # END if ( (is.null(inputs$list_of_areas_adjacency_mats) == FALSE))
# Manual list of allowed states
# TF3: states the user explicitly listed for this stratum
if ( (is.null(inputs$lists_of_states_lists_0based) == FALSE))
{
states_allowed_TF3 = all_states_list %in% inputs$lists_of_states_lists_0based[[i]]
if (include_null_range == TRUE)
{
states_allowed_TF3[1] = TRUE
}
} # END if ( (is.null(inputs$lists_of_states_lists_0based) == FALSE))
# Combine the 3 (areas_allowed, areas_adjacency, lists_of_states_lists_0based)
# ==3 means a state must pass ALL three filters (logical AND)
states_allowed_TF = ((states_allowed_TF1 + states_allowed_TF2 + states_allowed_TF3) == 3)
# CHANGE the inputs here, so that it can be used easily in BSM
inputs$lists_of_states_lists_0based[[i]] = all_states_list[states_allowed_TF]
} else {
# Otherwise,
# make no change
pass = 1
#states_list = states_list
states_allowed_TF = rep(TRUE, length(all_states_list))
} # END if (user_specified_constraints_on_states_list_TF == TRUE)
# Use this for regular calculations (Qmat, speciation models, etc.)
states_to_use_TF = rep(TRUE, length(all_states_list))
#print("states_allowed_TF")
#print(states_allowed_TF)
#####################################################
# Make the dedf matrix for this time period
#####################################################
# If there is a distance matrix, use the first one
# (non-stratified analysis, here)
# If there is a distance matrix, take the ith one...
# (stratified analysis, here)
if ( (is.null(inputs$list_of_distances_mats) == FALSE))
{
distances_mat = inputs$list_of_distances_mats[[i]]
} else {
# Default is all areas effectively equidistant
distances_mat = matrix(1, nrow=length(areas), ncol=length(areas))
}
# Get the exponent on distance, apply to distances matrix
# ('x' is the distance-exponent free parameter; x=0 => no distance effect)
dispersal_multipliers_matrix = distances_mat ^ x
# Environmental distances
if ( (is.null(inputs$list_of_envdistances_mats) == FALSE))
{
# BUGFIX: use the current stratum's matrix [[i]], matching the per-stratum
# indexing used for list_of_distances_mats, list_of_dispersal_multipliers_mats,
# and list_of_area_of_areas in this same loop. Previously [[1]] was
# hard-coded, so every stratum silently used the first stratum's
# environmental distances.
envdistances_mat = inputs$list_of_envdistances_mats[[i]]
} else {
# Default is all areas effectively equidistant
envdistances_mat = matrix(1, nrow=length(areas), ncol=length(areas))
}
# Get the exponent on environmental distance, apply to distances matrix
# ('n' is the env-distance exponent; n=0 => no environmental-distance effect)
n = BioGeoBEARS_model_object@params_table["n","est"]
dispersal_multipliers_matrix = dispersal_multipliers_matrix * envdistances_mat^n
# Apply manual dispersal multipliers, if any
# If there is a manual dispersal multipliers matrix, use the first one
# (non-stratified analysis, here)
if ( (is.null(inputs$list_of_dispersal_multipliers_mats) == FALSE))
{
manual_dispersal_multipliers_matrix = as.matrix(inputs$list_of_dispersal_multipliers_mats[[i]])
} else {
# Default is all areas effectively equidistant
manual_dispersal_multipliers_matrix = matrix(1, nrow=length(areas), ncol=length(areas))
}
# Get the exponent on manual dispersal multipliers
# ('w' exponent; w=0 => manual multipliers have no effect)
w = BioGeoBEARS_model_object@params_table["w","est"]
# Apply element-wise
dispersal_multipliers_matrix = dispersal_multipliers_matrix * manual_dispersal_multipliers_matrix ^ w
#######################################################
# multiply parameter d by dispersal_multipliers_matrix
#######################################################
# dmat_times_d: per-pair range-expansion (d) rates; amat: per-pair
# range-switching (a) rates -- both scaled by the combined multipliers
dmat_times_d = dispersal_multipliers_matrix * matrix(d, nrow=length(areas), ncol=length(areas))
amat = dispersal_multipliers_matrix * matrix(a, nrow=length(areas), ncol=length(areas))
#######################################################
#######################################################
# Do area-dependence and extinction multipliers list
#######################################################
#######################################################
if ( (is.null(inputs$list_of_area_of_areas) == FALSE))
{
area_of_areas = inputs$list_of_area_of_areas[[i]]
} else {
# Default is all areas effectively equidistant
area_of_areas = rep(1, length(areas))
}
# Get the exponent on extinction, apply to extinction modifiers
# ('u' exponent on area size; u=0 => extinction independent of area size)
extinction_modifier_list = area_of_areas ^ (1 * u)
# Apply to extinction rate
elist = extinction_modifier_list * rep(e, length(areas))
# 2018 version
# Build the anagenetic rate matrix (Qmat) for this stratum. Two cases:
# geography-only (traitTF == FALSE) vs. geography + trait (traitTF == TRUE).
if (traitTF == FALSE)
{
Qmat_tmp = rcpp_states_list_to_DEmat(areas_list=allareas_list, states_list=all_states_list[states_allowed_TF], dmat=dmat_times_d, elist=elist, amat=amat, include_null_range=include_null_range, normalize_TF=TRUE, makeCOO_TF=force_sparse)
#print(dim(Qmat_tmp))
# } else {
# # If Qmat is pre-specified
# Qmat_tmp = Qmat
# }
} # END if (traitTF == FALSE)
# Analysis with a trait modifying dispersal rate
if (traitTF == TRUE)
{
# State space is the Cartesian product: trait states x geographic states
num_geog_states = length(all_states_list[states_allowed_TF])
numstates_geogtrait = num_trait_states * num_geog_states
# UPPASS definition of states_allowed_TF with traits
# (replicate the geographic mask once per trait state)
wTrait_states_allowed_TF =c(rep(states_allowed_TF, times=num_trait_states))
# print("num_geog_states")
# print(num_geog_states)
#
# print("num_trait_states")
# print(num_trait_states)
#
# print("numstates_geogtrait")
# print(numstates_geogtrait)
# Sanity check: the tip likelihoods table must have one column per
# combined geog+trait state
if (ncol(tip_condlikes_of_data_on_each_state[,wTrait_states_allowed_TF]) != numstates_geogtrait)
{
txt = paste0("STOP ERROR in calc_loglike_sp_stratified(): ncol(tip_condlikes_of_data_on_each_state)=", ncol(tip_condlikes_of_data_on_each_state), ", and numstates_geogtrait=", numstates_geogtrait, ". They must be equal to proceed.")
cat("\n\n")
cat(txt)
cat("\n\n")
stop(txt)
} # END if (ncol(tip_condlikes_of_data_on_each_state) != numstates_geogtrait)
# Get the modified Qmatrix (traits + geog)
tmpres = modify_Qmat_with_trait(Qmat=NULL, BioGeoBEARS_run_object, numstates_geogtrait=numstates_geogtrait, areas_list=allareas_list, states_list=all_states_list[states_allowed_TF], dispersal_multipliers_matrix=dispersal_multipliers_matrix, elist=elist, force_sparse=force_sparse)
Qmat_tmp = tmpres$Qmat
m = tmpres$m
# If the trait can change during jump events
# (jts = jump-transition probabilities between trait states; build the
# numeric matrix from the parameter names given in jts_txt_matrix)
if (is.null(BioGeoBEARS_run_object$jts_txt_matrix) == FALSE)
{
jts_txt_matrix = BioGeoBEARS_run_object$jts_txt_matrix
jts_matrix = matrix(data=0, nrow=nrow(jts_txt_matrix), ncol=ncol(jts_txt_matrix))
TF_matrix = matrix(data=TRUE, nrow=nrow(jts_txt_matrix), ncol=ncol(jts_txt_matrix))
diag(TF_matrix) = FALSE
jts_txt_params = c(jts_txt_matrix[TF_matrix])
jts_txt_params
# Populate the numeric jts_matrix
for (jts_i in 1:nrow(jts_txt_matrix))
{
diag_val = 1
for (jts_j in 1:ncol(jts_txt_matrix))
{
if (jts_i == jts_j)
{
next()
}
jts_txt = jts_txt_matrix[jts_i,jts_j]
newval = as.numeric(BioGeoBEARS_model_object@params_table[jts_txt, "est"])
jts_matrix[jts_i,jts_j] = newval
# NOTE(review): diag_val is overwritten (not accumulated) each off-diagonal
# iteration, so the diagonal ends up as 1 - (last off-diagonal value);
# with >2 trait states rows may not sum to 1 -- confirm intended.
diag_val = 1-newval
}
# Populate the diagonal
jts_matrix[jts_i,jts_i] = diag_val
} # END for (jts_i in 1:nrow(jts_txt_matrix))
} # END if (is.null(BioGeoBEARS_run_object$jts_txt_matrix) == FALSE)
} else {
# Geography-only case: combined state count equals the geographic state count
num_geog_states = length(all_states_list[states_allowed_TF])
numstates_geogtrait = num_geog_states
} # END if (traitTF == TRUE)
# When sparse exponentiation is forced, Qmat_tmp was built in COO format;
# also derive a CRS-format copy for the kexpmv exponentiator.
if (force_sparse == TRUE)
{
tmpQmat_in_REXPOKIT_coo_fmt = Qmat_tmp
# Make a CRS-formatted matrix, for kexpmv
# DO THE TRANSPOSE HERE, trait+geog matrices assembled transposed
# (transpose_needed=FALSE because the COO matrix is already in the
# desired orientation, per the note above)
tmpQmat_in_kexpmv_crs_fmt = coo2crs(
ia=tmpQmat_in_REXPOKIT_coo_fmt[,"ia"],
ja=tmpQmat_in_REXPOKIT_coo_fmt[,"ja"],
a =tmpQmat_in_REXPOKIT_coo_fmt[,"a"],
n=numstates_geogtrait, transpose_needed=FALSE)
} # END if (force_sparse == TRUE)
# Dead code below: an older sparse-conversion path, kept for reference.
# if (sparse == TRUE)
# {
# # Sparse matrix exponentiation
# original_Qmat = Qmat_tmp
#
# # number of states in the original matrix
# coo_n = ncol(Qmat_tmp)
# anorm = as.numeric(norm(original_Qmat, type="O"))
# matvec = original_Qmat
#
# # *DO* TRANSPOSE; we want to go FORWARDS in time, NOT BACKWARDS!
# tmatvec = base::t(matvec)
# tmatvec = matvec
# tmpQmat_in_REXPOKIT_coo_fmt = mat2coo(tmatvec)
# }
# Now. IF you have a subtree structure, you need to run this with a cladogenesis matrix,
# through calc_loglike_sp(), like normal.
# If there's just one tree, store it in the object
# (non-stratified case: wrap the whole tree in a one-piece 'chainsaw_result'
# so the per-piece uppass loop below works uniformly)
if (is.null(inputs$timeperiods) || length(inputs$timeperiods) == 1)
{
#tr = read.tree(inputs$trfn)
tr = check_trfn(trfn=inputs$trfn)
tree_to_chainsaw = NULL
tree_to_chainsaw[[1]] = tr
return_pieces_list = NULL
return_pieces_list[[1]] = tr
return_pieces_basenames = NULL
# Merge THEN split THEN sort!!
# (produces a canonical, order-independent basename from the tip labels)
tmp_labels_merge = paste(tr$tip.label, collapse=",", sep="")
tmp_labels_split = strsplit(tmp_labels_merge, split=",")[[1]]
return_pieces_basenames[[1]] = paste(sort(tmp_labels_split), collapse=",", sep="")
chainsaw_object = list()
chainsaw_object$tree_to_chainsaw = tree_to_chainsaw
chainsaw_object$return_pieces_list = return_pieces_list
chainsaw_object$return_pieces_basenames = return_pieces_basenames
attr(chainsaw_object, "class") = "chainsaw_result"
inputs$tree_sections_list[[1]] = chainsaw_object
}
# OK, if you have a tree here, do that
# if not, exp the branch
# OK, if you have a tree here, do that
# if not, exp the branch
#######################################################
# Cladogenic model
#######################################################
spPmat_inputs = get_spPmat_inputs_from_BGB(BioGeoBEARS_run_object=BioGeoBEARS_run_object, states_list=all_states_list[states_allowed_TF], dispersal_multipliers_matrix=dispersal_multipliers_matrix)
dmat = dispersal_multipliers_matrix
maxent01s_param = spPmat_inputs$maxent01s_param
maxent01v_param = spPmat_inputs$maxent01v_param
maxent01j_param = spPmat_inputs$maxent01j_param
maxent01y_param = spPmat_inputs$maxent01y_param
# Store the states_list in "l"
l = spPmat_inputs$l
#######################################################
# Calculate the speciation model, and put in COO_weights_columnar
#######################################################
#Old, seems bogus
#numareas = max(unlist(spPmat_inputs$l), na.rm=TRUE) + 1
numareas = max(sapply(X=spPmat_inputs$l, FUN=length), na.rm=TRUE) + 0
maxent01s = relative_probabilities_of_subsets(max_numareas=numareas, maxent_constraint_01=maxent01s_param, NA_val=0)
maxent01v = relative_probabilities_of_vicariants(max_numareas=numareas, maxent_constraint_01v=maxent01v_param, NA_val=0)
maxent01j = relative_probabilities_of_subsets(max_numareas=numareas, maxent_constraint_01=maxent01j_param, NA_val=0)
maxent01y = relative_probabilities_of_subsets(max_numareas=numareas, maxent_constraint_01=maxent01y_param, NA_val=0)
# Matrix of probs for each ancsize
maxprob_as_function_of_ancsize_and_decsize = mapply(FUN=max, maxent01s, maxent01v, maxent01j, maxent01y, MoreArgs=list(na.rm=TRUE))
maxprob_as_function_of_ancsize_and_decsize = matrix(data=maxprob_as_function_of_ancsize_and_decsize, nrow=nrow(maxent01s), ncol=ncol(maxent01s))
maxprob_as_function_of_ancsize_and_decsize[maxprob_as_function_of_ancsize_and_decsize > 0] = 1
maxprob_as_function_of_ancsize_and_decsize[maxprob_as_function_of_ancsize_and_decsize <= 0] = 0
# Now, go through, and make a list of the max minsize for each decsize
max_minsize_as_function_of_ancsize = apply(X=maxprob_as_function_of_ancsize_and_decsize, MARGIN=1, FUN=maxsize)
# -1 for null range
if (include_null_range == TRUE)
{
state_space_size_Qmat_to_cladoMat = -1
} else {
state_space_size_Qmat_to_cladoMat = 0
}
# -1, assumes NULL range is allowed
# tmpca_1 = rep(1, sum(states_allowed_TF)-1)
# tmpcb_1 = rep(1, sum(states_allowed_TF)-1)
tmpca_1 = rep(1, sum(states_allowed_TF)+state_space_size_Qmat_to_cladoMat)
tmpcb_1 = rep(1, sum(states_allowed_TF)+state_space_size_Qmat_to_cladoMat)
# Bug here: maxent01s_param instead of spPmat_inputs$s
#COO_weights_columnar = rcpp_calc_anclikes_sp_COOweights_faster(Rcpp_leftprobs=tmpca_1, Rcpp_rightprobs=tmpcb_1, l=l, s=maxent01s_param, v=maxent01v_param, j=maxent01j_param, y=maxent01y_param, dmat=dmat, maxent01s=maxent01s, maxent01v=maxent01v, maxent01j=maxent01j, maxent01y=maxent01y, max_minsize_as_function_of_ancsize=max_minsize_as_function_of_ancsize, printmat=FALSE, m=m)
# 2020-08-27_NJM
COO_weights_columnar = rcpp_calc_anclikes_sp_COOweights_faster(Rcpp_leftprobs=tmpca_1, Rcpp_rightprobs=tmpcb_1, l=l, s=spPmat_inputs$s, v=spPmat_inputs$v, j=spPmat_inputs$j, y=spPmat_inputs$y, dmat=dmat, maxent01s=maxent01s, maxent01v=maxent01v, maxent01j=maxent01j, maxent01y=maxent01y, max_minsize_as_function_of_ancsize=max_minsize_as_function_of_ancsize, printmat=FALSE, m=m)
# This gives 15 states
Rsp_rowsums = rcpp_calc_rowsums_for_COOweights_columnar(COO_weights_columnar=COO_weights_columnar)
cppSpMethod = 3
# Check to make sure you have the necessary inputs
if (exists("COO_weights_columnar") == FALSE)
{
stop("\nERROR_A: calc_loglike_sp requires 'COO_weights_columnar', 'Rsp_rowsums', and cppSpMethod==3 for marginal ancestral state estimations.\n")
}
if (exists("Rsp_rowsums") == FALSE)
{
stop("\nERROR_B: calc_loglike_sp requires 'COO_weights_columnar', 'Rsp_rowsums', and cppSpMethod==3 for marginal ancestral state estimations.\n")
}
if (cppSpMethod != 3)
{
stop("\nERROR_C: calc_loglike_sp requires 'COO_weights_columnar', 'Rsp_rowsums', and cppSpMethod==3 for marginal ancestral state estimations.\n")
}
#######################################################
# UPPASS THROUGH THE TREE PIECES - CALCULATIONS
# Go through the tree pieces in this stratum
#######################################################
chainsaw_result = inputs$tree_sections_list[[i]]
# Set up a new list item to store uppass tip probs
inputs$tree_sections_list[[i]]$pieces_relprobs_at_tips = list()
# UPPASS: Go through tree pieces in this stratum (bottom first)
for (jj in 1:length(chainsaw_result$return_pieces_list))
{
treepiece = chainsaw_result$return_pieces_list[[jj]]
#cat("\ni=", i, "; jj=",jj, "; length(treepiece)=", length(treepiece), sep="")
# If it's just a branch section
# (a numeric treepiece is a bare branch-segment length; otherwise it is a
# subtree object, handled in the else-branch further below)
if (is.numeric(treepiece) )
{
do_exponentiation = TRUE # default
# Also, exclude the case where there is a branch at the bottom below the bottom root node
if (i == num_timeperiods)
{
errortxt = "ERROR: In stratified analysis, your tree must start with a root node, not a branch below the root node."
stop(errortxt)
}
# Get the length of this branch
subbranch_length = treepiece
# Get the anc node in the original tree
# (look up this stratum/piece combination in the master table)
TF1 = inputs$master_table$stratum == i
TF2 = inputs$master_table$piecenum == jj
TF3 = inputs$master_table$piececlass == "subbranch"
TF = (TF1 + TF2 + TF3) == 3
anc_node_original_tree = inputs$master_table$node[TF]
# Check for fossil
# Find the row
rownum = (1:nrow(inputs$master_table))[TF]
tmp_master_table_row = inputs$master_table[rownum, ]
# Error check
if (nrow(tmp_master_table_row) != 1)
{
stoptxt = paste("\n\nFATAL ERROR in stratified loglike UPPASS calculation at i=", i, "; jj=", jj,
"; ", 'inputs$master_table$piececlass == "subbranch"',
"\nnrow(tmp_master_table_row) should =1 but instead =", nrow(tmp_master_table_row), "\n", sep="")
stop(stoptxt)
}
# Now check if it's a fossil that appears in this time bin
master_tip_time_bp = tmp_master_table_row$time_bp
time_top = tmp_master_table_row$time_top
time_bot = tmp_master_table_row$time_bot
is_fossil = tmp_master_table_row$fossils
# 2016-02-29: rearranged
# UPPASS:
# THIS (FOSSIL_HASNT_OCCURRED_YET) MUST GO *BEFORE* THE 'FOSSIL_TIP_DOES_OCCUR_IN_BIN' IF/THEN
# If this is TRUE, this fossil hasn't occured yet, and you are looking at the "phantom limb".
# In this case, DON'T do matrix exponentiation, just copy the probabilities up!!
if ( master_tip_time_bp < time_top )
{
do_exponentiation = FALSE
}
# UPPASS:
# THIS (FOSSIL_TIP_DOES_OCCUR_IN_BIN) MUST GO *AFTER* THE 'FOSSIL_HASNT_OCCURRED_YET' IF/THEN
# If this is TRUE, there's a match and the fossil tip appears in this time period
if ( (master_tip_time_bp >= time_top) && (master_tip_time_bp < time_bot) && (is_fossil == TRUE))
{
# Shorten the branchlength by master_tip_time_bp-time_top
# (the fossil tip terminates inside this stratum, so only part of the
# segment's duration is actually traversed)
amount_to_shorten_by = master_tip_time_bp-time_top
subbranch_length = subbranch_length - amount_to_shorten_by
do_exponentiation = TRUE
}
# If FALSE, you're below all this and hopefully don't care
# 2016-02-29
# UPPASS:
# Also, DON'T do exponentiation if the branch length in the master branch
# is a direct ancestor, i.e., less than min_branchlength
if (tmp_master_table_row$edge.length < min_branchlength)
{
#print("It's a direct ancestor, so DON'T do matrix exponentiation!")
# It's a direct ancestor, so DON'T do matrix exponentiation
do_exponentiation = FALSE
}
# Get the uppass probs from the correct piece in the previous (below, older) stratum
previous_stratum = i + 1
# Get the number of the previous treepiece
# (match on master-tree node number within the older stratum)
previous_stratum_TF = inputs$master_table$stratum == previous_stratum
node_TF = inputs$master_table$node == anc_node_original_tree
TF = (previous_stratum_TF + node_TF) == 2
master_table_row_corresponding_to_anctip = inputs$master_table[TF,]
previous_treepiece_num = master_table_row_corresponding_to_anctip$piecenum
# The previous treepiece
previous_treepiece = inputs$tree_sections_list[[previous_stratum]]$return_pieces_list[[previous_treepiece_num]]
# Relprobs from previous treepiece
relprobs_at_tips_of_anc_treepiece = inputs$tree_sections_list[[previous_stratum]]$pieces_relprobs_at_tips[[previous_treepiece_num]]
relprobs_at_branch_bottoms_below_tips_from_previous_stratum = inputs$tree_sections_list[[previous_stratum]]$pieces_relprobs_at_bottoms_below_tips[[previous_treepiece_num]]
# If ancestor was a sub-branch
# (then the stored probs are plain vectors; if it was a subtree, extract
# the row for the particular tip this sub-branch descends from)
if (is.numeric(previous_treepiece) == TRUE)
{
ancprobs_at_subbranch_bottom = relprobs_at_tips_of_anc_treepiece
ancprobs_at_bottom_of_total_branch = relprobs_at_branch_bottoms_below_tips_from_previous_stratum
} else {
# Ancestor was a sub-tree
# Which tip in the previous treepiece?
tipnum_in_previous_treepiece = master_table_row_corresponding_to_anctip$SUBnode
# Extract those relative probabilities
ancprobs_at_subbranch_bottom = relprobs_at_tips_of_anc_treepiece[tipnum_in_previous_treepiece, ]
ancprobs_at_bottom_of_total_branch = relprobs_at_branch_bottoms_below_tips_from_previous_stratum[tipnum_in_previous_treepiece, ]
}
# Do the exponentiation, unless it's a "phantom limb"!
# (forward-in-time propagation of the ancestor's state probabilities up
# this branch segment, via dense or sparse matrix exponentiation)
if (do_exponentiation == TRUE)
{
# Then do a forward matrix exponentiation step
# Do sparse or dense matrix exponentiation
if (sparse==FALSE)
{
# Dense matrix exponentiation
# Need to do a forward matrix exponentiation
if (traitTF == FALSE)
{
actual_probs_after_forward_exponentiation = calc_prob_forward_onebranch_dense(relprobs_branch_bottom=ancprobs_at_subbranch_bottom[states_allowed_TF], branch_length=subbranch_length, Qmat_tmp)
} else {
actual_probs_after_forward_exponentiation = calc_prob_forward_onebranch_dense(relprobs_branch_bottom=ancprobs_at_subbranch_bottom[wTrait_states_allowed_TF], branch_length=subbranch_length, Qmat_tmp)
}
if (include_null_range == TRUE)
{
# NULL range is impossible
actual_probs_after_forward_exponentiation[1] = 0
} # END if (include_null_range == TRUE)
# Renormalize so the state probabilities sum to 1
actual_probs_after_forward_exponentiation = actual_probs_after_forward_exponentiation / sum(actual_probs_after_forward_exponentiation)
} else {
# Sparse matrix exponentiation
# print("this1")
if (traitTF == FALSE)
{
actual_probs_after_forward_exponentiation = calc_prob_forward_onebranch_sparse(relprobs_branch_bottom=ancprobs_at_subbranch_bottom[states_allowed_TF], branch_length=subbranch_length, tmpQmat_in_REXPOKIT_coo_fmt=tmpQmat_in_REXPOKIT_coo_fmt, coo_n=numstates_geogtrait, anorm=NULL, check_for_0_rows=TRUE)
} else {
actual_probs_after_forward_exponentiation = calc_prob_forward_onebranch_sparse(relprobs_branch_bottom=ancprobs_at_subbranch_bottom[wTrait_states_allowed_TF], branch_length=subbranch_length, tmpQmat_in_REXPOKIT_coo_fmt=tmpQmat_in_REXPOKIT_coo_fmt, coo_n=numstates_geogtrait, anorm=NULL, check_for_0_rows=TRUE)
}
if (include_null_range == TRUE)
{
# NULL range is impossible
actual_probs_after_forward_exponentiation[1] = 0
} # END if (include_null_range == TRUE)
actual_probs_after_forward_exponentiation = actual_probs_after_forward_exponentiation / sum(actual_probs_after_forward_exponentiation)
}
# Re-expand the result back into the FULL state space (the exponentiation
# above operated only on the stratum-allowed subset of states)
if (traitTF == FALSE)
{
# 2015 fix (and states_allowed_TF above)
actual_probs_after_forward_exponentiation_new = rep(0, length(states_allowed_TF))
actual_probs_after_forward_exponentiation_new[states_allowed_TF] = actual_probs_after_forward_exponentiation
} else {
actual_probs_after_forward_exponentiation_new = rep(0, length(wTrait_states_allowed_TF))
actual_probs_after_forward_exponentiation_new[wTrait_states_allowed_TF] = actual_probs_after_forward_exponentiation
} # END if (traitTF == FALSE)
actual_probs_after_forward_exponentiation = actual_probs_after_forward_exponentiation_new
# Zero out impossible states in this zone (but NOT for "phantom limbs")
# This CAN work, since they've been reset to main state space
if (!is.null(states_allowed_TF))
{
actual_probs_after_forward_exponentiation[states_allowed_TF==FALSE] = 0
actual_probs_after_forward_exponentiation = actual_probs_after_forward_exponentiation / sum(actual_probs_after_forward_exponentiation)
}
} else {
# Just pass up the ancestral probabilities, without modification, on the "phantom limb"
# Don't do 2015 fix here, since this is a fossil branch and
# we are just passing up the probabilities
actual_probs_after_forward_exponentiation = ancprobs_at_subbranch_bottom
}
# Diagnostic dump if the propagation produced NAs
if (any(is.na(actual_probs_after_forward_exponentiation)))
{
print("i, jj, anc")
print(i)
print(jj)
# NOTE(review): 'anc' does not appear to be defined in this (subbranch)
# branch -- presumably 'anc_node_original_tree' was intended; if 'anc' is
# not set earlier in the function, this print() would itself error.
print(anc)
print("actual_probs_after_forward_exponentiation")
print(actual_probs_after_forward_exponentiation)
print("ancprobs_at_subbranch_bottom")
print(ancprobs_at_subbranch_bottom)
print("ancprobs_at_bottom_of_total_branch")
print(ancprobs_at_bottom_of_total_branch)
stop("ERROR #1: see stratified code")
}
##########################################################
# tip probabilities for next stratum up
##########################################################
relprobs_at_tips_for_next_stratum_up = actual_probs_after_forward_exponentiation
relprobs_at_branch_bottoms_below_tips_for_next_stratum_up = ancprobs_at_bottom_of_total_branch
# Store the relprobs at the tips, so that the next stratum up can access them...
inputs$tree_sections_list[[i]]$pieces_relprobs_at_tips[[jj]] = relprobs_at_tips_for_next_stratum_up
relprobs_at_tips_for_next_stratum_up
inputs$tree_sections_list[[i]]$pieces_relprobs_at_bottoms_below_tips[[jj]] = relprobs_at_branch_bottoms_below_tips_for_next_stratum_up
# If this is a tip in the master tree, store the tmp_relprobs_at_branchtop_AT_node_UPPASS in the master tree
# (do this after the pass through the whole tree)
# END if (is.numeric(treepiece) )
} else {
######################################################
# UPPASS: Treepiece is a subtree!!
tmp_subtree = treepiece
######################################################
######################################################
# Check if this subtree contains a fixed internal node on the master tree
######################################################
# 2014-03-20_NJM
# FIXNODES FOR UPPASS
# NON-BUG BUG BUG (I don't think anyone has ever addressed UPPASS
# calculations with fixed ancestral states)
# Map user-supplied fixed-state node constraints (fixnode/fixlikes, in
# MASTER-tree node numbering) onto THIS subtree's node numbering, producing
# tmp_fixnode/tmp_fixlikes for the subtree likelihood calculation.
use_fixnodes_on_uppass = TRUE # turn off if desired/buggy
if (use_fixnodes_on_uppass) {
# Match the master fixnodes to the fixnodes in *JUST* this subtree
# We will then pass these fixnodes to the subtree loglike calculation
# First, we need to get the master node number, iff it's internal
#
tmp_fixnode = NULL # Default
tmp_fixlikes = NULL # Default
if ((!is.null(fixnode)) && (length(fixnode) > 0))
{
# Check for multiple fixnodes
if (length(fixnode) > 1)
{
# If there are multiple fixnodes,
# Get the matching nodes in this subtree
TF1 = inputs$master_table$stratum == i
TF2 = inputs$master_table$piecenum == jj
TF3 = inputs$master_table$piececlass == "subtree"
TF = ((TF1 + TF2 + TF3) == 3)
tmprows = inputs$master_table[TF,]
# Get the fixnodes found in this subtree
fixnodes_in_subtree_TF = fixnode %in% tmprows$node
# *IF* the subtree contains fixnodes, do this stuff
# otherwise, set to NULL
if (sum(fixnodes_in_subtree_TF) > 0)
{
#master_nodes_in_fixnode_TF = inputs$master_table$node %in% fixnode
#master_nodes_in_fixnode
#TF = (anc == fixnode) # old
# we do not use temporary_fixnode, since we need the fixnodes in the subtree numbering (tmprow$SUBnode)
temporary_fixnodes = fixnode[fixnodes_in_subtree_TF]
# But we will use these
temporary_fixlikes = fixlikes[fixnodes_in_subtree_TF,]
# The subtree nodenums corresponding to the subset temporary_fixnodes
# NOTE! THIS SUBSET THING WILL ONLY WORK IF THE NODES ARE SORTED IN ORDER FROM THE START
subtree_rows_in_fixnodes_TF = tmprows$node %in% fixnode
subtree_fixnode_master_nodenums = tmprows$node[subtree_rows_in_fixnodes_TF]
subtree_fixnode_nums = tmprows$SUBnode[subtree_rows_in_fixnodes_TF]
# We have to order these subtree fixnodes, and order the subtree fixlikes the same way
order_subtree_fixnode_nums = order(subtree_fixnode_nums)
subtree_fixnode_nums = subtree_fixnode_nums[order_subtree_fixnode_nums]
# Only reorder if there are 2 or more rows, i.e. if it's a matrix not a vector
if (length(order_subtree_fixnode_nums) > 1)
{
temporary_fixlikes = temporary_fixlikes[order_subtree_fixnode_nums, ]
}
} else {
# If *NO* fixnodes in subtree:
temporary_fixnodes = NULL
subtree_fixnode_master_nodenums = NULL
subtree_fixnode_nums = NULL
temporary_fixlikes = NULL
} # end if (sum(fixnodes_in_subtree_TF) > 0)
# Check if we're in the right stratum / piece / piececlass
# (have account for possible multiple rows)
TF1 = unique(tmprows$stratum) == i
TF2 = unique(tmprows$piecenum) == jj
TF3 = unique(tmprows$piececlass) == "subtree"
TF = ((TF1 + TF2 + TF3) == 3)
if (TF == TRUE)
{
#txt = paste("Master tree node ", fixnode, " matched to i=", i, "; jj=", jj, "; piececlass=", piececlass, "; subtree subnode=", tmprow$SUBnode, sep="")
#print(txt)
#print(fixlikes)
# Determine the number of the subnode in the subtree
if (length(subtree_fixnode_nums) == 0)
{
subtree_fixnode_nums = NULL
temporary_fixlikes = NULL
}
tmp_fixnode = subtree_fixnode_nums
tmp_fixlikes = temporary_fixlikes
} else {
tmp_fixnode = NULL
tmp_fixlikes = NULL
} # end if (TF == TRUE)
} else {
# Only 1 fixnode
temporary_fixnode = fixnode
temporary_fixlikes = c(fixlikes)
# e.g.
# fixnode=20
# ==2: node number matches AND SUBnode.type is either "root" or "internal"
# (the two type tests are mutually exclusive)
TF1 = inputs$master_table$node == temporary_fixnode
TF2 = inputs$master_table$SUBnode.type == "root"
TF3 = inputs$master_table$SUBnode.type == "internal"
TF = ((TF1 + TF2 + TF3) == 2)
tmprow = inputs$master_table[TF,]
# Check if we're in the right stratum / piece / piececlass
TF1 = tmprow$stratum == i
TF2 = tmprow$piecenum == jj
TF3 = tmprow$piececlass == "subtree"
TF = ((TF1 + TF2 + TF3) == 3)
if (TF == TRUE)
{
#txt = paste("Master tree node ", fixnode, " matched to i=", i, "; jj=", jj, "; piececlass=", piececlass, "; subtree subnode=", tmprow$SUBnode, sep="")
#print(txt)
#print(fixlikes)
# cat("\n\n")
# print(fixnode)
# print(temporary_fixnode)
# print(tmprow$SUBnode)
# print(temporary_fixlikes)
# Determine the number of the subnode in the subtree
tmp_fixnode = tmprow$SUBnode
tmp_fixlikes = temporary_fixlikes
# end if (TF == TRUE)
} else {
tmp_fixnode = NULL
tmp_fixlikes = NULL
}
} # end if (length(fixnode) > 1)
} # end if (!is.null(fixnode))
} # end if (use_fixnodes_on_uppass)
####################################################
# Check for fossils on the UPPASS, shorten branches
# appropriately if found
####################################################
# Indices of the tips of the current subtree piece (1..Ntips of tmp_subtree)
tmp_subtree_tipnums = 1:length(tmp_subtree$tip.label)
# Loop over every subtree tip and check whether it is a fossil tip that
# first appears inside this time-stratum. (The actual branch shortening is
# now performed in section_the_tree(); see the 2016-02-29 note below.)
for (iter in 1:length(tmp_subtree_tipnums))
{
# Find the row in the master table corresponding to this subtree_tip
subtree_tip = tmp_subtree_tipnums[iter]
# Match on stratum (i), piece number (jj), piece class, and subtree node
# number; all four logical vectors must be TRUE for a row to match
TF1 = inputs$master_table$stratum == i
TF2 = inputs$master_table$piecenum == jj
TF3 = inputs$master_table$piececlass == "subtree"
TF4 = inputs$master_table$SUBnode == subtree_tip
TF = (TF1 + TF2 + TF3 + TF4) == 4
# Find the row
rownum = (1:nrow(inputs$master_table))[TF]
tmp_master_table_row = inputs$master_table[rownum, ]
# Error check: exactly one master-table row must match this subtree tip
if (nrow(tmp_master_table_row) != 1)
{
stoptxt = paste("\n\nFATAL ERROR in stratified loglike UPPASS calculation at i=", i, "; jj=", jj, "; ",
'inputs$master_table$piececlass == "subtree"', "; subtree_tip=", subtree_tip,
"\nnrow(tmp_master_table_row) should =1 but instead =", nrow(tmp_master_table_row), "\n", sep="")
stop(stoptxt)
}
# Now check if it's a fossil that appears in this time bin
master_tip_time_bp = tmp_master_table_row$time_bp
time_top = tmp_master_table_row$time_top
time_bot = tmp_master_table_row$time_bot
is_fossil = tmp_master_table_row$fossils
# If this is TRUE, there's a match and the fossil tip appears in this time period
if ( (master_tip_time_bp >= time_top) && (master_tip_time_bp < time_bot) && is_fossil == TRUE)
{
# Shorten the branchlength by master_tip_time_bp-time_top
# (amount_to_shorten_by and tmp2_edgenum are computed but currently
# unused, since the shortening line below is commented out)
amount_to_shorten_by = master_tip_time_bp-time_top
# Find the branch of the subtree!
tmp2_edgeTF = tmp_subtree$edge[,2] == subtree_tip
tmp2_edgenum = (1:nrow(tmp_subtree$edge))[tmp2_edgeTF]
# 2016-02-29: this is now done in section_the_tree()
# Edit the length of the branch on this subtree tip
#tmp_subtree$edge.length[tmp2_edgenum] = tmp_subtree$edge.length[tmp2_edgenum] - amount_to_shorten_by
# do_exponentiation = TRUE # not needed here
}
} # end forloop through subtree tips
# End fossils check
# (Do on tmp_subtree BEFORE it's converted to phy2!!)
# Reorder the subtree
# This is CRUCIAL
phy2 <- reorder(tmp_subtree, "pruningwise")
# Get the names of the tips in this subtree
tipnames = phy2$tip.label
# Get the root node of the subtree (was visit edges in reverse order from the downpass)
num_internal_nodes = phy2$Nnode
edges_to_visit_uppass = seq(from=(num_internal_nodes*2), by=-2, length.out=num_internal_nodes)
tmpj = edges_to_visit_uppass[1]
tmpi = tmpj - 1
anc <- phy2$edge[tmpi, 1]
# Temporary matrix for UPPASS probabilities
numnodes = num_internal_nodes + length(tmp_subtree$tip.label)
# Get the starting uppass probabilities
#inputs$tree_sections_list[[2]]$return_pieces_list[[3]]$root.edge
# Otherwise, it's just the ancprobs at the node
# Get the node number of subtree root node in the original tree
# Get anc_node_original_tree
TFi = inputs$master_table$stratum == i
TFjj = inputs$master_table$piecenum == jj
TF_SUBnode = inputs$master_table$SUBnode == anc
TF = ((TFi + TFjj + TF_SUBnode) == 3)
anc_node_original_tree = inputs$master_table$node[TF]
# Get the SUBnode numbers of the subtree, and their corresponding nodenums in the
# master table
if (traitTF == FALSE)
{
# 2014 version
#tmp_relprobs_at_branchtop_AT_node_UPPASS = matrix(data=NA, nrow=numnodes, length(states_to_use_TF))
#tmp_relprobs_at_branchbot_BELOW_node_UPPASS = matrix(data=NA, nrow=numnodes, length(states_to_use_TF))
# 2015 version
tmp_relprobs_at_branchtop_AT_node_UPPASS = matrix(data=0, nrow=numnodes, sum(states_allowed_TF))
tmp_relprobs_at_branchbot_BELOW_node_UPPASS = matrix(data=0, nrow=numnodes, sum(states_allowed_TF))
} else {
tmp_relprobs_at_branchtop_AT_node_UPPASS = matrix(data=0, nrow=numnodes, sum(states_allowed_TF)*num_trait_states)
tmp_relprobs_at_branchbot_BELOW_node_UPPASS = matrix(data=0, nrow=numnodes, sum(states_allowed_TF)*num_trait_states)
} # END if (traitTF == FALSE)
# Map each subtree node number (= row index of the uppass matrix) to its
# node number in the master (original) tree, via the master table.
# NOTE: master_tree_nodenums is grown with c() inside the loop; acceptable
# here given the small number of nodes per subtree.
master_tree_nodenums = NULL
for (rownum in 1:nrow(tmp_relprobs_at_branchtop_AT_node_UPPASS))
{
# Store the relative probabilities at branch tops, WHEN
# the internal nodes correspond to the master tree
tmp_relprobs = tmp_relprobs_at_branchtop_AT_node_UPPASS[rownum,]
subtree_node = rownum
# We could use EITHER
# (1) Internal nodes and tips of subtrees
TF1 = inputs$master_table$stratum == i
TF2 = inputs$master_table$piecenum == jj
TF3 = inputs$master_table$piececlass == "subtree"
TF4 = inputs$master_table$SUBnode == subtree_node
# TF5a/TF5b/TF5c are mutually exclusive node.type categories, so at most one
# of them is TRUE per row; requiring the sum to equal 5 accepts any of the
# three types together with the four piece-matching conditions above.
TF5a = inputs$master_table$node.type == "internal" # Store only if node corresponds to a node or tip in orig_tree
TF5b = inputs$master_table$node.type == "tip" # Store only if node corresponds to a node or tip in orig_tree
TF5c = inputs$master_table$node.type == "root" # Store only if node corresponds to a node or tip in orig_tree
TF_subtrees = (TF1 + TF2 + TF3 + TF4 + TF5a + TF5b + TF5c) == 5
master_tree_nodenums = c(master_tree_nodenums, inputs$master_table$node[TF_subtrees])
} # END for (rownum in 1:nrow(tmp_relprobs_at_branchtop_AT_node_UPPASS))
# Also, we need to get the saved DOWNPASS stuff at branch BOTTOMS, for the subtree uppass
# print("dim(relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS)")
# print(dim(relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS))
# print(master_tree_nodenums)
# print(states_allowed_TF)
# print(relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS[master_tree_nodenums,])
# print(relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS[,states_allowed_TF])
# print(relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS[master_tree_nodenums,][,states_allowed_TF])
if (traitTF == FALSE)
{
tmp_relprobs_at_branchbot_BELOW_node_DOWNPASS = relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS[master_tree_nodenums,][,states_allowed_TF]
} else {
tmp_relprobs_at_branchbot_BELOW_node_DOWNPASS = relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS[master_tree_nodenums,][,wTrait_states_allowed_TF]
}
# 2016-05-28_bug_fix
# Fix this error, e.g. when DEC* model + areas_allowed means that
# ranges_list = NULL, Kauai is just
# ranges_list = Kauai
# This means that:
# subtree_tip_relative_probs_of_each_state
# and thus
# tip_condlikes_of_data_on_each_state
# ...are just a list of numbers, not a matrix, thus
# rowSums fails in calc_loglike_sp() in that time-stratum.
#
#
# This was the error message:
#
# Error in rowSums(tip_condlikes_of_data_on_each_state) :
# 'x' must be an array of at least two dimensions
# Calls: bears_optim_run ... calc_loglike_sp_stratified -> calc_loglike_sp -> rowSums
#
if (sum(states_allowed_TF) == 1)
{
if (traitTF == FALSE)
{
tmp_relprobs_at_branchbot_BELOW_node_DOWNPASS = matrix(data=tmp_relprobs_at_branchbot_BELOW_node_DOWNPASS, ncol=1)
} else {
tmp_relprobs_at_branchbot_BELOW_node_DOWNPASS = matrix(data=tmp_relprobs_at_branchbot_BELOW_node_DOWNPASS, ncol=sum(wTrait_states_allowed_TF))
} # END if (traitTF == FALSE)
} # END if (sum(states_allowed_TF) == 1)
# i.e., if i==5 in the Psychotria dataset
if (i == num_timeperiods) # You are at the bottom tree piece, just use root node
{
#ancprobs_at_subtree_root = starting_probs
# Anc node of the subtree
if (traitTF == FALSE)
{
tmp_relprobs_at_branchtop_AT_node_UPPASS[anc, ] = relative_probs_of_each_state_at_branch_top_AT_node_UPPASS[anc_node_original_tree, ][states_allowed_TF]
} else {
#print("Here1")
#print(tmp_relprobs_at_branchtop_AT_node_UPPASS[anc, ])
#print(relative_probs_of_each_state_at_branch_top_AT_node_UPPASS[anc_node_original_tree, ][wTrait_states_allowed_TF])
#print(wTrait_states_allowed_TF)
tmp_relprobs_at_branchtop_AT_node_UPPASS[anc, ] = relative_probs_of_each_state_at_branch_top_AT_node_UPPASS[anc_node_original_tree, ][wTrait_states_allowed_TF]
} # END if (traitTF == FALSE)
# None of this, at the root
# NO: tmp_relprobs_at_branchbot_BELOW_node_UPPASS[anc, ] = ancprobs_at_bottom_of_total_branch
} else {
# You are NOT at the bottom tree piece
if ((is.numeric(phy2$root.edge) == TRUE) && (!is.null(phy2$root.edge)) && (phy2$root.edge > 0))
{
# Get the length of this branch
root_edge_length = phy2$root.edge
# Get the uppass probs from the correct piece in the previous stratum
previous_stratum = i + 1
# Get the number of the previous treepiece
previous_stratum_TF = inputs$master_table$stratum == previous_stratum
node_TF = inputs$master_table$node == anc_node_original_tree
TF = (previous_stratum_TF + node_TF) == 2
master_table_row_corresponding_to_anctip = inputs$master_table[TF,]
previous_treepiece_num = master_table_row_corresponding_to_anctip$piecenum
# The previous treepiece
previous_treepiece = inputs$tree_sections_list[[previous_stratum]]$return_pieces_list[[previous_treepiece_num]]
# Relprobs from previous treepiece
relprobs_at_tips_of_anc_treepiece = inputs$tree_sections_list[[previous_stratum]]$pieces_relprobs_at_tips[[previous_treepiece_num]]
#cat("\ni=", i, "; jj=", jj, "; previous_stratum=", previous_stratum, "; previous_treepiece_num=", previous_treepiece_num, "\n", sep="")
#print(inputs$tree_sections_list[[previous_stratum]]$pieces_relprobs_at_bottoms_below_tips)
relprobs_at_branch_bottoms_below_tips_from_previous_stratum = inputs$tree_sections_list[[previous_stratum]]$pieces_relprobs_at_bottoms_below_tips[[previous_treepiece_num]]
# If ancestor was a sub-branch
if (is.numeric(previous_treepiece) == TRUE)
{
ancprobs_at_subbranch_bottom = relprobs_at_tips_of_anc_treepiece
ancprobs_at_bottom_of_total_branch = relprobs_at_branch_bottoms_below_tips_from_previous_stratum
} else {
# Ancestor was a sub-tree
# Which tip in the previous treepiece?
tipnum_in_previous_treepiece = master_table_row_corresponding_to_anctip$SUBnode
# Extract those relative probabilities
ancprobs_at_subbranch_bottom = relprobs_at_tips_of_anc_treepiece[tipnum_in_previous_treepiece, ]
ancprobs_at_bottom_of_total_branch = relprobs_at_branch_bottoms_below_tips_from_previous_stratum[tipnum_in_previous_treepiece, ]
}
# Then do a forward matrix exponentiation step
# Do sparse or dense matrix exponentiation
if (sparse==FALSE)
{
# Dense matrix exponentiation
# Need to do a forward matrix exponentiation
if (traitTF == FALSE)
{
actual_probs_after_forward_exponentiation = calc_prob_forward_onebranch_dense(relprobs_branch_bottom=ancprobs_at_subbranch_bottom[states_allowed_TF], branch_length=root_edge_length, Qmat_tmp)
} else {
actual_probs_after_forward_exponentiation = calc_prob_forward_onebranch_dense(relprobs_branch_bottom=ancprobs_at_subbranch_bottom[wTrait_states_allowed_TF], branch_length=root_edge_length, Qmat_tmp)
} # END if (traitTF == FALSE)
} else {
# Sparse matrix exponentiation
# print(tmpQmat_in_REXPOKIT_coo_fmt)
# print("this2")
# print("numstates_geogtrait")
# print(numstates_geogtrait)
# print(ancprobs_at_subbranch_bottom[states_allowed_TF])
if (traitTF == FALSE)
{
actual_probs_after_forward_exponentiation = calc_prob_forward_onebranch_sparse(relprobs_branch_bottom=ancprobs_at_subbranch_bottom[states_allowed_TF], branch_length=root_edge_length, tmpQmat_in_REXPOKIT_coo_fmt=tmpQmat_in_REXPOKIT_coo_fmt, coo_n=numstates_geogtrait, anorm=NULL, check_for_0_rows=TRUE)
} else {
actual_probs_after_forward_exponentiation = calc_prob_forward_onebranch_sparse(relprobs_branch_bottom=ancprobs_at_subbranch_bottom[wTrait_states_allowed_TF], branch_length=root_edge_length, tmpQmat_in_REXPOKIT_coo_fmt=tmpQmat_in_REXPOKIT_coo_fmt, coo_n=numstates_geogtrait, anorm=NULL, check_for_0_rows=TRUE)
} # END if (traitTF == FALSE)
} # END if (sparse==FALSE)
if (traitTF == FALSE)
{
# 2015 fix
actual_probs_after_forward_exponentiation_new = rep(0, length(states_allowed_TF))
actual_probs_after_forward_exponentiation_new[states_allowed_TF] = actual_probs_after_forward_exponentiation
actual_probs_after_forward_exponentiation = actual_probs_after_forward_exponentiation_new
} else {
actual_probs_after_forward_exponentiation_new = rep(0, length(wTrait_states_allowed_TF))
actual_probs_after_forward_exponentiation_new[wTrait_states_allowed_TF] = actual_probs_after_forward_exponentiation
actual_probs_after_forward_exponentiation = actual_probs_after_forward_exponentiation_new
}
if (include_null_range == TRUE)
{
# NULL range is impossible on the uppass; state 1 is the null range
actual_probs_after_forward_exponentiation[1] = 0
} # END if (include_null_range == TRUE)
# Renormalize so the uppass probabilities sum to 1
actual_probs_after_forward_exponentiation = actual_probs_after_forward_exponentiation / sum(actual_probs_after_forward_exponentiation)
# This CAN work, since they've been reset to main state space
# Zero out impossible states
if (!is.null(states_allowed_TF))
{
actual_probs_after_forward_exponentiation[states_allowed_TF==FALSE] = 0
# Renormalize again after zeroing the disallowed states
actual_probs_after_forward_exponentiation = actual_probs_after_forward_exponentiation / sum(actual_probs_after_forward_exponentiation)
}
# 2015 fix:ancprobs_at_subtree_root is for FULL state space
ancprobs_at_subtree_root = actual_probs_after_forward_exponentiation
if (traitTF == FALSE)
{
# But this is for reduced state space
tmp_relprobs_at_branchtop_AT_node_UPPASS[anc, ] = actual_probs_after_forward_exponentiation[states_allowed_TF]
# This is also for reduced state space
tmp_relprobs_at_branchbot_BELOW_node_UPPASS[anc, ] = ancprobs_at_bottom_of_total_branch[states_allowed_TF]
} else {
# But this is for reduced state space
tmp_relprobs_at_branchtop_AT_node_UPPASS[anc, ] = actual_probs_after_forward_exponentiation[wTrait_states_allowed_TF]
# This is also for reduced state space
tmp_relprobs_at_branchbot_BELOW_node_UPPASS[anc, ] = ancprobs_at_bottom_of_total_branch[wTrait_states_allowed_TF]
}
} else {
if (traitTF == FALSE)
{
# No trait
# No root edge; just use probs at anc of subtree
# 2015 reduced state space
tmp_relprobs_at_branchtop_AT_node_UPPASS[anc, ] = relative_probs_of_each_state_at_branch_top_AT_node_UPPASS[anc_node_original_tree, ][states_allowed_TF]
# 2015 reduced state space
tmp_relprobs_at_branchbot_BELOW_node_UPPASS[anc, ] = ancprobs_at_bottom_of_total_branch[states_allowed_TF]
} else {
# With trait
#print("Here2")
#print(tmp_relprobs_at_branchtop_AT_node_UPPASS[anc, ])
#print(relative_probs_of_each_state_at_branch_top_AT_node_UPPASS[anc_node_original_tree, ][wTrait_states_allowed_TF])
#print(wTrait_states_allowed_TF)
tmp_relprobs_at_branchtop_AT_node_UPPASS[anc, ] = relative_probs_of_each_state_at_branch_top_AT_node_UPPASS[anc_node_original_tree, ][wTrait_states_allowed_TF]
tmp_relprobs_at_branchbot_BELOW_node_UPPASS[anc, ] = ancprobs_at_bottom_of_total_branch[wTrait_states_allowed_TF]
} # END if (traitTF == FALSE)
} # END if ((is.numeric(phy2$root.edge) == TRUE) && (!is.null(phy2$root.edge)) && (phy2$root.edge > 0))
# END check for root edge
if (any(is.na(tmp_relprobs_at_branchtop_AT_node_UPPASS[anc, ])))
{
print("i, jj, anc")
print(i)
print(jj)
print(anc)
print("actual_probs_after_forward_exponentiation")
print(actual_probs_after_forward_exponentiation)
print("tmp_relprobs_at_branchtop_AT_node_UPPASS[anc, ]")
print(tmp_relprobs_at_branchtop_AT_node_UPPASS[anc, ])
print("anc_node_original_tree")
print(anc_node_original_tree)
print("states_allowed_TF")
print(states_allowed_TF)
if (traitTF == FALSE)
{
print("ancprobs_at_subbranch_bottom[states_allowed_TF]")
print(ancprobs_at_subbranch_bottom[states_allowed_TF])
} else {
print("wTrait_states_allowed_TF")
print(wTrait_states_allowed_TF)
print("ancprobs_at_subbranch_bottom[wTrait_states_allowed_TF]")
print(ancprobs_at_subbranch_bottom[wTrait_states_allowed_TF])
}
stop("ERROR #2: see stratified code")
}
if (any(is.na(tmp_relprobs_at_branchbot_BELOW_node_UPPASS[anc, ])))
{
print("i, jj, anc")
print(i)
print(jj)
print(anc)
print("actual_probs_after_forward_exponentiation")
print(actual_probs_after_forward_exponentiation)
print("ancprobs_at_bottom_of_total_branch")
print(ancprobs_at_bottom_of_total_branch)
print("tmp_relprobs_at_branchbot_BELOW_node_UPPASS[anc, ]")
print(tmp_relprobs_at_branchbot_BELOW_node_UPPASS[anc, ])
stop("ERROR #3: see stratified code")
}
} # END if (i == num_timeperiods) # You are at the bottom tree piece, just use root node
# Do dense matrix exponentiation of the subtree branches ahead of time
if (sparse==FALSE)
{
# Get probmats for each branch, put into a big array
# Create empty array to store results
#independent_likelihoods_on_each_branch = array(0, dim=c(nrow(Qmat), ncol(Qmat), length(phy2$edge.length)))
independent_likelihoods_on_each_branch = vector("list", length(phy2$edge.length))
tmpmatrix = matrix(data=0, nrow=nrow(Qmat_tmp), ncol=ncol(Qmat_tmp))
for (m in 1:length(phy2$edge.length))
{
independent_likelihoods_on_each_branch[[m]] = tmpmatrix
}
# Calculate the conditional likelihoods for each branch
# dgexpv NOT ALLOWED when you have a null range state
# (maybe try very very small values here)
# clusterApply and other multicore stuff (e.g. doMC) are apparently dangerous on R.app
if (!is.null(cluster_already_open))
{
#
if (.Platform$GUI == "AQUA")
{
cat("In calc_loglike_sp(), cluster_already_open=", cluster_already_open, " which means you want to calculate likelihoods on branches using a multicore option.\n", sep="")
cat("But .Platform$GUI='AQUA', which means you are running the Mac GUI R.app version of R. Parallel multicore functions, e.g. as accessed via \n", sep="")
cat("library(parallel), are apparently dangerous/will crash R.app (google multicore 'R.app'). So, changing to cluster_already_open=NULL.\n", sep="")
cluster_already_open=NULL
}
}
# clusterApply etc. appear to NOT work on R.app
if (!is.null(cluster_already_open))
{
# mcmapply
#library(parallel)
#independent_likelihoods_on_each_branch = mcmapply(FUN=expokit_dgpadm_Qmat, Qmat=list(Qmat), t=phy2$edge.length, transpose_needed=TRUE, SIMPLIFY="array", mc.cores=Ncores)
independent_likelihoods_on_each_branch = clusterApply(cl=cluster_already_open, x=phy2$edge.length, fun=expokit_dgpadm_Qmat2, Qmat=Qmat_tmp, transpose_needed=TRUE)
} else {
# Not parallel processing
#independent_likelihoods_on_each_branch = mapply(FUN=expokit_dgpadm_Qmat, Qmat=list(Qmat), t=phy2$edge.length, transpose_needed=TRUE, SIMPLIFY="array")
independent_likelihoods_on_each_branch = mapply_likelihoods(Qmat_tmp, phy2, transpose_needed=TRUE)
#independent_likelihoods_on_each_branch
}
}
#######################################################
# UPPASS loop here through the subtree
#######################################################
# The root node of this subtree
rootnode = length(phy2$tip.label) + 1
for (uj in edges_to_visit_uppass) # Since we are going backwards
{
# First edge visited is ui
#print(ui)
#print("Qmat_tmp")
#print(Qmat_tmp)
#print(dim(Qmat_tmp))
# Its sister is uj
#uj <- ui - 1
ui <- uj - 1 # Since we are going backwards
# Get the node numbers at the tips of these two edges
# (phy2$edge column 1 = ancestor node, column 2 = descendant node)
left_desc_nodenum <- phy2$edge[ui, 2]
right_desc_nodenum <- phy2$edge[uj, 2]
# And for the ancestor edge (i or j shouldn't matter, should produce the same result!!!)
anc <- phy2$edge[ui, 1]
# ancedge: index of the edge whose descendant end is anc (empty at the root)
anc_edgenum_TF = phy2$edge[,2] == anc
anc_edgenum = (1:length(anc_edgenum_TF))[anc_edgenum_TF]
# For the marginal state probability, uppass calculations
# Get the mother and sister of "anc" (which is the focal node)
mother_of_anc_TF = phy2$edge[,2] == anc
mother_of_anc = phy2$edge[mother_of_anc_TF,1]
# Sister = the other edge descending from mother_of_anc: it satisfies the
# "descends from mother" condition but not the "is anc's edge" condition
sister_of_anc_TF = phy2$edge[,1] == mother_of_anc
sister_of_anc_TF2 = (sister_of_anc_TF + mother_of_anc_TF) == 1
sister_of_anc = phy2$edge[sister_of_anc_TF2,2]
# The next two bare expressions are no-ops in non-interactive code
# (leftover debugging output)
mother_of_anc
sister_of_anc
# Is the sister left or right?
# (note: these are reversed from what you would get with:
# plot(tr); nodelabels()
# Default "rootnode" signals that anc has no sister (anc is the subtree root)
sister_is_LR = "rootnode"
if (anc != rootnode)
{
if (sister_of_anc > anc)
{
sister_is_LR = "right"
} else {
sister_is_LR = "left"
} # END if (sister_of_anc > anc)
} # END if (anc != rootnode)
# get the correct edges
left_edge_TF = phy2$edge[,2] == left_desc_nodenum
right_edge_TF = phy2$edge[,2] == right_desc_nodenum
left_edgenum = (1:length(left_edge_TF))[left_edge_TF]
right_edgenum = (1:length(right_edge_TF))[right_edge_TF]
# Check the branchlength of each edge
# It's a hook if either branch is super-short
is_leftbranch_hook_TF = phy2$edge.length[left_edge_TF] < min_branchlength
is_rightbranch_hook_TF = phy2$edge.length[right_edge_TF] < min_branchlength
hooknode_TF = (is_leftbranch_hook_TF + is_rightbranch_hook_TF) > 0
#cat(i, j, left_desc_nodenum, right_desc_nodenum, hooknode_TF, "\n", sep=" ")
# You start with these uppass probs, for this node
tmp_relprobs_at_branchtop_AT_node_UPPASS[anc, ]
#print("tmp_relprobs_at_branchtop_AT_node_UPPASS")
#print(dim(tmp_relprobs_at_branchtop_AT_node_UPPASS))
#print("tmp_relprobs_at_branchtop_AT_node_UPPASS[anc, ]")
# 2014 version
#numstates = ncol(tip_condlikes_of_data_on_each_state)
# 2015 version
sum_states_allowed = sum(states_allowed_TF)
if (traitTF == TRUE)
{
wTrait_sum_states_allowed = sum(states_allowed_TF) * num_trait_states
} # END if (traitTF == TRUE)
#print("states_allowed_TF")
#print(states_allowed_TF)
# Apply speciation model to get the uppass probs at the base of the two descendant branches
if (hooknode_TF == TRUE)
{
# Just copy the probs up, since a time-continuous model was assumed.
# If you have a "hooknode" (short branch = direct ancestor), for
# the uppass, it is simpler to convert the cladogenesis model
# to an all-1s model
temp_COO_weights_columnar = COO_weights_columnar
# NJM 2016-02-24 -- see:
# /drives/Dropbox/_njm/__packages/BioGeoBEARS_setup/inst/extdata/examples/AAAB_M3_ancestor_check
# ...for an example that traces the -2 issue in detail.
# Or:
# http://phylo.wikidot.com/fossil-data-in-biogeographical-analysis-in-biogeobears#toc3
#
# Basically, this converts 1-based state numbers (e.g., 1-16, with
# 1: null range
# 2: A
# 3: B
# ...etc..
#
# ...to the 0-based state names in a cladogenesis matrix, where the
# null range is automatically excluded (if used in the first place).
#
# Then the states in the cladogenesis matrix are numbered, starting from 0.
# E.g.:
# 0: A
# 1: B
# 2: C
# ...etc...
if (include_null_range == TRUE)
{
#highest_clado_state_0based_considering_null_range = numstates - 1
highest_clado_state_0based_considering_null_range = sum_states_allowed - 2
} else {
#highest_clado_state_0based_considering_null_range = numstates
highest_clado_state_0based_considering_null_range = sum_states_allowed - 1
} # if (include_null_range == TRUE)
# If you have a "hooknode" (short branch = direct ancestor), for
# the uppass, it is simpler to convert the cladogenesis model
# to an all-1s model
# Ancestral, left, and right states all the same
temp_COO_weights_columnar[[1]] = 0:highest_clado_state_0based_considering_null_range
temp_COO_weights_columnar[[2]] = 0:highest_clado_state_0based_considering_null_range
temp_COO_weights_columnar[[3]] = 0:highest_clado_state_0based_considering_null_range
temp_COO_weights_columnar[[4]] = rep(1, highest_clado_state_0based_considering_null_range+1)
} else {
temp_COO_weights_columnar = COO_weights_columnar
} # END if (hooknode_TF == TRUE)
#print("temp_COO_weights_columnar")
#print(temp_COO_weights_columnar)
##############################################################################################
# Apply regular speciation model, with the weights given in COO_weights_columnar, and the
# normalization factor (sum of the weights across each row/ancestral state) in Rsp_rowsums.
##############################################################################################
num_nonzero_split_scenarios = length(COO_weights_columnar[[1]])
# Probs at the mother have been predetermined, in the uppass
# 1. Get uppass probabilities at the base of the branch below the
# focal (anc) node, including the probabilities coming down
# from the sister, and up from the mother.
# Check if you are at the global root: find the master-table row for this
# (stratum, piece, subtree-node) combination
TFi = inputs$master_table$stratum == i
TFjj = inputs$master_table$piecenum == jj
TF_SUBnode = inputs$master_table$SUBnode == anc
TF = ((TFi + TFjj + TF_SUBnode) == 3)
anc_node_original_tree = inputs$master_table$node[TF]
# node.type of the matched row: a character value such as "root",
# "internal", or "tip" -- NOT a logical.
# NOTE(review): the condition just below, (global_root_TF == TRUE), compares
# this character value against TRUE; R coerces TRUE to the string "TRUE" for
# the comparison, so the test is FALSE even when node.type is "root".
# Presumably the intent was (global_root_TF == "root") -- TODO confirm
# against the full function before changing, since the else-branch also
# handles (anc == rootnode) separately.
global_root_TF = inputs$master_table$node.type[TF]
if ((anc == rootnode) && (global_root_TF == TRUE))
#if (anc == rootnode)
{
# You ARE at the global ancestor (root) node
probs_at_mother = 1/length(starting_probs)
likes_at_sister = 1/length(starting_probs)
left_branch_downpass_likes = NULL
right_branch_downpass_likes = NULL
#tmp_relprobs_at_branchbot_BELOW_node_UPPASS[anc,] = NA
probs_of_mother_and_sister_uppass_to_anc = 1/length(starting_probs)
tmp_relprobs_at_branchbot_BELOW_node_UPPASS[anc,] = probs_of_mother_and_sister_uppass_to_anc
} else {
# You ARE NOT at the global ancestor (root) node
if (anc == rootnode)
{
probs_at_mother = 1/length(starting_probs)
probs_of_mother_and_sister_uppass_to_anc = tmp_relprobs_at_branchtop_AT_node_UPPASS[anc,]
} else {
probs_at_mother = tmp_relprobs_at_branchtop_AT_node_UPPASS[mother_of_anc,]
} # END if (anc == rootnode)
likes_at_sister_branch_bottom = tmp_relprobs_at_branchbot_BELOW_node_DOWNPASS[sister_of_anc,]
if (sister_is_LR == "left")
{
left_branch_downpass_likes = likes_at_sister_branch_bottom
right_branch_downpass_likes = NULL
}
if (sister_is_LR == "right")
{
left_branch_downpass_likes = NULL
right_branch_downpass_likes = likes_at_sister_branch_bottom
}
# Calculate the uppass probs at the branch
#print("calculation of uppass at split")
#print(probs_at_mother)
#print(left_branch_downpass_likes)
#print(right_branch_downpass_likes)
if (anc != rootnode)
{
#print("Here123")
#print("probs_at_mother")
#print(probs_at_mother)
#print("left_branch_downpass_likes")
#print(left_branch_downpass_likes)
#print("right_branch_downpass_likes")
#print(right_branch_downpass_likes)
if (traitTF == FALSE)
{
uppass_probs_at_bottom_below_anc_results = calc_uppass_probs_new2(probs_ancstate=probs_at_mother, COO_weights_columnar=temp_COO_weights_columnar, numstates=sum_states_allowed, include_null_range=include_null_range, left_branch_downpass_likes=left_branch_downpass_likes, right_branch_downpass_likes=right_branch_downpass_likes, Rsp_rowsums=NULL)
} else {
uppass_probs_at_bottom_below_anc_results = calc_uppass_probs_new2(probs_ancstate=probs_at_mother, COO_weights_columnar=temp_COO_weights_columnar, numstates=wTrait_sum_states_allowed, include_null_range=include_null_range, left_branch_downpass_likes=left_branch_downpass_likes, right_branch_downpass_likes=right_branch_downpass_likes, Rsp_rowsums=NULL)
} # END if (traitTF == FALSE)
#print("...finished uppass at split")
# Store
if (sister_is_LR == "left")
{
Rprobs_brbot_below_anc = uppass_probs_at_bottom_below_anc_results$relprobs_just_after_speciation_UPPASS_Right
tmp_relprobs_at_branchbot_BELOW_node_UPPASS[anc,] = Rprobs_brbot_below_anc
}
if (sister_is_LR == "right")
{
Lprobs_brbot_below_anc = uppass_probs_at_bottom_below_anc_results$relprobs_just_after_speciation_UPPASS_Left
tmp_relprobs_at_branchbot_BELOW_node_UPPASS[anc,] = Lprobs_brbot_below_anc
}
# 2. Exponentiate up from the mother to the focal/anc node
probs_at_branch_bot = tmp_relprobs_at_branchbot_BELOW_node_UPPASS[anc,]
if (force_sparse == FALSE)
{
probs_of_mother_and_sister_uppass_to_anc = probs_at_branch_bot %*% expokit_dgpadm_Qmat2(times=phy2$edge.length[anc_edgenum], Qmat=Qmat_tmp, transpose_needed=TRUE)
} else {
probs_of_mother_and_sister_uppass_to_anc = calc_prob_forward_onebranch_sparse(relprobs_branch_bottom=probs_at_branch_bot, branch_length=phy2$edge.length[anc_edgenum], tmpQmat_in_REXPOKIT_coo_fmt=tmpQmat_in_REXPOKIT_coo_fmt, coo_n=length(probs_at_branch_bot), anorm=NULL, check_for_0_rows=TRUE)
}
} else {
# Subtree rootnode, so, the uppass probability was already
# determined in processing the root branch of the subtree
probs_of_mother_and_sister_uppass_to_anc
} # END if (anc != rootnode)
# Store in uppass
tmp_relprobs_at_branchtop_AT_node_UPPASS[anc,] = probs_of_mother_and_sister_uppass_to_anc
} # END if (anc == rootnode)
##################################################################
# Finish uppass to tips
##################################################################
# Check if either the left or right descendant nodes are tips;
# if so, do the exponentiation here, so as to completely fill
# in the UPPASS table
##################################################################
# If Left descendant is a tip
if (left_desc_nodenum <= length(phy2$tip.label))
{
probs_at_anc = tmp_relprobs_at_branchtop_AT_node_UPPASS[anc,]
left_branch_downpass_likes = NULL
right_branch_downpass_likes = tmp_relprobs_at_branchbot_BELOW_node_DOWNPASS[right_desc_nodenum,]
if (traitTF == FALSE)
{
uppass_probs_at_bottom_below_tip_results = calc_uppass_probs_new2(probs_ancstate=probs_at_anc, COO_weights_columnar=temp_COO_weights_columnar, numstates=sum_states_allowed, include_null_range=include_null_range, left_branch_downpass_likes=left_branch_downpass_likes, right_branch_downpass_likes=right_branch_downpass_likes, Rsp_rowsums=NULL)
} else {
uppass_probs_at_bottom_below_tip_results = calc_uppass_probs_new2(probs_ancstate=probs_at_anc, COO_weights_columnar=temp_COO_weights_columnar, numstates=wTrait_sum_states_allowed, include_null_range=include_null_range, left_branch_downpass_likes=left_branch_downpass_likes, right_branch_downpass_likes=right_branch_downpass_likes, Rsp_rowsums=NULL)
}
# The UPPASS probabilities below the tip:
Lprobs_brbot_below_tip = uppass_probs_at_bottom_below_tip_results$relprobs_just_after_speciation_UPPASS_Left
#print(dim(Qmat_tmp))
#print(Lprobs_brbot_below_tip)
#print(dim(Lprobs_brbot_below_tip))
#print(length(Lprobs_brbot_below_tip))
# The UPPASS probabilities AT the tip:
if (force_sparse == FALSE)
{
Lprobs_brtop_AT_tip = Lprobs_brbot_below_tip %*% expokit_dgpadm_Qmat2(times=phy2$edge.length[left_edgenum], Qmat=Qmat_tmp, transpose_needed=TRUE)
} else {
# Sparse matrix exponentiation
Lprobs_brtop_AT_tip = calc_prob_forward_onebranch_sparse(relprobs_branch_bottom=Lprobs_brbot_below_tip, branch_length=phy2$edge.length[left_edgenum], tmpQmat_in_REXPOKIT_coo_fmt=tmpQmat_in_REXPOKIT_coo_fmt, coo_n=numstates_geogtrait, anorm=NULL, check_for_0_rows=TRUE)
}
# Store: branch bottoms
tmp_relprobs_at_branchbot_BELOW_node_UPPASS[left_desc_nodenum,] = Lprobs_brbot_below_tip
# Store: branch tops
tmp_relprobs_at_branchtop_AT_node_UPPASS[left_desc_nodenum,] = Lprobs_brtop_AT_tip
} # END if (left_desc_nodenum <= length(phy2$tip.label))
# If Right descendant is a tip
if (right_desc_nodenum <= length(phy2$tip.label))
{
probs_at_anc = tmp_relprobs_at_branchtop_AT_node_UPPASS[anc,]
right_branch_downpass_likes = NULL
# print("left_desc_nodenum:")
# print(left_desc_nodenum)
#
# print("tmp_relprobs_at_branchbot_BELOW_node_DOWNPASS:")
# print(tmp_relprobs_at_branchbot_BELOW_node_DOWNPASS)
#
# print("dim(tmp_relprobs_at_branchbot_BELOW_node_DOWNPASS):")
# print(dim(tmp_relprobs_at_branchbot_BELOW_node_DOWNPASS))
#
# print("states_allowed_TF:")
# print(states_allowed_TF)
# print("length(states_allowed_TF):")
# print(length(states_allowed_TF))
#
# print("c(uj, ui, jj, i):")
# print(c(uj, ui, jj, i))
left_branch_downpass_likes = tmp_relprobs_at_branchbot_BELOW_node_DOWNPASS[left_desc_nodenum,]
if (traitTF == FALSE)
{
uppass_probs_at_bottom_below_tip_results = calc_uppass_probs_new2(probs_ancstate=probs_at_anc, COO_weights_columnar=temp_COO_weights_columnar, numstates=sum_states_allowed, include_null_range=include_null_range, right_branch_downpass_likes=right_branch_downpass_likes, left_branch_downpass_likes=left_branch_downpass_likes, Rsp_rowsums=NULL)
} else {
uppass_probs_at_bottom_below_tip_results = calc_uppass_probs_new2(probs_ancstate=probs_at_anc, COO_weights_columnar=temp_COO_weights_columnar, numstates=wTrait_sum_states_allowed, include_null_range=include_null_range, right_branch_downpass_likes=right_branch_downpass_likes, left_branch_downpass_likes=left_branch_downpass_likes, Rsp_rowsums=NULL)
}
# The UPPASS probabilities below the tip:
Rprobs_brbot_below_tip = uppass_probs_at_bottom_below_tip_results$relprobs_just_after_speciation_UPPASS_Right
#print(dim(Qmat_tmp))
#print(Rprobs_brbot_below_tip)
#print(dim(Rprobs_brbot_below_tip))
#print(length(Rprobs_brbot_below_tip))
# The UPPASS probabilities AT the tip:
if (force_sparse == FALSE)
{
Rprobs_brtop_AT_tip = Rprobs_brbot_below_tip %*% expokit_dgpadm_Qmat2(times=phy2$edge.length[right_edgenum], Qmat=Qmat_tmp, transpose_needed=TRUE)
} else {
# Sparse matrix exponentiation
Rprobs_brtop_AT_tip = calc_prob_forward_onebranch_sparse(relprobs_branch_bottom=Rprobs_brbot_below_tip, branch_length=phy2$edge.length[right_edgenum], tmpQmat_in_REXPOKIT_coo_fmt=tmpQmat_in_REXPOKIT_coo_fmt, coo_n=numstates_geogtrait, anorm=NULL, check_for_0_rows=TRUE)
}
# Store: branch bottoms
tmp_relprobs_at_branchbot_BELOW_node_UPPASS[right_desc_nodenum,] = Rprobs_brbot_below_tip
# Store: branch tops
tmp_relprobs_at_branchtop_AT_node_UPPASS[right_desc_nodenum,] = Rprobs_brtop_AT_tip
} # END if (right_desc_nodenum <= length(phy2$tip.label))
##################################################################
# END finish uppass to tips
##################################################################
# Zero out impossible states
# Do NOT do this, since we are in the subset state space
# if (!is.null(states_allowed_TF))
# {
# tmp_relprobs_at_branchbot_BELOW_node_UPPASS[anc,][states_allowed_TF==FALSE] = 0
# tmp_relprobs_at_branchbot_BELOW_node_UPPASS[anc,][states_allowed_TF==FALSE] = 0
#
# tmp_relprobs_at_branchbot_BELOW_node_UPPASS[left_desc_nodenum,][states_allowed_TF==FALSE] = 0
# tmp_relprobs_at_branchbot_BELOW_node_UPPASS[right_desc_nodenum,][states_allowed_TF==FALSE] = 0
#
# tmp_relprobs_at_branchbot_BELOW_node_UPPASS[left_desc_nodenum,][states_allowed_TF==FALSE] = 0
# tmp_relprobs_at_branchbot_BELOW_node_UPPASS[right_desc_nodenum,][states_allowed_TF==FALSE] = 0
#
# } # END if (!is.null(states_allowed_TF))
# 2014-03-20_NJM
# FIXNODES FOR UPPASS
# NON-BUG BUG BUG (I don't think anyone has ever addressed UPPASS
# calculations with fixed ancestral states)
use_fixnodes_on_uppass = TRUE # turn off if desired/buggy
if (use_fixnodes_on_uppass)
{
#######################################################
# If the states/likelihoods have been fixed at a particular node
# (check top of anc branch)
#######################################################
if (!is.null(fixnode))
{
# For multiple fixnodes
# 2016-03-15_old
# if (length(fixnode) > 1)
# 2016-03-15_new by Torsten
if (length(tmp_fixnode) > 1)
{
# Get the matching node
TF = (anc == tmp_fixnode)
temporary_fixnode = tmp_fixnode[TF]
temporary_fixlikes = c(tmp_fixlikes[TF,])
} else {
temporary_fixnode = tmp_fixnode
temporary_fixlikes = c(tmp_fixlikes)
}
if ((length(temporary_fixnode) > 0) && (anc == temporary_fixnode))
{
# If the node is fixed, ignore the calculation for this node, and
# instead use the fixed likelihoods (i.e., the "known" state) for
# this node.
# fix the likelihoods of the (NON-NULL) states
# 2016-03-15_old
# tmp_relprobs_at_branchtop_AT_node_UPPASS[anc,] = tmp_relprobs_at_branchtop_AT_node_UPPASS[anc,] * temporary_fixlikes
# 2016-03-15_new by Torsten
if (traitTF == FALSE)
{
tmp_relprobs_at_branchtop_AT_node_UPPASS[anc,] = tmp_relprobs_at_branchtop_AT_node_UPPASS[anc,] * temporary_fixlikes[states_allowed_TF]
} else {
tmp_relprobs_at_branchtop_AT_node_UPPASS[anc,] = tmp_relprobs_at_branchtop_AT_node_UPPASS[anc,] * temporary_fixlikes[wTrait_states_allowed_TF]
}
}
} # end if (!is.null(fixnode))
} # end if (use_fixnodes_on_uppass)
# Normalize and save these probabilities
#tmp_relprobs_at_branchtop_AT_node_UPPASS[left_desc_nodenum,] = condprobs_Left_branch_top / sum(condprobs_Left_branch_top)
#tmp_relprobs_at_branchtop_AT_node_UPPASS[right_desc_nodenum,] = condprobs_Right_branch_top / sum(condprobs_Right_branch_top)
#######################################################
# End of UPPASS loop for this ancnode. Move to next ancnode.
#######################################################
} # End uppass loop
# Store the UPPASS relprobs in the main matrix
# Temporary matrix for UPPASS probabilities
for (rownum in 1:nrow(tmp_relprobs_at_branchtop_AT_node_UPPASS))
{
# Store the relative probabilities at branch tops, WHEN
# the internal nodes correspond to the master tree
tmp_relprobs = tmp_relprobs_at_branchtop_AT_node_UPPASS[rownum,]
subtree_node = rownum
# We could use EITHER
# (1) Internal nodes and tips of subtrees
TF1 = inputs$master_table$stratum == i
TF2 = inputs$master_table$piecenum == jj
TF3 = inputs$master_table$piececlass == "subtree"
TF4 = inputs$master_table$SUBnode == subtree_node
TF5a = inputs$master_table$node.type == "internal" # Store only if node corresponds to a node or tip in orig_tree
TF5b = inputs$master_table$node.type == "tip" # Store only if node corresponds to a node or tip in orig_tree
TF5c = inputs$master_table$node.type == "root" # Store only if node corresponds to a node or tip in orig_tree
TF_subtrees = (TF1 + TF2 + TF3 + TF4 + TF5a + TF5b + TF5c) == 5
# (2) Tips of subbranches that are also tips of the master tree
# (see below)
TF = TF_subtrees
# Store in the FINAL table
relative_probs_of_each_state_at_branch_top_AT_node_UPPASS_rownum = inputs$master_table$node[TF]
if (traitTF == FALSE)
{
relative_probs_of_each_state_at_branch_top_AT_node_UPPASS[relative_probs_of_each_state_at_branch_top_AT_node_UPPASS_rownum, ][states_allowed_TF] = tmp_relprobs
} else {
relative_probs_of_each_state_at_branch_top_AT_node_UPPASS[relative_probs_of_each_state_at_branch_top_AT_node_UPPASS_rownum, ][wTrait_states_allowed_TF] = tmp_relprobs
} # END if (traitTF == FALSE)
# the above works
#print(relative_probs_of_each_state_at_branch_bottom_below_node_UPPASS)
#print(dim(relative_probs_of_each_state_at_branch_bottom_below_node_UPPASS))
#print(relative_probs_of_each_state_at_branch_top_AT_node_UPPASS_rownum)
#print(tmp_relprobs)
# Storing the UPPass relative probabilities at branch bottoms doesn't work so well, probably
# since the branch bottom is disconnected from the top sometimes.
# You WANT to pick the subtree tips when they are cut off by a stratum boundary, in the
# case where you are doing node bottoms.
# inputs$master_table[75:100,]
#TF5a = inputs$master_table$node.type == "internal" # Store only if node corresponds to a node or tip in orig_tree
#TF5b = inputs$master_table$node.type == "tip" # Store only if node corresponds to a node or tip in orig_tree
#TF = (TF1 + TF2 + TF3 + TF4 + TF5a + TF5b) == 5
tmp_relprobs = tmp_relprobs_at_branchbot_BELOW_node_UPPASS[rownum,]
if (traitTF == FALSE)
{
relative_probs_of_each_state_at_branch_bottom_below_node_UPPASS[relative_probs_of_each_state_at_branch_top_AT_node_UPPASS_rownum, ][states_allowed_TF] = tmp_relprobs
} else {
relative_probs_of_each_state_at_branch_bottom_below_node_UPPASS[relative_probs_of_each_state_at_branch_top_AT_node_UPPASS_rownum, ][wTrait_states_allowed_TF] = tmp_relprobs
}
} # END Store the UPPASS relprobs in the main matrix
##########################################################
# tip probabilities for next stratum up
##########################################################
if (traitTF == FALSE)
{
relprobs_at_tips_for_next_stratum_up = matrix(0, nrow=length(phy2$tip.label), ncol=length(states_allowed_TF))
relprobs_at_tips_for_next_stratum_up[,states_allowed_TF] = tmp_relprobs_at_branchtop_AT_node_UPPASS[1:length(phy2$tip.label), ]
# Branch bottoms below tips can also be transferred up
relprobs_at_branch_bottoms_below_tips_for_next_stratum_up = matrix(0, nrow=length(phy2$tip.label), ncol=length(states_allowed_TF))
relprobs_at_branch_bottoms_below_tips_for_next_stratum_up[,states_allowed_TF] = tmp_relprobs_at_branchbot_BELOW_node_UPPASS[1:length(phy2$tip.label), ]
} else {
relprobs_at_tips_for_next_stratum_up = matrix(0, nrow=length(phy2$tip.label), ncol=length(wTrait_states_allowed_TF))
relprobs_at_tips_for_next_stratum_up[,wTrait_states_allowed_TF] = tmp_relprobs_at_branchtop_AT_node_UPPASS[1:length(phy2$tip.label), ]
# Branch bottoms below tips can also be transferred up
relprobs_at_branch_bottoms_below_tips_for_next_stratum_up = matrix(0, nrow=length(phy2$tip.label), ncol=length(wTrait_states_allowed_TF))
relprobs_at_branch_bottoms_below_tips_for_next_stratum_up[,wTrait_states_allowed_TF] = tmp_relprobs_at_branchbot_BELOW_node_UPPASS[1:length(phy2$tip.label), ]
} # END if (traitTF == FALSE)
# Store the relprobs at the tips, so that the next stratum up can access them...
inputs$tree_sections_list[[i]]$pieces_relprobs_at_tips[[jj]] = relprobs_at_tips_for_next_stratum_up
inputs$tree_sections_list[[i]]$pieces_relprobs_at_bottoms_below_tips[[jj]] = relprobs_at_branch_bottoms_below_tips_for_next_stratum_up
} # End if/then on branch vs. subtree
} # End loop through jj tree pieces WITHIN a stratum
} # End loop through i strata
# (2) Tips of subbranches that are also tips of the master tree
# (see below)
# Tips of the master tree
tipnums_of_master_tree = 1:length(original_phy$tip.labe)
for (tn in 1:length(tipnums_of_master_tree))
{
# Find the row of the master table
TF1 = inputs$master_table$piececlass == "orig_tip"
TF2 = inputs$master_table$node == tn
TF = ((TF1 + TF2) == 2)
tmprow = inputs$master_table[TF,]
# Now find the subbranch or subtree tip that corresponds to this tip (which could be living or extinct)
TF1 = inputs$master_table$node == tmprow$node
TF2 = inputs$master_table$time_top == tmprow$time_top
TF3 = inputs$master_table$piececlass != "orig_tip"
TF = ((TF1 + TF2 + TF3) == 3)
tmprow2 = inputs$master_table[TF,]
tmp_stratum = tmprow2$stratum
tmp_piecenum = tmprow2$piecenum
if (tmprow2$piececlass == "subtree")
{
tmp_tipnum = tmprow2$SUBnode
tmp_tipprobs_at_top_UPPASS = inputs$tree_sections_list[[tmp_stratum]]$pieces_relprobs_at_tips[[tmp_piecenum]][tmp_tipnum, ]
}
if (tmprow2$piececlass == "subbranch")
{
tmp_tipnum = tmprow2$SUBnode
tmp_tipprobs_at_top_UPPASS = inputs$tree_sections_list[[tmp_stratum]]$pieces_relprobs_at_tips[[tmp_piecenum]]
}
# Store these tip uppass probs in the final uppass probs
relative_probs_of_each_state_at_branch_top_AT_node_UPPASS[tn,] = tmp_tipprobs_at_top_UPPASS
if (any(is.na(relative_probs_of_each_state_at_branch_top_AT_node_UPPASS[tn,])))
{
print("i, jj, anc, tn")
print(i)
print(jj)
print(anc)
print(tn)
print("relative_probs_of_each_state_at_branch_top_AT_node_UPPASS[tn,]")
print(relative_probs_of_each_state_at_branch_top_AT_node_UPPASS[tn,])
print("tmp_tipprobs_at_top_UPPASS")
print(tmp_tipprobs_at_top_UPPASS)
print("tmprow2$piececlass")
print(tmprow2$piececlass)
stop("ERROR #4: see stratified code")
}
# The branch bottoms should be fine
}
cat("\nUppass completed for (STRATIFIED) marginal ancestral states estimation!\n", sep="")
} # End UPPASS calculations, if calc_ancprobs == TRUE
#return(grand_total_likelihood)
# If you are storing ALL of the conditional likelihoods that were calculated
#print(return_condlikes_table)
#print(calc_TTL_loglike_from_condlikes_table)
if ((return_condlikes_table == TRUE) || (calc_TTL_loglike_from_condlikes_table == TRUE))
{
# Return an object with the condlikes_table, AND the grand conditional likelihood
calc_loglike_sp_stratified_results = NULL
calc_loglike_sp_stratified_results$final_all_condlikes_of_each_state = final_all_condlikes_of_each_state
# Downpass conditional likelihoods at nodes and subtree tips
# (master tree tip likelihoods in stratum 0 of master_table)
calc_loglike_sp_stratified_results$condlikes_table = condlikes_table
if (calc_ancprobs == TRUE)
{
# Relative downpass probabilities (rescaled conditional likelihoods)
# at the BOTTOMS of branches of subtrees
# 2014-05-26_NJM: now INCLUDES the probabilities at the BOTTOM of the subtree ROOT BRANCH
# 2014-05-26_NJM: AND the probabilities at the bottom of sub-branches (except for the master_tree tip nodes,
# which have tip likelihoods)
calc_loglike_sp_stratified_results$relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS_TABLE = relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS_TABLE
# Uppass probabilities
calc_loglike_sp_stratified_results$relative_probs_of_each_state_at_branch_top_AT_node_UPPASS = relative_probs_of_each_state_at_branch_top_AT_node_UPPASS
calc_loglike_sp_stratified_results$relative_probs_of_each_state_at_branch_bottom_below_node_UPPASS = relative_probs_of_each_state_at_branch_bottom_below_node_UPPASS
# tmpres$relative_probs_of_each_state_at_branch_top_AT_node_UPPASS
# tmpres$relative_probs_of_each_state_at_branch_bottom_below_node_UPPASS
#######################################################
# For branch bottoms
#######################################################
ML_marginal_prob_each_state_at_branch_bottom_below_node = relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS * relative_probs_of_each_state_at_branch_bottom_below_node_UPPASS
# NJM - 2015-01-06: But, the root probabilities
# should NOT be multiplied, that would
# be downpass * downpass, which
# results in focusing probability on the
# most-probable downpass state
# Get the root node
anc_row_of_master_table_TF = inputs$master_table$node.type=="root"
anc_node_original_tree = inputs$master_table$node[anc_row_of_master_table_TF]
anc_node_original_tree
ML_marginal_prob_each_state_at_branch_bottom_below_node[anc_node_original_tree,] = relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS[anc_node_original_tree, ]
# print
ML_marginal_prob_each_state_at_branch_bottom_below_node
rowSums(ML_marginal_prob_each_state_at_branch_bottom_below_node)
ML_marginal_prob_each_state_at_branch_bottom_below_node = ML_marginal_prob_each_state_at_branch_bottom_below_node / rowSums(ML_marginal_prob_each_state_at_branch_bottom_below_node)
# print
ML_marginal_prob_each_state_at_branch_bottom_below_node
sum_MLs_bot = rowSums(ML_marginal_prob_each_state_at_branch_bottom_below_node)
sum_MLs_bot
# Check for NaN rows
NaN_TF = is.nan(sum_MLs_bot)
numNaNs = sum(NaN_TF)
numNaNs
if (numNaNs > 0)
{
nannodenums = (1:length(NaN_TF))[NaN_TF]
nannodenums_txt = paste(nannodenums, collapse=", ", sep="")
txt = paste("\n\nWARNING! ML marginal states at branch bottoms produced ", numNaNs, " NaNs for nodes:\n",
nannodenums_txt, "\n",
"This probably means your downpass probabilities resulted in all 0 probabilities for the node.\n",
"This might occur in a highly constrained model, or if your data strongly contradicts your manual fixed\n",
"likelihoods ('fixlikes') at some node(s) ('fixnode').\n",
"As a 'fix', the downpass probabilities are being used for those nodes. But this is NOT RECOMMENDED!\n",
"You should instead figure out what is causing the problem.", sep="")
cat(txt)
cat("\n\nPrinting (partial) downpass, uppass, and probability matrices to screen:\n\n", sep="")
print(relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS)
print(relative_probs_of_each_state_at_branch_bottom_below_node_UPPASS)
print(ML_marginal_prob_each_state_at_branch_bottom_below_node)
ML_marginal_prob_each_state_at_branch_bottom_below_node[NaN_TF,] = relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS[NaN_TF,]
}
#######################################################
# For state probabilities at branch tops
#######################################################
# State estimates under specified model (usually global ML)
# are downpass * uppass
ML_marginal_prob_each_state_at_branch_top_AT_node = relative_probs_of_each_state_at_branch_top_AT_node_DOWNPASS * relative_probs_of_each_state_at_branch_top_AT_node_UPPASS
# NJM - 2015-01-06: But, the root probabilities
# should NOT be multiplied, that would
# be downpass * downpass, which
# results in focusing probability on the
# most-probable downpass state
# Instead, the probabilities are just the downpass probabilities
# The root node just gets the downpass probabilities
ML_marginal_prob_each_state_at_branch_top_AT_node[anc_node_original_tree, ] = relative_probs_of_each_state_at_branch_top_AT_node_DOWNPASS[anc_node_original_tree, ]
# print
ML_marginal_prob_each_state_at_branch_top_AT_node
rowSums(ML_marginal_prob_each_state_at_branch_top_AT_node)
ML_marginal_prob_each_state_at_branch_top_AT_node = ML_marginal_prob_each_state_at_branch_top_AT_node / rowSums(ML_marginal_prob_each_state_at_branch_top_AT_node)
# print
ML_marginal_prob_each_state_at_branch_top_AT_node
#print("ML_marginal_prob_each_state_at_branch_top_AT_node[62,14]")
#print(ML_marginal_prob_each_state_at_branch_top_AT_node[62,14])
sum_MLs_top = rowSums(ML_marginal_prob_each_state_at_branch_top_AT_node)
sum_MLs_top
# Check for NaN rows
NaN_TF = is.nan(sum_MLs_top)
numNaNs = sum(NaN_TF)
numNaNs
if (numNaNs > 0)
{
nannodenums = (1:length(NaN_TF))[NaN_TF]
nannodenums_txt = paste(nannodenums, collapse=", ", sep="")
txt = paste("\n\nWARNING! ML marginal states at branch tops produced ", numNaNs, " NaNs for nodes:\n",
nannodenums_txt, "\n",
"This probably means your downpass probabilities resulted in all 0 probabilities for the node.\n",
"This might occur in a highly constrained model, or if your data strongly contradicts your manual fixed\n",
"likelihoods ('fixlikes') at some node(s) ('fixnode').\n",
"As a 'fix', the downpass probabilities are being used for those nodes. But this is NOT RECOMMENDED!\n",
"You should instead figure out what is causing the problem.", sep="")
cat(txt)
cat("\n\nPrinting (partial) downpass, uppass, and probability matrices to screen:\n\n", sep="")
print(relative_probs_of_each_state_at_branch_top_AT_node_DOWNPASS)
print(relative_probs_of_each_state_at_branch_top_AT_node_UPPASS)
print(ML_marginal_prob_each_state_at_branch_top_AT_node)
ML_marginal_prob_each_state_at_branch_top_AT_node[NaN_TF,] = relative_probs_of_each_state_at_branch_top_AT_node_DOWNPASS[NaN_TF,]
}
# Save them
calc_loglike_sp_stratified_results$ML_marginal_prob_each_state_at_branch_bottom_below_node = ML_marginal_prob_each_state_at_branch_bottom_below_node
calc_loglike_sp_stratified_results$ML_marginal_prob_each_state_at_branch_top_AT_node = ML_marginal_prob_each_state_at_branch_top_AT_node
# tmpres$ML_marginal_prob_each_state_at_branch_top_AT_node
# tmpres$ML_marginal_prob_each_state_at_branch_bottom_below_node
}
calc_loglike_sp_stratified_results$grand_total_likelihood = grand_total_likelihood
# 2014-02-05_NJM fix
calc_loglike_sp_stratified_results$total_loglikelihood = grand_total_likelihood
if (calc_ancprobs == TRUE)
{
calc_loglike_sp_stratified_results$relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS = relative_probs_of_each_state_at_branch_bottom_below_node_DOWNPASS
calc_loglike_sp_stratified_results$relative_probs_of_each_state_at_branch_top_AT_node_DOWNPASS = relative_probs_of_each_state_at_branch_top_AT_node_DOWNPASS
}
#return(calc_loglike_sp_stratified_results)
#return(condlikes_table)
}
if (return_what == "loglike")
{
return(grand_total_likelihood)
}
if (return_what == "all")
{
return(calc_loglike_sp_stratified_results)
}
# Just return the grand_total_likelihood (default)
return(grand_total_likelihood)
} # END calc_loglike_sp_stratified
#######################################################
# calc_loglike_for_optim_stratified_neg
#######################################################
# Negated wrapper around calc_loglike_for_optim_stratified(), for use with
# optimizers that MINIMIZE an objective function (e.g. GenSA). Identical
# inputs; returns -1 * log-likelihood, so minimizing this maximizes the
# likelihood.
calc_loglike_for_optim_stratified_neg <- function(params, BioGeoBEARS_run_object, phy, tip_condlikes_of_data_on_each_state, print_optim=TRUE, areas_list, states_list, force_sparse=FALSE, cluster_already_open=FALSE, min_branchlength=0.000001)
{
	# Delegate the actual log-likelihood calculation, then flip the sign
	-1 * calc_loglike_for_optim_stratified(params=params, BioGeoBEARS_run_object=BioGeoBEARS_run_object, phy=phy, tip_condlikes_of_data_on_each_state=tip_condlikes_of_data_on_each_state, print_optim=print_optim, areas_list=areas_list, states_list=states_list, force_sparse=force_sparse, cluster_already_open=cluster_already_open, min_branchlength=min_branchlength)
}
#######################################################
# calc_loglike_for_optim_stratified
#######################################################
#' Take model parameters and the data and calculate the log-likelihood -- stratified version
#'
#' This is the stratified version of \code{\link{calc_loglike_for_optim}}. This function is an input to optim or optimx, the ML
#' estimation routines.
#'
#' @param params A vector of parameters for optimization.
#' @param BioGeoBEARS_run_object Object containing the run parameters, and the model.
#' @param phy An ape tree object
#' @param tip_condlikes_of_data_on_each_state Conditional likelihoods at tips. A numeric
#' matrix with rows representing tips, and columns representing states/geographic ranges. The cells
#' give the likelihood of the observation data under the assumption that the tip has that
#' state; typically this means that the known geographic range gets a '1' and all
#' other states get a 0.
#' @param print_optim If TRUE (default), print the optimization steps as ML estimation progresses.
#' @param areas_list A list of the desired area names/abbreviations/letters (?).
#' @param states_list A list of the possible states/geographic ranges, in 0-based index form.
#' @param force_sparse Should sparse matrix exponentiation be used? Default \code{FALSE}.
#' @param cluster_already_open The cluster object, if it has already been started.
#' @param min_branchlength Nodes with branches below this branchlength will not be treated as cladogenesis events; instead, they will be treated as
#' if an OTU had been sampled from an anagenetic lineage, i.e. as if you had a direct ancestor. This is useful for putting fossils into the biogeography analysis,
#' when you have fossil species that range through time. (Note: the proper way to obtain such trees, given that most phylogenetic methods force all OTUs to be tips
#' rather than direct ancestors, is another question subject to active research. However, one method might be to just set a branch-length cutoff, and treat any
#' branches sufficiently small as direct ancestors.)
#' @return \code{ttl_loglike} The log-likelihood of the data under the input model and parameters.
#' @export
#' @seealso \code{\link{calc_loglike_sp_stratified}}, \code{\link{calc_loglike_for_optim_stratified_neg}}
#' @note Go BEARS!
#' @author Nicholas J. Matzke \email{matzke@@berkeley.edu}
#' @references
#' \url{http://phylo.wikidot.com/matzke-2013-international-biogeography-society-poster}
#' @bibliography /Dropbox/_njm/__packages/BioGeoBEARS_setup/BioGeoBEARS_refs.bib
#' @cite Matzke_2012_IBS
#' @examples
#' test=1
#'
calc_loglike_for_optim_stratified <- function(params, BioGeoBEARS_run_object, phy, tip_condlikes_of_data_on_each_state, print_optim=TRUE, areas_list, states_list, force_sparse=FALSE, cluster_already_open=FALSE, min_branchlength=0.000001)
{
	# Purpose: objective function for ML optimizers (optim/optimx/GenSA) in
	# STRATIFIED analyses. Unpacks the optimizer's parameter vector into the
	# BioGeoBEARS model object, rebuilds tip likelihoods if a detection model
	# is in use, and returns the log-likelihood of the data as computed by
	# calc_loglike_sp_stratified().
	# (See the roxygen block above for parameter documentation.)

	# Fix a states_list that encodes the null range as "_" instead of NA
	if (is.null(states_list) == FALSE)
	{
		if (is.na(states_list[[1]]) == FALSE)
		{
			if (states_list[[1]] == "_")
			{
				states_list[[1]] = NA
			} # END if (states_list[[1]] == "_")
		} # END if (is.na(states_list[[1]]) == FALSE)
	} # END if (is.null(states_list) == FALSE)

	# Apply the same null-range fix to the copy stored inside the run object
	if (is.null(BioGeoBEARS_run_object$states_list) == FALSE)
	{
		if (is.na(BioGeoBEARS_run_object$states_list[[1]]) == FALSE)
		{
			if (BioGeoBEARS_run_object$states_list[[1]] == "_")
			{
				BioGeoBEARS_run_object$states_list[[1]] = NA
			} # END if (BioGeoBEARS_run_object$states_list[[1]] == "_")
		} # END if (is.na(BioGeoBEARS_run_object$states_list[[1]]) == FALSE)
	} # END if (is.null(BioGeoBEARS_run_object$states_list) == FALSE)

	# Put the optimizer's parameter vector into the BioGeoBEARS_model_object,
	# so the current parameter values can be read out by any downstream function
	BioGeoBEARS_model_object = BioGeoBEARS_run_object$BioGeoBEARS_model_object
	BioGeoBEARS_model_object = params_into_BioGeoBEARS_model_object(BioGeoBEARS_model_object=BioGeoBEARS_model_object, params=params)

	######################################################
	# 2016-03-23_NJM: rescaling support.
	# If the parameters were rescaled for the optimizer, un-scale them back
	# to their natural values before they are used in the likelihood.
	######################################################
	if (BioGeoBEARS_run_object$rescale_params == TRUE)
	{
		BioGeoBEARS_model_object@params_table = unscale_BGB_params(scaled_params_table=BioGeoBEARS_model_object@params_table)
		BioGeoBEARS_run_object$BioGeoBEARS_model_object@params_table = BioGeoBEARS_model_object@params_table
	}

	# Update any parameters that are linked to free parameters
	BioGeoBEARS_model_object = calc_linked_params_BioGeoBEARS_model_object(BioGeoBEARS_model_object)

	# NOTE: branch-length exponentiation (parameter "b") is deliberately NOT
	# applied here -- modifying phy$edge.length at this point would corrupt
	# the sectioning of the tree into time strata.
	areas = areas_list

	# Store the updated model back in the run object; this is the copy that
	# calc_loglike_sp_stratified() sees via inputs=BioGeoBEARS_run_object
	BioGeoBEARS_run_object$BioGeoBEARS_model_object = BioGeoBEARS_model_object

	#######################################################
	# Detection model: rebuild the tip conditional likelihoods
	#######################################################
	if (BioGeoBEARS_run_object$use_detection_model == TRUE)
	{
		mean_frequency = BioGeoBEARS_model_object@params_table["mf","est"]
		dp = BioGeoBEARS_model_object@params_table["dp","est"]
		fdp = BioGeoBEARS_model_object@params_table["fdp","est"]

		# Calculate the initial tip likelihoods using the detection model.
		# return_LnLs=TRUE ensures no numerical under-flow.
		# (Assumes detects_df/controls_df rows match the tip order of phy --
		# TODO confirm against caller.)
		numareas = length(areas)
		detects_df = BioGeoBEARS_run_object$detects_df
		controls_df = BioGeoBEARS_run_object$controls_df
		tip_condlikes_of_data_on_each_state = tiplikes_wDetectionModel(states_list_0based_index=states_list, phy=phy, numareas=numareas, detects_df=detects_df, controls_df=controls_df, mean_frequency=mean_frequency, dp=dp, fdp=fdp, null_range_gets_0_like=TRUE, return_LnLs=TRUE, relative_LnLs=TRUE, exp_LnLs=TRUE, error_check=TRUE)

		# Multiply the tip likelihoods by a prior on range sizes, if one was
		# specified in the inputs
		if (is.null(BioGeoBEARS_run_object$prior_by_range_size) == FALSE)
		{
			for (iii in seq_len(nrow(tip_condlikes_of_data_on_each_state)))
			{
				tip_condlikes_of_data_on_each_state[iii,] = tip_condlikes_of_data_on_each_state[iii,] * BioGeoBEARS_run_object$prior_by_range_size
			}
		}
	} # END if (BioGeoBEARS_run_object$use_detection_model == TRUE)

	# Extract run options that are passed through to calc_loglike_sp_stratified()
	fixnode = BioGeoBEARS_run_object$fixnode
	fixlikes = BioGeoBEARS_run_object$fixlikes
	return_condlikes_table = BioGeoBEARS_run_object$return_condlikes_table
	calc_TTL_loglike_from_condlikes_table = BioGeoBEARS_run_object$calc_TTL_loglike_from_condlikes_table
	include_null_range = BioGeoBEARS_run_object$include_null_range

	# BUGFIX: "printlevel" was previously referenced in the call below without
	# ever being defined in this function, so it was silently resolved from the
	# enclosing environment; if undefined there, the try() below converted the
	# resulting error into on_NaN_error, flattening the optimizer's objective.
	# Bind it locally: preserve any value visible from an enclosing scope,
	# otherwise default to 0 (silent).
	printlevel = if (exists("printlevel", inherits=TRUE)) get("printlevel") else 0

	# Calculate the log-likelihood of the data given the current parameters.
	# Wrapped in try() so numerical failures return on_NaN_error rather than
	# aborting the optimizer.
	ttl_loglike = try(calc_loglike_sp_stratified(tip_condlikes_of_data_on_each_state=tip_condlikes_of_data_on_each_state, phy=phy, Qmat=NULL, spPmat=NULL, min_branchlength=min_branchlength, return_what="loglike", probs_of_states_at_root=NULL, rootedge=TRUE, sparse=force_sparse, printlevel=printlevel, use_cpp=TRUE, input_is_COO=force_sparse, spPmat_inputs=NULL, cppSpMethod=3, cluster_already_open=cluster_already_open, calc_ancprobs=FALSE, include_null_range=include_null_range, fixnode=fixnode, fixlikes=fixlikes, inputs=BioGeoBEARS_run_object, allareas=areas, all_states_list=states_list, return_condlikes_table=return_condlikes_table, calc_TTL_loglike_from_condlikes_table=calc_TTL_loglike_from_condlikes_table))

	if (inherits(ttl_loglike, "try-error"))
	{
		# Numerical failure: substitute the user-specified penalty
		# log-likelihood and suppress the per-iteration printout
		ttl_loglike = BioGeoBEARS_run_object$on_NaN_error
		print_optim = FALSE
	}

	if (print_optim == TRUE)
	{
		# Print the current parameter values and resulting log-likelihood
		LnL = ttl_loglike
		outvars = adf(t(c(BioGeoBEARS_model_object@params_table$est, LnL)))
		names(outvars) = c(rownames(BioGeoBEARS_model_object@params_table), "LnL")
		print(round(outvars, 3))
	}

	return(ttl_loglike)
} # END calc_loglike_for_optim_stratified
|
93369bbf16fe30e09f686bf6fc310e00453b0564
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/ggforce/inst/testfiles/enclose_points/libFuzzer_enclose_points/enclose_points_valgrind_files/1610031308-test.R
|
525d00d2518dc46f9e596952ef64c41d2b46c143
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 484
|
r
|
1610031308-test.R
|
# Auto-generated fuzzer reproduction case for ggforce:::enclose_points
# (RcppDeepState/libFuzzer + valgrind). Do not "clean up" the input values:
# `id` and `y` are deliberately zero-length while `x` carries 30 doubles
# (including NaN and subnormal values) to probe mismatched-length handling
# in the compiled C++ entry point.
testlist <- list(id = integer(0), x = c(0, 3.97874210805989e-315, 2.12199579096527e-314, 9.30777432175926e-315, NaN, -2.05226840064919e-289, -1.36845551117563e-48, 1.53573725136183e-308, 3.95027222738494e-317, 1.19530919826631e-253, 1.10468206765364e-255, -6.08576388110954e+250, NaN, 1.53005706653461e-308, 1.80107070498765e-255, NaN, 8.26615253862675e-317, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), y = numeric(0))
# Call the internal (unexported) function with the raw fuzz inputs.
result <- do.call(ggforce:::enclose_points,testlist)
# Inspect whatever comes back; if the bug reproduces, the call above crashes.
str(result)
|
f71bf52f3959a2ad60bb0f37bbe2f199ad467537
|
90707cf49a4e22a64d044b0fe5858f3b66c9d854
|
/CSI032_2019update_MED&BS.R
|
607db56e0fd64a34e257ec7f4a7da984bf85cc46
|
[] |
no_license
|
ices-tools-dev/ETC-indicators
|
d16d0f182175fa067ed841e80600e9c13b781c7f
|
97ca14e39435c1d38a84e90fe20ba8e55d9eaad1
|
refs/heads/master
| 2021-12-12T01:42:40.255017
| 2021-10-12T08:44:38
| 2021-10-12T08:44:38
| 175,591,470
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,070
|
r
|
CSI032_2019update_MED&BS.R
|
# The data for Figure 3 for the Mediterranean and Black Sea are directly taken
# from the latest STECF report on the CFP indicators update.
# Here we only aggregate the data for Figures 1 and 2, which refer to the
# current status, as 2016, which is the latest year for which a higher number of
# assessments are available.
library(dplyr)
library(tidyr)
library(data.table)
# Dataframe with STECF and GFCM stock assessments between 2016 and 2018, from STECF report
# Both input files are expected in the working directory. `key` uniquely tags
# an assessment (stock + area + source body); `ID` tags the stock + area only,
# so the same stock assessed by both STECF and GFCM shares one ID.
data1 <- read.csv("STECF_CFP_2019.csv", header = T)
data1$key <- paste0(data1$Stock, data1$Area, "STECF")
data1$ID <- paste0(data1$Stock, data1$Area)
data2 <- read.csv("GFCM_SA_2019.csv", header = T)
data2$key <- paste0(data2$Stock, data2$Area, "GFCM")
data2$ID <- paste0(data2$Stock, data2$Area)
data <- rbind(data1,data2)
names(data)
unique(data$key)
# Keep only the columns needed for the status figures.
df<- dplyr::select(data,Year,
                   key,
                   EcoRegion,
                   asses_year,
                   F_Fref,
                   B_Bref,
                   catch,
                   ID)
# Restrict to 2016 (latest year with good assessment coverage) and join the
# F-based and B-based views of the same assessments back together by key.
df2<-dplyr::full_join(df %>% dplyr::group_by(key) %>%
                        dplyr::filter(Year == 2016)%>%
                        dplyr::select(key,
                                      EcoRegion,
                                      F_Fref,
                                      catch,
                                      ID),
                      df %>%
                        dplyr::group_by(key) %>%
                        dplyr::filter(Year == 2016) %>%
                        dplyr::select(key,
                                      EcoRegion,
                                      B_Bref,
                                      ID))
df2
# This list of stocks all have ref points, but there are other assessments
# without ref points, I have to include them in figure1
# Figure 1 colour: GREEN = both reference ratios available, RED = neither,
# ORANGE = exactly one.
# NOTE(review): `df2$F_Fref != "NA"` compares against the literal string "NA";
# this only works if real missing values were read as NA (then the comparison
# yields NA, not TRUE). `!is.na(df2$F_Fref)` would state the intent directly —
# confirm column type before changing.
df2$color_fig1 <- case_when(df2$F_Fref != "NA" & df2$B_Bref != "NA" ~ "GREEN",
                            is.na(df2$F_Fref) & is.na(df2$B_Bref) ~"RED",
                            TRUE ~ "ORANGE")
# Landings (catch) summed per ecoregion and colour class, spread to one
# column per colour.
figure1 <- df2 %>%
  group_by(EcoRegion, color_fig1) %>%
  summarise(landings = sum(catch)) %>%
  ungroup() %>%
  spread(color_fig1, landings, fill=0)
# To be finished, with total number of assessed stocks and with GFCM landings
# http://www.fao.org/gfcm/data/capture-production
# I will use the full list of latest assessments downloaded from
# https://stecf.jrc.ec.europa.eu/web/stecf/dd/medbs/sambs, the 25th April 2019
# We don´t have the full list of assessments from GFCM.
# NOTE(review): `STECF_full_database_25April2019` and (below)
# `Med_catches2016_GFCM` are assumed to already exist in the workspace —
# they are never loaded in this script; confirm the loading step lives
# elsewhere.
fullMed <- STECF_full_database_25April2019
names(fullMed)
unique(fullMed$ID)
# Normalise area labels ("SA 01 05" -> "01_05") so IDs line up with df2$ID.
fullMed$Areaw <- gsub("SA ", "", fullMed$Areaw)
fullMed$Areaw <- gsub(" ", "_", fullMed$Areaw)
fullMed$ID <- paste0(fullMed$Stock, fullMed$Areaw)
unique(fullMed$ID)
names(df2)
unique(df2$ID)
# Split the full database into assessments already covered by df2 (new)
# versus older ones, and map the latter's areas onto ecoregions by hand.
new_assessments <- fullMed %>% filter(ID %in% df2$ID)
old_assessments <- anti_join(fullMed, new_assessments)
unique(old_assessments$Areaw)
old_assessments$EcoRegion <- case_when(old_assessments$Areaw %in% c("01", "05","06", "07" ,
                                                                    "09","01_05_06_07", "10", "11") ~ "Western Med.",
                                       old_assessments$Areaw %in% c("12_13_14_15_16","15_16","17_18",
                                                                    "18_19","19","20", "16") ~ "Central Med.",
                                       old_assessments$Areaw %in% c("22_23", "25") ~ "Eastern Med.",
                                       old_assessments$Areaw %in% c("29") ~ "Black Sea")
old_assessments <- old_assessments%>% filter(Year==2016)
old_assessments <- unique(old_assessments)
# Sepia shows up in two differetn rows but is the same assessment
# Hard-coded patch: manually overwrite the RED landings per ecoregion.
# NOTE(review): this assumes figure1 has exactly 4 rows in a fixed ecoregion
# order — fragile if the inputs change; verify on rerun.
figure1$RED <- c(20657.4, 4160.0, 0, 0)
gfcm <- Med_catches2016_GFCM
figure1 <- left_join(figure1, gfcm)
write.csv(figure1, file = "CSI032_figure1MED&BS_update2019.csv")
#Note, 23 May 2019:
# We can't consider B2003 a valid reference point for GES, so, all landings attributed
# to the GREEN in figure one will be moved to orange in the Mediterranean.
# Figure 2 colour: full GES traffic-light classification per stock.
# GREY = no reference ratios at all; the final TRUE branch catches the
# remaining partial cases as RED.
df2$color_fig2 <- case_when(df2$F_Fref < 1 & df2$B_Bref > 1 ~ "GREEN",
                            df2$F_Fref < 1 | df2$B_Bref > 1 ~ "ORANGE",
                            df2$F_Fref < 1 & is.na(df2$B_Bref) ~ "ORANGE",
                            is.na(df2$F_Fref) & df2$B_Bref > 1 ~ "ORANGE",
                            df2$F_Fref > 1 & df2$B_Bref < 1 ~ "RED",
                            is.na(df2$F_Fref) & is.na(df2$B_Bref) ~ "GREY",
                            TRUE ~ "RED")
# check <- unique(df2[c("color_fig2", "F_Fref", "B_Bref")])
# Count of stocks per ecoregion and colour class, one column per colour.
figure2 <- df2 %>%
  group_by(EcoRegion, color_fig2) %>%
  summarise(n= n()) %>%
  ungroup() %>%
  spread(color_fig2, n, fill=0)
# Total distinct assessed stocks per ecoregion, appended as an extra column.
DT <- data.table(df2)
n <- DT[, .(number_of_assessed_stocks = length(unique(key))), by = EcoRegion]
figure2 <- left_join(figure2, n)
write.csv(figure2, file = "CSI032_figure2MED&BS_update2019.csv")
|
9102aee502500e33989a2a37cfeb28651201799c
|
9519269fcf8afb04b023cdc9d834b5db4de97287
|
/best.R
|
77364635157b07e3018e479233fa5402c37768d5
|
[] |
no_license
|
jmscarrillo/ProgrammingAssignment3
|
3d2606cb093c4ecdd9dd87874a8bf621648dfe0a
|
16f467ddc3399c746543acf597e86e369a19eacd
|
refs/heads/master
| 2021-08-27T21:26:55.767373
| 2017-12-10T11:32:51
| 2017-12-10T11:32:51
| 113,744,322
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,346
|
r
|
best.R
|
## Coursera - Data Science - Universidad Johns Hopkins
## R Programming - Week 4
## Programming Assignment 3: instructions-hospital-quality
## José Mª Sebastián Carrillo
## Return the name of the hospital in a given state with the lowest 30-day
## mortality rate for the requested outcome, breaking ties alphabetically.
##
## Args:
##   state   - state abbreviation; must appear in the data file's State column
##   outcome - one of "heart attack", "heart failure", "pneumonia"
##
## Reads "outcome-of-care-measures.csv" from the working directory and stops
## with "invalid outcome" / "invalid state" on bad arguments.
best <- function(state, outcome) {
    # Map each supported outcome to its 30-day mortality-rate column index in
    # the source file (column 2 = hospital name, column 7 = state). Validating
    # the outcome first avoids reading the file on bad input.
    outcomeColumns <- c("heart attack" = 11,
                        "heart failure" = 17,
                        "pneumonia" = 23)
    if (!(outcome %in% names(outcomeColumns))) {
        stop("invalid outcome")
    }

    # Strings the source file uses to encode missing measurements.
    missingMarkers <- c("Not Available",
                        "No data are available from the hospital for this measure",
                        "NA")
    careMeasures <- read.csv("outcome-of-care-measures.csv",
                             na.strings = missingMarkers)

    if (!(state %in% careMeasures$State)) {
        stop("invalid state")
    }

    # Keep only hospital name, state and the requested mortality-rate column.
    hospitals <- careMeasures[c(2, 7, outcomeColumns[[outcome]])]
    colnames(hospitals) <- c("hospital name", "state", outcome)

    # Restrict to the requested state and to rows with a reported rate.
    hospitals <- hospitals[hospitals$state == state, ]
    hospitals <- hospitals[complete.cases(hospitals), ]

    # Hospitals tied for the minimum rate, ordered alphabetically by name.
    winners <- hospitals[hospitals[, 3] == min(hospitals[, 3]), ]
    winners <- winners[order(winners$"hospital name"), ]

    # First (alphabetically) best hospital.
    as.character(winners[, "hospital name"][1])
}
|
3c61ca5d6de5df86b786e43e83ea444c02f0f9b9
|
5874ae0a22213c3a692763f285a64433bd512f94
|
/R/parking.R
|
5fe48f6e457eddd3c9ab1ca92f09d7d156411cf8
|
[] |
no_license
|
d8aninja/code
|
8356486291a2db9f419549edaa525d9bbe39abfc
|
80a836db49a31ecd6d0e62aaf8b6e4417c49df68
|
refs/heads/master
| 2021-09-06T11:35:13.956195
| 2018-02-06T04:11:33
| 2018-02-06T04:11:33
| 80,898,202
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 329
|
r
|
parking.R
|
require(XML)
require(curl)
require(RCurl)
require(jsonlite)
require(httr)
# One ParkWhiz availability query for downtown Denver (lat/lng plus epoch
# start/end timestamps).
# NOTE(review): the API key is hard-coded in the URL; move it into an
# environment variable before sharing this script.
url <- 'http://api.parkwhiz.com/search/?lat=39.74&lng=-104.99&start=1476119660&end=1476130460&key=d7f78323035560d02287cb40977b035d'
j <- GET(url)
raw <- content(j, as = 'text')
# BUG FIX: the original called fromJSON(url, ...), which silently issued a
# second, redundant HTTP request; parse the body we already fetched instead.
json <- fromJSON(raw, flatten = TRUE)
# BUG FIX: write() only handles atomic vectors/matrices and fails on the
# parsed list returned by fromJSON(); persist the raw JSON text instead so
# the file contains the full response.
writeLines(raw, "test_json.json")
|
265b2ebc8c17f43dd0b188910b3c7d71f238bfff
|
e4ae7301cf27a013d9283a672d76b7885157cd41
|
/Regex.R
|
7cc96a20c865419df9349b82c41e970b49bd78ee
|
[] |
no_license
|
rishi10819/data_wrangling
|
02b06d4433914bd9f88bd73940f061dcad36e82c
|
da02e443ce96c61bb70c2444b991e1c0784b855f
|
refs/heads/master
| 2020-05-23T20:41:56.824001
| 2019-06-07T00:03:19
| 2019-06-07T00:03:19
| 186,936,617
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 492
|
r
|
Regex.R
|
library(tidyverse)
#getwd()
#setwd("C:/Edx/projects")
#getwd()
# Sample height strings used to exercise the regex patterns below.
s <- c("70", "5 ft", "4'11", "", ".", "Six feet")

# "\\d" in an R string literal yields the regex \d (any digit).
pattern1 <- "\\d|ft"
str_view_all (s, pattern1) # Highlighted- "70", "5 ft", "4'11"

# BUG FIX: "\d" is not a valid escape sequence in an R string literal, so the
# original line `pattern2 <- "\d|ft"` was a PARSE error — it prevented this
# entire file from being sourced, so none of the other examples could run.
# The broken lines are kept as comments to preserve the teaching point.
# pattern2 <- "\d|ft"
# str_view_all (s, pattern2) # Error: '\d' is an unrecognized escape in character string starting ""\d"

# Two consecutive digits, or the literal "ft".
pattern3 <- "\\d\\d|ft"
str_view_all (s, pattern3) # Highlighted- 70, ft, 11

# Any digit, or the literal word "feet".
pattern4 <- "\\d|feet"
str_view_all (s, pattern4) # Highlighted- "70", "5", "4'11", "feet"
|
f6ee5eb77ad2b98df8a6a45b4b4939e28dd36639
|
bae57f27c447250ef182abe8c6d12e13aea24ba2
|
/tests/testthat/test-sample-retrieval.R
|
2ee1422657f3573c8811bba5873683f54a053fd9
|
[] |
no_license
|
denalitherapeutics/archs4
|
a254680554856fdcb18a81a3f0fc6f71d045fd46
|
be3aa0e5b7eb3321223d5f63ef193d77a6f9a2fc
|
refs/heads/master
| 2021-04-15T16:54:55.559093
| 2018-09-19T21:54:21
| 2018-09-19T21:54:21
| 126,898,823
| 10
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,058
|
r
|
test-sample-retrieval.R
|
context("Sample and Covariate Retrieval")
if (!exists("a4")) {
  # This is loaded by the testthat/helper-all.R script when testthat is running
  # the unit tests, but included here for convenience when doing interactive
  # test development
  a4 <- Archs4Repository()
}
test_that("All expected samples come back when queried by series", {
  series <- "GSE52564"
  # Every sample the repository's table lists for this series...
  expected <- sample_table(a4) %>%
    filter(series_id == series)
  # ...should be returned by a series-level sample_info() query.
  info <- sample_info(a4, series)
  # A full join adds no rows only when both sides contain exactly the same
  # (series_id, sample_id) pairs — i.e. nothing extra and nothing missing.
  full <- expected %>%
    full_join(info, by = c("series_id", "sample_id"))
  expect_equal(nrow(full), nrow(expected))
})
test_that("(archs4_)sample_status identifies samples missing from GEO series", {
  missing.none <- "GSE52564" # The Ben Barres dataset has all samples in ARCHS4
  # The blurton jones dataset (GSE89189) used to be missing some, but those have
  # been filled out
  missing.some <- "GSE43366" # The Chiu et al. SOD1 datasets is missing some
  # `in_archs4` should be TRUE for every sample of a fully-covered series...
  ss.none <- series_status(a4, missing.none)
  expect_true(all(ss.none[["in_archs4"]]))
  # ...and FALSE for at least one sample of a partially-covered series.
  ss.some <- series_status(a4, missing.some)
  expect_true(!all(ss.some[["in_archs4"]]))
})
test_that("sample_info call handlies missing IDs gracefully", {
  # GSE43366 should have 42 samples, but is missing 7 of them as of April 8, 2018
  # when we are using "v2" of the datasets.
  expected <- tibble(
    series_id = "GSE43366",
    sample_id = paste0("GSM10611", 43:84))
  ex.missing <- c("GSM1061148", "GSM1061149", "GSM1061150", "GSM1061151",
                  "GSM1061152", "GSM1061153", "GSM1061154")
  ex.present <- setdiff(expected$sample_id, ex.missing)

  # Tests a mix of existing and missing sample identifiers.
  # Missing samples should have NAs in a number of colums. One colume that is
  # always returned is `organism`.
  res <- expect_warning(sample_info(a4, expected$sample_id), "not found")

  # Expect that we have one row for each entry in `expected.all`
  # we don't join on series_id because the sample_id's that were queried for
  # and are missing come back with NA series_id values.
  xx <- expected %>%
    full_join(res, by = c("sample_id"))
  expect_equal(nrow(xx), nrow(expected))
  expect_setequal(res$sample_id, expected$sample_id)

  # Rows with a non-NA organism are the samples ARCHS4 actually holds;
  # NA-organism rows are the queried-but-absent ones.
  found <- filter(res, !is.na(organism))
  expect_setequal(found$sample_id, ex.present)
  missed <- filter(res, is.na(organism))
  expect_setequal(missed$sample_id, ex.missing)

  # When we query for all missing samples, we still return a tibble of the same
  # form as res.all, with all NAs where you expect them to be. We don't expect
  # to throw an error.
  amiss <- expect_warning({
    sample_info(a4, ex.missing)
  }, "not found")
  expect_setequal(ex.missing, amiss$sample_id)
  expect_true(all(is.na(amiss$organism)))
})
# Tests that are no longer necessary in v4+ matrices (they were made for v2).
# These tests:
# 1. Looked for missing samples in certain seriesexercised
# 2. Identified discordant sample covariates among the mouse and human datasets
# test_that("(archs4_)sample_info warns when querying series with missing samples", {
# missing.none <- "GSE52564" # The Ben Barres dataset has all samples in ARCHS4
# missing.some <- "GSE89189" # The blurton jones iPSC paper is missing some
#
# # This series should have no missing samples
# si.none <- expect_silent(sample_info(a4, missing.none))
#
# # This series has a few missing samples
# wregex <- sprintf("%s series .*missing samples", missing.some)
# status.some <- expect_warning(sample_info(a4, missing.some), wregex)
#
# # as.DGEList should also warn when we are missng samples
# y <- expect_warning(as.DGEList(a4, missing.some), wregex)
# })
# The v4 data matrices have the same covariates in the data matrices among
# the mouse and human data.
# ------------------------------------------------------------------------------
# test_that("sample_info returns desired covariate columns", {
# # the code here looks a bit convoluted because it should be cleaned/updated
# # to support testing "universal" covariates, mouse- and human-only covariates
# # as well.
#
# ids.all <- tribble(
# ~id, ~type, ~organism, ~complete,
# "GSE69354", "series", "mouse", TRUE,
# "GSE79525", "series", "mouse", TRUE,
# "GSE98041", "series", "mouse", TRUE,
# "GSE85702", "series", "mouse", FALSE,
# "GSE99095", "series", "human", FALSE,
# "GSE88681", "series", "human", TRUE)
#
# ids.query <- tribble(
# ~id, ~type, ~organism,
# "GSE69354", "series", "mouse",
# "GSE88681", "series", "human",
# "GSM1095128", "sample", "mouse",
# "GSM1095129", "sample", "mouse",
# "GSM1095130", "sample", "mouse")
#
# def.cols <- c("Sample_title", "Sample_source_name_ch1")
# extra.cols <- c("Sample_molecule_ch1", "Sample_treatment_protocol_ch1",
# "Sample_description")
#
# # The human data have these covariates that are not in mouse:
# # * Sample_contact_laboratory
# # * Sample_description
# # * Sample_supplementary_file_2
# h.only <- c("Sample_contact_laboratory", "Sample_description",
# "Sample_supplementary_file_2")
# #
# # The mouse data have these covariates thata are not in human:
# # * Sample_contact_state
# # * Sample_growth_protocol_ch1
# # * Sample_treatment_protocol_ch1
# m.only <- c("Sample_contact_state", "Sample_growth_protocol_ch1",
# "Sample_treatment_protocol_ch1")
#
# all.cols <- c(def.cols, extra.cols)
# info <- sample_info(a4, ids.query$id, all.cols)
#
# for (col in all.cols) {
# expect_is(info[[col]], "character", info = col)
# }
#
# info.m <- filter(info, organism == "mouse")
# for (col in intersect(h.only, colnames(info))) {
# expect_true(all(is.na(info.m[[col]])), info = col)
# }
#
# info.h <- filter(info, organism == "human")
# for (col in intersect(m.only, colnames(info))) {
# expect_true(all(is.na(info.h[[col]])), info = col)
# }
# })
|
94362e604121db685a71328572627907d7be83d1
|
a1a702de311f4ff1671b27215421a7fb0677a3c9
|
/man/postr_sensplot.Rd
|
e4da4d2e73f06e030d0dae341d19a4d14d7d0393
|
[
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
josherrickson/postr
|
f16b1c943ca02380d0d99956fc2487fd57a6688e
|
10c594b409d07bbe7f6f69834bacbb9ffeaeeda2
|
refs/heads/master
| 2021-06-29T04:15:00.806107
| 2020-11-07T01:03:37
| 2020-11-07T01:03:37
| 182,087,218
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 589
|
rd
|
postr_sensplot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sensplot.R
\name{postr_sensplot}
\alias{postr_sensplot}
\alias{pr_sensplot}
\title{plot Sensitivity (TPR) and Specificity (TNR) curves}
\usage{
postr_sensplot(model, tpr.color = "red", tnr.color = "blue")
pr_sensplot(model, tpr.color = "red", tnr.color = "blue")
}
\arguments{
\item{model}{A glm logistic model}
\item{tpr.color}{Color for sensitivity (TPR) line}
\item{tnr.color}{Color for specificity (TNR) line}
}
\value{
ggplot object
}
\description{
plot Sensitivity (TPR) and Specificity (TNR) curves
}
|
9673bae1ac621bcfeb535781cf5fa465fe147695
|
5a45eb41973c7df1323c6ffdd38610b96993c168
|
/R/ClassEstimatorGRM.R
|
106d2901eb612f2f91180cd9d7a91fc38f86bfc3
|
[
"MIT"
] |
permissive
|
mihaiconstantin/changeIRT
|
5ea8ef34c90f63b12b34c7cc8f3835ad959e41a8
|
f49666493ba7a9e74fd53ce2cb28c863c94154a0
|
refs/heads/master
| 2021-06-17T20:13:19.617489
| 2017-04-26T12:27:02
| 2017-04-26T12:27:02
| 82,169,049
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 261
|
r
|
ClassEstimatorGRM.R
|
# EstimatorGRM: concrete estimator for the Graded Response Model (GRM).
# All estimation machinery is inherited from the `Estimator` base class
# (defined elsewhere in this package); this subclass only records which
# model was fitted.
EstimatorGRM = R6::R6Class("EstimatorGRM",
    inherit = Estimator,
    # private
    private = list(
    ),
    # public
    public = list(
        # Delegate construction to Estimator$initialize(data, method), then
        # tag the instance with the GRM model label.
        # NOTE(review): `model` is presumably a private field declared on
        # `Estimator` (it is not declared in this class's private list) —
        # confirm against the base class.
        initialize = function(data, method) {
            super$initialize(data, method)
            private$model = "Estimated GRM"
        }
    )
) # EstimatorGRM
|
1f623e3e99842454df89c0c414559ba8e390c541
|
80a2a1366bb284db761c908a7c61581e96c03fe7
|
/man/nonstandard_eval_arguments.Rd
|
412d01f7947e3728e5a31e22f1bf054112afb6c3
|
[
"LicenseRef-scancode-warranty-disclaimer",
"CC0-1.0",
"LicenseRef-scancode-public-domain-disclaimer",
"LicenseRef-scancode-public-domain"
] |
permissive
|
USGS-R/gsplot
|
cfe3816af99d43821fd8f8a31ea123cf269dd2b2
|
4e6adae6ac53decc4e26d59165c44dfc41c26548
|
refs/heads/main
| 2023-04-13T04:55:29.029629
| 2023-04-08T13:03:13
| 2023-04-08T13:03:13
| 37,219,882
| 5
| 20
|
CC0-1.0
| 2023-04-07T23:12:03
| 2015-06-10T20:08:10
|
R
|
UTF-8
|
R
| false
| true
| 632
|
rd
|
nonstandard_eval_arguments.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_to_view.R
\name{nonstandard_eval_arguments}
\alias{nonstandard_eval_arguments}
\title{get the embedded arguments that go into the function call}
\usage{
nonstandard_eval_arguments(fun.name, embedded.dots, parent.args)
}
\arguments{
\item{fun.name}{the name of the rendering function}
\item{embedded.dots}{expressions to be evaluated within \code{parent.args} data}
\item{parent.args}{data that should be accessible when evaluating \code{embedded.dots}}
}
\description{
get the embedded arguments that go into the function call
}
\keyword{internal}
|
c7728f0b3586e6357bfd82d709f4118a04c25e28
|
3711364cac5efb31e8293b5c8a3030353bf031ca
|
/R/cluster_regression.R
|
e7ea645b9d0aef06a2e1b1374de40f20dc0fe187
|
[] |
no_license
|
RUCyuzhao/STCMTL
|
101e7c3d7eb506496fd41e0921e23479085ce051
|
3bbe471790db6f359cfd193fcf1ff712a360c831
|
refs/heads/main
| 2023-04-07T01:07:19.040676
| 2021-04-25T01:45:02
| 2021-04-25T01:45:02
| 333,105,958
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,450
|
r
|
cluster_regression.R
|
#' @title SCMTL Algorithm(linear regression)
#'
#' @param X \code{matrix} independent variable.
#' @param Y \code{matrix} dependent variable.
#' @param group \code{matrix} specify the structure of the task.
#' @param K cluster number.
#' @param ext.prop the proportion of extreme neighbors for each cluster. Without any prior information, set ext.prop=5/min(group[-1]-group[-length(group)]).
#' @param pure.prop (optional) the expected proportion of pure tasks. By default pure.prop=0.5.
#' @param nlambda (optional) a user supplied lambda sequence. By default lambda=2^(-15,3,length=300).
#' @param alpha (optional) a 0-1 number to control the step size. By default alpha=1.
#' @param max_iter max iterations.
#' @param thresh convergence threshold for the algorithm.
#' @param show \code{TRUE/FALSE} whether show every step's result.
#' @param method2 the type of final procedure's algorithm. The value must be one of {lasso,scad}
#' @param nfold number of folds - default is 5.
#'
#' @return The estimated cluster center matrix U, membership matrix V and coefficient matrix W.
#' @import SOUP
#' @import glmnet
#' @import MASS
#' @import limSolve
#' @import ncvreg
#' @import e1071
#' @import lassoshooting
#' @export
#'
#' @examples x_train <- as.matrix(school$train$x)
#' y_train <- school$train$y[,1]
#' group <- as.numeric(school$group[1,])
#' re <- cluster_regression(x_train,y_train,group=group,K=2,ext.prop = 5/(length(group)-1)
#' ,max_iter = 10,thresh = 1e-3,method2 = "scad",show = TRUE)
cluster_regression <- function(X,Y,group,K,ext.prop=NULL,nlambda=2^(seq(-15,3,length=100)),pure.prop = 0.5,alpha = 1,max_iter = 50,thresh=1e-2,show=F,method2 = "lasso",nfold = 5){
  # T = number of tasks. `group` holds the 1-based start row of each task's
  # block in X/Y plus a sentinel end index at position T+1.
  # NOTE(review): `T` shadows base R's TRUE alias; left as-is (doc-only pass).
  T <- length(group)-1
  p <- NCOL(X)
  N <- NROW(X)
  # Z: block-diagonal design matrix — task i's rows carry its X block in
  # columns ((i-1)*p+1):(i*p), so Y ~ Z %*% vec(W) in one product.
  Z <- matrix(0,nrow = N,ncol = p*T)
  for(i in 1:T){
    Z[group[i]:(group[i+1]-1),((i-1)*p+1):(i*p)] <- X[group[i]:(group[i+1]-1),]
  }
  # Y replicated across T columns so residuals can be formed per task at once.
  y_multi <-matrix(rep(Y,T),nrow = N,byrow = F)
  # I: T x N task-indicator matrix (row i is 1 on task i's observations);
  # used below to sum absolute residuals within each task.
  I <- matrix(0,nrow = T,ncol = N)
  for(i in 1:T){
    I[i,group[i]:(group[i+1]-1)] <- 1
  }
  t1 <- Sys.time()
  # Warm start: an independent cross-validated lasso fit per task gives the
  # initial coefficient matrix beta0 (p x T) and a per-task lambda
  # (lambda.min shrunk by a factor of 10).
  beta0 <- matrix(nrow = p,ncol = T)
  lambda <- c()
  for(i in 1:T){
    lambda[i] <- cv.glmnet(X[group[i]:(group[i+1]-1),-1],Y[group[i]:(group[i+1]-1)],nfolds = nfold,type.measure = "mse",standardize=FALSE,lambda = nlambda)$lambda.min/10
    fit <- glmnet(X[group[i]:(group[i+1]-1),-1],Y[group[i]:(group[i+1]-1)],lambda = lambda[i],standardize = FALSE)
    beta0[,i]<- as.array(coef(fit))[1:p]
  }
  t_cv <- Sys.time()-t1
  # delta[i, j]: task i's summed absolute residual under task j's coefficients;
  # SOUP clusters tasks on this residual profile into K soft memberships V.
  delta <- I%*%abs(y_multi-X%*%beta0)
  soup.out <- SOUP(t(delta),Ks=K,type = "count",ext.prop = ext.prop,pure.prop = pure.prop)
  V <- t(soup.out$memberships[[1]])
  # Guard against a task with an all-zero membership row: assign it to a
  # random cluster so later least-squares steps stay well-posed.
  ind <- apply(V,2,function(x){return(sum(x^2)==0)})
  if(sum(ind)>0){
    temp <- sample(1:K,1)
    V[temp,which(ind)] <- 1
  }
  # Initial cluster centers U (p x K): row-wise least squares so that
  # beta0 ~ U %*% V (solved per coefficient via limSolve::lsei).
  U <- matrix(0,nrow = p,ncol = K)
  for(i in 1:p){
    U[i,] <- lsei(A=t(V),B=t(beta0)[,i],type = 2)$X
  }
  # NOTE(review): `jilu` and `panding` (below) are assigned but never used.
  jilu <- c()
  # All K! cluster-label permutations, used each iteration to align the new V
  # with the previous one (cluster labels are only identified up to permutation).
  choice <- permutations(K)
  # Trajectories: residual sum of squares and relative changes of V, U, W.
  rmse <- c(sum((Y-Z%*%as.vector(beta0))^2))
  V_cha <- c(sum(svd(V)$d^2))
  U_cha <- c(sum(svd(U)$d^2))
  W_cha <- c(sum(svd(U%*%V)$d^2))
  rmse_cha <- c(rmse)
  iter <- 0
  panding <- 1
  step1_time <- c()
  step2_time <- c()
  step3_time <- c()
  # Main alternating loop: (1) per-task coordinate-descent correction of the
  # coefficients, (2) re-cluster tasks with SOUP, (3) align labels and update
  # the centers U. Stops when the relative RSS change drops below `thresh`.
  while(rmse_cha[iter+1] >=thresh & alpha > 0 & iter<= max_iter){
    t0 <- Sys.time()
    V1 <- V
    U1 <- U
    beta_temp <- U1%*%V1
    # Step 1: refine each task's coefficients around the current cluster fit,
    # damped by the step size alpha. Iteration budget is small on the first
    # pass (5) and capped by min(task size, p/2) afterwards.
    for(i in 1:T){
      X_temp <- X[group[i]:(group[i+1]-1),]
      Y_temp <- Y[group[i]:(group[i+1]-1)]
      temp <- min((group[i+1]-group[i]),p/2)
      if(iter == 0)
        temp <- 5
      temp <- coordinate_decent(X_temp,Y_temp,max_iter = temp,lambda=lambda[i],beta_temp = beta_temp[,i] ,thresh = 1e-2)*alpha+(1-alpha)*beta_temp[,i]
      beta_temp[,i] <- temp[1:p]
    }
    step1_time[iter+1] <- Sys.time()-t0
    t1 <- Sys.time()
    # Step 2: re-cluster tasks on the updated residual profiles (same
    # construction and zero-row guard as the initialization above).
    delta <- I%*%abs(y_multi-X%*%beta_temp)
    soup.out <- SOUP(t(delta),Ks=K,type = "count",ext.prop = ext.prop,pure.prop = pure.prop)
    V <- t(soup.out$memberships[[1]])
    ind <- apply(V,2,function(x){return(sum(x^2)==0)})
    if(sum(ind)>0){
      temp <- sample(1:K,1)
      V[temp,which(ind)] <- 1
    }
    step2_time[iter+1] <- Sys.time()-t1
    t1 <- Sys.time()
    # Step 3a: find the row permutation of the previous V that best matches
    # the new V (smallest squared singular-value distance), and permute U's
    # columns accordingly so centers follow their clusters.
    V_cha[iter+2] <- sum(svd(V1-V)$d^2)
    ind <- choice[1,]
    for(j in 1:NROW(choice)){
      if(V_cha[iter+2] > sum(svd(V1[choice[j,],]-V)$d^2) ){
        V_cha[iter+2] <- sum(svd(V1[choice[j,],]-V)$d^2)
        ind <- choice[j,]
      }
    }
    V_cha[iter+2] <- sqrt(V_cha[iter+2]/sum(svd(V1)$d^2))
    U <- U[,ind]
    U1 <- U
    # Step 3b: update each center column by a damped lasso (lassoshooting)
    # on the partial residual that removes all other clusters' contributions.
    Y_bottom <- Z%*%as.vector(U%*%V)
    for(i in 1:K){
      X_temp <- matrix(nrow = N,ncol = p)
      for(j in 1:T){
        X_temp[group[j]:(group[j+1]-1),] <- V[i,j] * X[group[j]:(group[j+1]-1),]
      }
      Y_temp <- Y-Y_bottom+X_temp%*%U[,i]
      U[,i] <- lassoshooting::lassoshooting(X_temp,Y_temp,lambda = mean(lambda))$coefficients*alpha+(1-alpha)*U[,i]
    }
    # Bookkeeping: relative changes and RSS for this iteration.
    U_cha[iter+2] <- sqrt(sum(svd(U1-U)$d^2)/sum(svd(U1)$d^2))
    W_cha[iter+2] <- sqrt(sum(svd(beta_temp-U%*%V)$d^2)/sum(svd(beta_temp)$d^2))
    rmse[iter+2] <- sum((Y-Z%*%as.vector(U%*%V))^2)
    rmse_cha[iter+2] <- abs(rmse[iter+2]-rmse[iter+1])/rmse[iter+1]
    step3_time[iter+1] <- Sys.time()-t1
    iter <- iter+1
    if(show == TRUE){
      # Progress line (Chinese): "iteration N | objective convergence |
      # correction-step time | SOUP time | U-update time | total time".
      # The string is runtime output and is kept verbatim.
      print(paste0("第",iter,"次循环 "," 目标函数收敛:",rmse_cha[iter+1]," 纠错步运行时间:",step1_time[iter]," Soup运行时间:",step2_time[iter]," U更新时间:",step3_time[iter]," 总时间:",Sys.time()-t0))
    }
  }
  # Final refit: with the membership matrix V fixed, re-estimate each center
  # column by cross-validated lasso (glmnet) or SCAD (ncvreg) on its partial
  # residual, per `method2`.
  t1 <- Sys.time()
  Y_bottom <- Z%*%as.vector(U%*%V)
  for(i in 1:K){
    X_temp <- matrix(nrow = N,ncol = p)
    for(j in 1:T){
      X_temp[group[j]:(group[j+1]-1),] <- V[i,j] * X[group[j]:(group[j+1]-1),]
    }
    Y_temp <- Y-Y_bottom+X_temp%*%U[,i]
    if(method2=="lasso"){
      lambda_U <- cv.glmnet(X_temp[,-1],Y_temp,nfolds = nfold,family="gaussian",standardize = FALSE,lambda = nlambda)$lambda.min
      fit <- glmnet(X_temp[,-1],Y_temp,lambda = lambda_U,standardize = FALSE)
      U[,i]<- as.array(coef(fit))[1:(p)]
    }
    if(method2=="scad"){
      cvfit <- cv.ncvreg(X_temp[,-1],Y_temp,method="SCAD",nfolds = nfold,lambda = rev(nlambda),nlambda=length(nlambda))
      fit <- cvfit$fit
      U[,i] <- fit$beta[,cvfit$min]
    }
  }
  t_cv<- t_cv+Sys.time()-t1
  rmse <- c(rmse,sum((Y-Z%*%as.vector(U%*%V))^2))
  # Assemble named results: centers, memberships, change trajectories,
  # sqrt-RSS path, warm start, per-step timings and total CV time.
  l <- list(U,V,V_cha,U_cha,W_cha,sqrt(rmse),beta0,step1_time,step2_time,step3_time,t_cv)
  names(l) <- c("U","V","V_cha","U_cha","W_cha","RSS","beta0","step1_time","step2_time","step3_time","CV_time")
  return(l)
}
|
fa92c4ad87743a3f1bd9b057da12470e870887d4
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/funtimes/examples/notrend.test.Rd.R
|
32071fa3bafa6d707ac18524eafa9cf46cbc4d4f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,205
|
r
|
notrend.test.Rd.R
|
library(funtimes)
### Name: notrend.test
### Title: Sieve Bootstrap Based Test for the Null Hypothesis of no Trend
### Aliases: notrend.test
### Keywords: htest trend ts
### ** Examples
## Not run:
##D # Fix seed for reproducible simulations:
##D set.seed(1)
##D
##D #Simulate autoregressive time series of length n with smooth linear trend:
##D n <- 200
##D tsTrend <- 1 + 2*(1:n/n)
##D tsNoise <- arima.sim(n = n, list(order = c(2, 0, 0), ar = c(0.5, -0.1)))
##D U <- tsTrend + tsNoise
##D plot.ts(U)
##D
##D #Use t-test
##D notrend.test(U)
##D
##D #Use Mann-Kendall test and Yule-Walker estimates of the AR parameters
##D notrend.test(U, test = "MK", ar.method = "yw")
##D
##D #Use WAVK test for the H0 of no trend, with m-out-of-n selection of the local window:
##D notrend.test(U, test = "WAVK", factor.length = "adaptive.selection")
##D # Sample output:
##D ## Sieve-bootstrap WAVK trend test
##D ##
##D ##data: U
##D ##WAVK test statistic = 21.654, moving window = 15, p-value < 2.2e-16
##D ##alternative hypothesis: (non-)monotonic trend.
##D ##sample estimates:
##D ##$AR_order
##D ##[1] 1
##D ##
##D ##$AR_coefficients
##D ## phi_1
##D ##0.4041848
## End(Not run)
|
544c5c3816615a083aa613ae86c6918433dc9c0d
|
7b390e486201ad9f4803a600eadf6bb854dd3677
|
/man/matrixToMarray.Rd
|
bf7af215334bf0eb2406f80520c1d8d196fe9f69
|
[] |
no_license
|
Sandy4321/dst-1
|
cfeca33e7b59d651fb44794951442bff3b67bff3
|
58d11374ed989d10e84864cce5acce457c7a9c2e
|
refs/heads/master
| 2020-04-23T03:57:45.865513
| 2018-12-09T02:33:01
| 2018-12-09T02:33:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,157
|
rd
|
matrixToMarray.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/matrixToMarray.R
\name{matrixToMarray}
\alias{matrixToMarray}
\title{Transformation of the tt matrix of a relation}
\usage{
matrixToMarray(rel)
}
\arguments{
\item{rel}{An object of class bcaspec, i.e. a mass function of one variable or a relation.}
}
\value{
mtt The array (product space) representation of the tt matrix.
}
\description{
The matrix representation of a relation is converted to the array representation or product space representation.
}
\examples{
wr_tt <- matrix(c(0,1,rep(0,5),rep(c(1,0),2),1,1,0,1,0,
rep(1,3),0,1,0,rep(1,6)), ncol=4, byrow = TRUE)
colnames(wr_tt) <- c("rWdy Ry", "rWdy Rn", "rWdn Ry", "rWdn Rn")
wr_spec = matrix(c(1:7, 0.0476, 0.7619, 0.1905, 0,0,0,0),
ncol = 2, dimnames = list(NULL, c("specnb", "mass")))
wr_infovar = matrix(c(4,5,2,2), ncol = 2,
dimnames = list(NULL, c("varnb", "size")) )
wr_rel <- list(tt=wr_tt, con=0.16, spec=wr_spec,
infovar=wr_infovar,
infovaluenames= list( RdWorks=c("rWdy", "rWdn") , Rain=c("Ry", "Rn")))
class(wr_rel)="bcaspec"
z <- matrixToMarray(wr_rel)
}
\author{
Claude Boivin, Stat.ASSQ
}
|
93dc20fbe82bbb6e565702b57de56244bfb4c4f6
|
6a28ba69be875841ddc9e71ca6af5956110efcb2
|
/Applied_Statistics_And_Probability_For_Engineers_by_Douglas_C._Montgomery_And_George_C._Runger/CH2/EX2.28/EX2_28.R
|
b60643fe9353584f70672ea3990e8bd4432ee90f
|
[] |
permissive
|
FOSSEE/R_TBC_Uploads
|
1ea929010b46babb1842b3efe0ed34be0deea3c0
|
8ab94daf80307aee399c246682cb79ccf6e9c282
|
refs/heads/master
| 2023-04-15T04:36:13.331525
| 2023-03-15T18:39:42
| 2023-03-15T18:39:42
| 212,745,783
| 0
| 3
|
MIT
| 2019-10-04T06:57:33
| 2019-10-04T05:57:19
| null |
UTF-8
|
R
| false
| false
| 470
|
r
|
EX2_28.R
|
# Semiconductor failures (pg. 47): probability the product fails, computed
# with the law of total probability over three contamination levels.

# Prior probability of each contamination level.
high   <- 0.20
medium <- 0.30
low    <- 0.50

# Conditional probability of failure given each contamination level.
product_fail_given_high   <- 0.10
product_fail_given_medium <- 0.01
product_fail_given_low    <- 0.001

# P(fail) = sum over levels of P(fail | level) * P(level).
product_fail <- sum(
  c(product_fail_given_high, product_fail_given_medium, product_fail_given_low) *
  c(high, medium, low)
)
cat(product_fail,"is probability that the product fails")
|
a94d1ae2500facff07c8e6bcded1e159713ece4c
|
36419342a0ca69d678a4e35e27990c6660ea0898
|
/rMODEL_PERFOMANCE_V2.R
|
9f86477c8ce8be21b0c64fdbf891c2d74a16e615
|
[] |
no_license
|
jhanratty/HarvardXDSCapstone-git
|
f0e07c16a7f0f66bbc5ba8454198b87c7e81a774
|
c9ee89a6ab54b6779082884988d66d7bdfc7b264
|
refs/heads/main
| 2023-01-02T09:17:31.625877
| 2020-10-24T13:48:49
| 2020-10-24T13:48:49
| 306,893,839
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,124
|
r
|
rMODEL_PERFOMANCE_V2.R
|
library(tidyverse)
library(dplyr)
library(stringr)
library(formatR)
library(tibble)
library(lubridate)
library(caret)
# Load project helpers; GetTrainResults() used below is defined there.
source(file="rCapFUNCTIONS.R")
###############
## Read training file and create batch statistics
#################
## get test data set
# Version tag baked into the saved results filename ("z_ModelData_v25").
HIST_VERSION = "v25"
HISTFILE = paste("z_ModelData_", HIST_VERSION, sep="")
# Collect training results for one batch/test-set combination and persist
# them as RDS. NOTE(review): the exact shape of GetTrainResults' return is
# defined in rCapFUNCTIONS.R — the mutate/select below assumes columns
# Accuracy, FalsePos, FalseNeg, trBestTune, batch, ftype, model, testset,
# Variables, Objects; confirm there.
Results <- data.frame()
Results <- rbind(Results, GetTrainResults("zum", "_msu_test"))
saveRDS(Results, HISTFILE)
####### TABLE OF TRAINING INFORMATION
# Format the rate columns as percentages and arrange a summary table sorted
# by batch, feature type and model.
tab1 <- Results %>%
  mutate(Accuracy = sprintf("%.2f%%", Accuracy),
         FalsePos = sprintf("%.2f%%", FalsePos),
         FalseNeg = sprintf("%.2f%%", FalseNeg),
         BestTune = trBestTune) %>%
  select(batch, Type = ftype, Model = model, Testset = testset, Accuracy, FalsePos, FalseNeg, Variables, Objects, trBestTune) %>% arrange(batch, Type, Model)
# Print the summary table (interactive/knit output).
tab1
#cap_out <- paste("Model Training Information")
#knitr::kable(tab1, align = c("c"), caption = cap_out) %>%
#  kable_styling(latex_options =
#                  c("striped", "left"),
#                  full_width = F)
##############################################
##############################################
|
2cb62222a80bc4d03f156db92434dd37b3f3d0fd
|
6a28ba69be875841ddc9e71ca6af5956110efcb2
|
/Statistics_For_Business_And_Economics_by_Anderson_Sweeney_And_Williams/CH15/EX15.4a/Ex15_4a.R
|
4bd36f037b1587ea16c5ee55de411d38c2dd8acf
|
[] |
permissive
|
FOSSEE/R_TBC_Uploads
|
1ea929010b46babb1842b3efe0ed34be0deea3c0
|
8ab94daf80307aee399c246682cb79ccf6e9c282
|
refs/heads/master
| 2023-04-15T04:36:13.331525
| 2023-03-15T18:39:42
| 2023-03-15T18:39:42
| 212,745,783
| 0
| 3
|
MIT
| 2019-10-04T06:57:33
| 2019-10-04T05:57:19
| null |
UTF-8
|
R
| false
| false
| 2,078
|
r
|
Ex15_4a.R
|
# Page no. : 660 - 662
# Testing for Significance
driving_assignment <- c(1,2,3,4,5,6,7,8,9,10)
x1 <- c(100,50,100,100,50,80,75,65,90,90) # Miles Traveled
x2 <- c(4,3,4,2,2,2,3,4,3,2) # Number of Deliveries
y <- c(9.3,4.8,8.9,6.5,4.2,6.2,7.4,6.0,7.6,6.1) # Travel Time (hours)
DF <- data.frame(driving_assignment, x1, x2, y)
model <- lm(y ~ x1 + x2, data = DF)
summ <- summary(model)
SSR <- 21.6252 # Sum of Squares due to Regression
SSE <- 2.2952 # Sum of Squares due to Error
n <- nrow(DF) # Total Observations
p <- 2 # Number of Independent Variables
MSR <- SSR / p # Mean Square due to Regression
cat("Value of MSR is",MSR)
MSE <- SSE / (n - p - 1) # Mean Square due to Error
# F Test
fval <- summ$fstatistic[1]
alpha <- 0.01
# Upper Tail Test
# P-value Approach
pval <- summ$coefficients[11]
if(pval >= alpha)
{
cat("Since pval",pval,"is greater than or equal to 0.01 therefore we cannot reject the Null Hypothesis")
} else{
cat("Since pval",pval,"is less than 0.01 therefore we can reject the Null Hypothesis")
}
s <- summ$sigma # Standard Error of the Estimate
cat("Value of Standard Error of the Estimate is",s)
coeff <- as.data.frame(summ$coefficients)
b1 <- coeff$Estimate[2]
b2 <- coeff$Estimate[3]
s1 <- coeff$`Std. Error`[2] # Standard Error of the x1 (Miles Traveled)
s2 <- coeff$`Std. Error`[3] # Standard Error of the x2 (Number of Deliveries)
df <- n - p - 1 # Degrees of Freedom
# T Test
tval1 <- round(b1 / s1, 3)
tval2 <- round(b2 / s2, 3)
# Upper Tail Test
# P-value Approach
pval1 <- round(pt(tval1, df, lower.tail = F),5)
pval2 <- round(pt(tval2, df, lower.tail = F),5)
if(pval1 >= alpha && pval2 >= alpha)
{
cat("Since pval1",pval1,"and pval2",pval2,"is greater than or equal to 0.01 therefore we cannot reject the
Null Hypothesis")
} else{
cat("Since pval1",pval1,"and pval2",pval2,"is less than 0.01 therefore we can reject the Null Hypothesis")
}
|
610a7b6e58f453fe22ced2eb857fa339f498b085
|
4cdac045c50303d20908ebf3c86343adcfbf3231
|
/tests/testthat/testpcistep.R
|
1cf52a1e9d379a154755bade946bc2c20ef5dae0
|
[] |
no_license
|
timcdlucas/paleomorph
|
9d856dc362c0c016bd9e79aaa21dfd3312b6bf5e
|
681e75f367bc77e1fc454ab484a49945dda72905
|
refs/heads/master
| 2021-01-17T16:57:52.148003
| 2017-05-23T10:43:56
| 2017-05-23T10:43:56
| 62,060,244
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,101
|
r
|
testpcistep.R
|
context('Test the pci initialisation step')
library(abind)
test_that('pci centers shapes',{
cube1 <- rbind(c(1, 1, 1), c(1, -1, 1), c(-1, -1, 1), c(-1, 1, 1), c(1, 1, -1), c(1, -1, -1), c(-1, -1, -1), c(-1, 1, -1))
cube2 <- rbind(c(2, 2, 2), c(2, 0, 2), c(0, 0, 2), c(0, 2, 2), c(2, 2, 0), c(2, 0, 0), c(0, 0, 0), c(0, 2, 0))
expect_true(all(cube2 - cube1 == 1))
A <- abind(cube1, cube2, along = 3)
A <- pcistep(A, scale = FALSE)
expect_equal(A[, , 1], A[, , 2])
expect_equal(lcentroid(A[, , 1]), c(0, 0, 0))
expect_equal(lcentroid(A[, , 2]), c(0, 0, 0))
})
test_that('pci centers shapes with missing data',{
cube1 <- rbind(c(1, 1, 1), c(1, -1, 1), c(-1, -1, 1), c(-1, 1, 1), c(1, 1, -1), c(1, -1, -1), c(-1, -1, -1), c(-1, 1, -1))
cube2 <- rbind(c(2, 2, 2), c(2, 0, 2), c(0, 0, 2), c(NA, NA, NA), c(2, 2, 0), c(2, 0, 0), c(0, 0, 0), c(0, 2, 0))
A <- abind(cube1, cube2, along = 3)
A <- pcistep(A)
expect_equal(lcentroid2(A[, , 1]), c(0, 0, 0))
expect_equal(lcentroid2(A[, , 2]), c(0, 0, 0))
})
test_that('pci deals with scale properly', {
})
|
2c50749dbd735b120006b402fb535e2ce067b418
|
67914b071edac3367ada71e7f9e5cbc2b395e24a
|
/man/plot.gainstable.Rd
|
e9df62508b56cb045292ff58b6829b296b22476b
|
[] |
no_license
|
cran/ROCit
|
43da0f148b2b0967ff97c55f852f80c456ffef93
|
f84177043abd5aeb36be0aa4d50f75f67464a977
|
refs/heads/master
| 2021-07-11T10:47:04.072089
| 2020-06-14T09:20:03
| 2020-06-14T09:20:03
| 168,431,270
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,054
|
rd
|
plot.gainstable.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotGainsTable.R
\name{plot.gainstable}
\alias{plot.gainstable}
\title{Plot \code{"gainstable"} Object}
\usage{
\method{plot}{gainstable}(
x,
y = NULL,
type = 1,
col = c("#BEBEBE", "#26484F", "#8B4500"),
legend = TRUE,
... = NULL
)
}
\arguments{
\item{x}{An object of class \code{"gainstable"}, created with
the function \code{\link{gainstable}}.}
\item{y}{\code{NULL}.}
\item{type}{Plot type. See "Details".}
\item{col}{Colors to be used for plot.}
\item{legend}{A logical value indicating whether legend to appear.
See "Details"}
\item{...}{\code{NULL}. Used for S3 generic/method consistency.}
}
\description{
An S3 method to make different plots using
entries of gains table.
}
\details{
Currently three types are available. \code{type = 1} shows
lift and cumulative lift against population depth. \code{type = 2}
shows response
rate and cumulative response rate against population depth.
\code{type = 3} shows
cumulative capture rate of positive responses against population depth.
For \code{type} 1 and 2, three colors and for 3, two colors
are required.
If more than required specified, then first 3 (for \code{type} 1, 2) or
2 (for \code{type} 3) colors are used. If less than required specified,
then
specified colors are repeated.
If \code{legend} is \code{TRUE},
then legend appears in the plot. For \code{type} 1 and 2, legend
position is \code{"topright"}, for 3, \code{"bottomright"}.
}
\examples{
data("Loan")
class <- Loan$Status
score <- Loan$Score
rocit_emp <- rocit(score = score, class = class, negref = "FP")
# ----------------------------------------------------------------
gtable <- gainstable(rocit_emp)
# ----------------------------------------------------------------
plot(gtable)
plot(gtable, legend = FALSE)
plot(gtable, col = 2:4)
plot(gtable, type = 2, col = 2:4)
plot(gtable, type = 3, col = 2:3)
}
\seealso{
\code{\link{gainstable}}, \code{\link{rocit}}
}
|
3123e15b72723bd43d110023c162f05312a260e3
|
c174e265381f3924de8aadf3da0c498abf74f257
|
/man/predict.snnrce.Rd
|
19da93262e1bc48c5d35e8205a5be70d006ede62
|
[] |
no_license
|
mabelc/SSC
|
4cde0c396784e17a5412de2c94b27e264f5a975f
|
4565f07e0f197e823bcea8442ed9ea82b0c94712
|
refs/heads/master
| 2022-04-05T12:12:36.306805
| 2019-12-16T20:06:18
| 2019-12-16T20:06:18
| 119,087,635
| 10
| 5
| null | 2019-12-16T20:06:19
| 2018-01-26T18:24:37
|
R
|
UTF-8
|
R
| false
| true
| 813
|
rd
|
predict.snnrce.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SNNRCE.R
\name{predict.snnrce}
\alias{predict.snnrce}
\title{Predictions of the SNNRCE method}
\usage{
\method{predict}{snnrce}(object, x, ...)
}
\arguments{
\item{object}{SNNRCE model built with the \code{\link{snnrce}} function.}
\item{x}{A object that can be coerced as matrix.
Depending on how was the model built, \code{x} is interpreted as a matrix
with the distances between the unseen instances and the selected training instances,
or a matrix of instances.}
\item{...}{This parameter is included for compatibility reasons.}
}
\value{
Vector with the labels assigned.
}
\description{
Predicts the label of instances according to the \code{snnrce} model.
}
\details{
For additional help see \code{\link{snnrce}} examples.
}
|
066a65c2c45a8bb57aa72b15ebb7e0d8cef92892
|
5b02b75b1732b4bf06b6ab45cca4409c6d8352fa
|
/plot2.R
|
d882b4ea2fef3c70f6464c47c3916b3bf66e92a5
|
[] |
no_license
|
hrishikeshdeshpande/ExData_Plotting1
|
37a36a4b0cfeadb7d5f532129c57f467f0d071fd
|
2d5d2f8c2214cebc7cc175c2d64b67a7ed118a55
|
refs/heads/master
| 2021-01-22T16:38:00.015533
| 2015-05-06T09:12:41
| 2015-05-06T09:12:41
| 35,110,680
| 0
| 0
| null | 2015-05-05T16:21:13
| 2015-05-05T16:21:13
| null |
UTF-8
|
R
| false
| false
| 901
|
r
|
plot2.R
|
#read data
powerData<- read.table("household_power_consumption.txt",
sep=";", header=TRUE, quote= "",
strip.white=TRUE, stringsAsFactors = FALSE, na.strings= "?")
# Subset the data related to first two days of Feb 2007
ExtrPowData<- subset(powerData,
(powerData$Date %in% c("1/2/2007","2/2/2007")
)
)
# change the date column to date/time class
ExtrPowData$Date <- as.Date(ExtrPowData$Date, format = "%d/%m/%Y")
ExtrPowData$DateTime <- as.POSIXct(paste(ExtrPowData$Date, ExtrPowData$Time))
# create Plot and save to plot2.png
png("plot2.png", width=480, height= 480)
plot(ExtrPowData$DateTime,
ExtrPowData$Global_active_power,
ylab= "Global Active Power (kilowatts)", xlab= "", type="l",lwd=1)
dev.off()
|
0eee2d26cc7ca6d3710820fde6603b9486158259
|
248d5d8b6dc90a84c170106a1e58e4592cf29491
|
/man/SeerMapper2010East-package.Rd
|
fad98eb6cb4e54fc4674e88af2bbc4fe21d2b71d
|
[] |
no_license
|
cran/SeerMapper2010East
|
bcbb60e4b42bdf3b74138f13a5ec7553d292b26e
|
e34319985969113136f36c955a8c86186564f6ca
|
refs/heads/master
| 2021-08-23T09:49:17.725382
| 2021-01-12T20:00:14
| 2021-01-12T20:00:14
| 88,275,493
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,726
|
rd
|
SeerMapper2010East-package.Rd
|
\name{SeerMapper2010East-package}
\alias{SeerMapper2010East-package}
\docType{package}
\title{\var{SeerMapper}-Supplemental 2010 Census Tract East States Boundary Datasets}
\description{
The \var{SeerMapper2010East} package provides supplemental 2010 Census
Tract boundary datasets for 20 states, district and territory without Seer Registries that
are east of the Mississippi river for use with \var{SeerMapper} package.
The \var{SeerMapper2010West} data only package contains the 2010 census
tract boundary datasets for 13 states west of the Mississippi river
that do not have Seer Registries. The \var{SeerMapper2010Regs}
data only package contains the 2010 Census Tract boundary datasets
for 19 states with Seer Registries.
By loading these packages (via the library function) expands the
\var{SeerMapper} package abilities to map 2010 census tract level
data in any of the 50 states, the District of Columbia and Puerto Rico.
}
\details{
\tabular{ll}{
Package: \tab SeerMapper2010East\cr
Type: \tab Package\cr
Version: \tab 1.2.4\cr
Date: \tab 2020-06-22\cr
License: \tab GPL-2\cr
LazyLoad: \tab no\cr
}
No special action is required by the user to install this package or
load it when running \var{SeerMapper}. The defined interdependencies
of the packages automatically install the supplemental 2000 census tract
boundary packages when \var{SeerMapper} is install.
When \var{SeerMapper} is called, if it determines that one or more
supplemental packages need to be loaded, it initiates the loading for
the caller.
\preformatted{
library(SeerMapper)
rateDate <- data.frame(FIBS=stateFibs,
Rate=stateRateVector,
pValue=stateRatePValue)
SeerMapper2010(rateData, Title=c("Test Map"))
}
If the package attachment or dataset load fails, the caller is
notified of the problem and mapping is terminated.
The \var{SeerMapperEast} package contains the 2000 census tract
boundary datasets for:
\preformatted{
FIPS Name
01 Alabama
10 Delaware
11 District of Columbia
12 Florida
17 Illinois
18 Indiana
23 Maine
24 Maryland
28 Mississippi
33 New Hampshire
37 North Carolina
39 Ohio
42 Pennsylvania
44 Rhode Island
45 South Carolina
47 Tennessee
50 Vermont
51 Virginia
54 West Virginia
72 Puerto Rico
}
}
\author{
Jim Pearson \email{jbpearson353@gmail.com}\cr
Maintainer: "Joe Zou" \email{zouj@imsweb.com}
}
|
decc30f3e6f1438c84815ebc9d4716543e684153
|
08a1682949e7efa5e3b5c557bc1fc3eddb01b076
|
/remove duplicate rows (rows).R
|
b0cab079ac297c0f10a5c5219c0032f36fa74650
|
[] |
no_license
|
romanmilyanik/my-simple-examples
|
b21b69a6214fb0996805d03058817b1949d71bad
|
3015970cbe0d517fa00f74865b9d82cfeb9321dc
|
refs/heads/master
| 2021-01-23T00:35:33.061581
| 2017-10-01T13:28:28
| 2017-10-01T13:28:28
| 85,748,895
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 913
|
r
|
remove duplicate rows (rows).R
|
library(dplyr)
a <- data.frame(city = c("Istanbul","Istanbul","Istanbul", "Lviv","Lviv", "Kyiv"),
country = c("Turkey","Turkey.1","Turkey.2", "Ukraine", "Ukraine", "Ukraine"))
# city country
# Istanbul Turkey
# Istanbul Turkey.1
# Istanbul Turkey.2
# Lviv Ukraine
# Lviv Ukraine
# Kyiv Ukraine
a_2 <- a %>% distinct(city, .keep_all = T) # .keep_all = T - залишити всі стовпці
or
a_2 <- a[!duplicated(a$city),]
# city country
# Istanbul Turkey
# Lviv Ukraine
# Kyiv Ukraine
# залишити лише унікальні рядки
a_3 <- a %>% distinct
# city country
# Istanbul Turkey
# Istanbul Turkey.1
# Istanbul Turkey.2
# Lviv Ukraine
# Kyiv Ukraine
# more at:
# http://stackoverflow.com/questions/13967063/remove-duplicate-rows-in-r
|
82f23e43e6ce4d63ed58e6a1f28a72e87fb06865
|
4f0b6567a19d2babeafa616c6d8ddf5572004c37
|
/data-science-scripts/zach/sample_wild.R
|
48edcaa4c71e68c7b0661207a5011b5e5ac840c0
|
[] |
no_license
|
mcohenmcohen/DataRobot
|
a9b8fff8b0532e9066a207c1914b12702abbf48c
|
156e548d94d033b032d5027f2e436a13ddb85cf8
|
refs/heads/master
| 2022-09-01T18:14:29.587815
| 2022-08-23T20:54:59
| 2022-08-23T20:54:59
| 134,179,047
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 634
|
r
|
sample_wild.R
|
# Setup
library(data.table)
TARGET_SIZE <- 1.074e+9
size_str <- tolower(gsub(' ', '_', utils:::format.object_size(TARGET_SIZE, units='Gb'), fixed=T))
print(size_str)
N <- 92918152
# Load data
data <- fread('https://s3.amazonaws.com/datarobot_public_datasets/wild_function.csv', nrows=N)
utils:::format.object_size(object.size(data), units='Gb')
# Save data
outfile <- paste0('~/wild_function_',size_str ,'.csv')
print(outfile)
fwrite(data, outfile)
fsize <- file.size(outfile)
utils:::format.object_size(fsize, units='Gb')
# Calculate size needed
bytes_per_row = fsize/N
target_rows = TARGET_SIZE/bytes_per_row
print(target_rows)
|
041cf33ec8bfd53d1e40f42d98d7aaf21d98c8c3
|
a91155acf10add99ce490b0faa7173e88176dcc0
|
/R/r1975.r
|
d9cc60e147ce6a109e69a09fe17d6ef52b027da4
|
[] |
no_license
|
kwlee58/Region_new
|
2a3e61e71d2decfe4e4ce160646bdaaeda1b4d0c
|
526d08d5dd4077f8ebe529ed144306c41361f7c8
|
refs/heads/master
| 2021-01-10T08:01:46.150143
| 2015-10-10T02:11:38
| 2015-10-10T02:11:38
| 43,968,593
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 532
|
r
|
r1975.r
|
r1975.daisy<-daisy(r1975,stand=TRUE)
print(r1975.daisy)
r1975.diana<-diana(r1975,diss=FALSE,stand=TRUE)
print(r1975.diana)
r1975.diana.k2<-cutree(as.hclust(r1975.diana),k=2)
print(r1975.diana.k2)
r1975.diana.k3<-cutree(as.hclust(r1975.diana),k=3)
print(r1975.diana.k3)
r1975.diana.k4<-cutree(as.hclust(r1975.diana),k=4)
print(r1975.diana.k4)
jpeg("r1975.tree.jpg")
plot(r1975.diana,which.plots=2,main="dendrogram",xlab="1975")
dev.off()
jpeg("r1975.banner.jpg")
plot(r1975.diana,which.plots=1,main="banner")
dev.off()
|
3e07f000e793ec22d6fd2c0d0617a0b4e42ae7d5
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/maps/examples/maps-deprecated.Rd.R
|
862c79ec1f03e44466cbb630d1923dac9dfb02d5
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 377
|
r
|
maps-deprecated.Rd.R
|
library(maps)
### Name: maps-deprecated
### Title: DEPRECATED
### Aliases: maps-deprecated Deprecated
### ** Examples
## DEPRECATED:
# map('legacy_world', fill = TRUE, col = 1:10)
## Use mapdata::worldLores instead:
#Not run
if (requireNamespace("mapdata", quietly=TRUE) && packageVersion("mapdata") >= "2.3.0") {
map("mapdata::worldLores", fill = TRUE, col = 1:10)
}
|
eb64c746fa9b0b9fa208065c08bddda2a70bff81
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/DiPhiSeq/examples/diphiseq.Rd.R
|
5c0c83285fdacf59ebbfe4ae7264bd12f4fa7730
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 476
|
r
|
diphiseq.Rd.R
|
library(DiPhiSeq)
### Name: diphiseq
### Title: Main function. For most users, this function is all what they
### need for the analysis.
### Aliases: diphiseq
### ** Examples
countmat <- matrix(rnbinom(100, size=1, mu=50), nrow=4, ncol=25)
classlab <- c(rep(1, 10), rep(2, 15))
res <- diphiseq(countmat, classlab)
countmat <- matrix(rnbinom(100, size=1, mu=50), nrow=4, ncol=25)
classlab <- c(rep(1, 10), rep(2, 15))
res <- diphiseq(countmat, classlab, phi.ini=0.5)
|
dd86bd08139880f2703517746a49ad59806f3e7a
|
02eb2c90ba274f9c12b1bf632ab7a7767f886ac9
|
/Plot3.R
|
2e4f6be8bccad8558b78bb9b9190e485a9b5c7af
|
[] |
no_license
|
lixiaoyi315/ExData_Plotting1
|
e379870174dbd1809c7e32100502da30a07c384b
|
9dd79491719fd35db4e84e8640240e02359e8bdc
|
refs/heads/master
| 2021-01-22T01:34:46.527441
| 2016-05-18T09:39:33
| 2016-05-18T09:39:33
| 58,933,097
| 0
| 0
| null | 2016-05-16T13:07:16
| 2016-05-16T13:07:16
| null |
UTF-8
|
R
| false
| false
| 1,337
|
r
|
Plot3.R
|
#read data
mydata1<-read.table("household_power_consumption.txt",header = T,sep=";",stringsAsFactors=F)
#subset
#x:2007-02-01
x<-subset(mydata1,Date=="1/2/2007",select = Date:Sub_metering_3)
#y:2007-02-01
y<-subset(mydata1,Date=="2/2/2007",select = Date:Sub_metering_3)
#row bined
#mydataSubset=x+y
mydataSubset<-rbind(x,y)
#combine Date$Time in one columen
mydataSubset$Date<-as.Date(mydataSubset$Date,"%d/%m/%Y")
mydataSubset$Date<-as.character (mydataSubset$Date,"%d/%m/%Y")
DateTime<-paste(mydataSubset$Date,mydataSubset$Time)
mydataSubsetNew<-cbind(DateTime,mydataSubset[,-(1:2)])
class(mydataSubsetNew$DateTime)
mydataSubsetNew$DateTime<-as.character(mydataSubsetNew$DateTime)
mydataSubsetNew$DateTime<-strptime(mydataSubsetNew$DateTime,"%d/%m/%Y %H:%M:%S")
head(mydataSubsetNew$DateTime)
#plot3
opar<-par(no.readonly = T)
family="mono"
png(file="plot3.png",width = 480,height = 480)
plot(mydataSubsetNew$DateTime,mydataSubsetNew$Sub_metering_1,type = "l",ylab = "Energy sub metering",xlab=" ")
lines(mydataSubsetNew$DateTime,mydataSubsetNew$Sub_metering_2,col="red")
lines(mydataSubsetNew$DateTime,mydataSubsetNew$Sub_metering_3,col="blue")
legend.txt<-c("Sub_metering_1","Sub_metering_2","Sub_metering_3")
legend("topright",legend=legend.txt,col=c("black","red","blue"),lwd=1,lty = 5,bty = "o",cex = 0.8)
par(opar)
dev.off()
|
810a73f777155fdc7e5a34ce8796af5fe76aaa29
|
65ebf63f98038d8ce33aa3501c206c1c9c49b22c
|
/figures/scripts/plotting/plot-traject-speed-persistence.R
|
dcc56d4ef8156bb45620294fdb563c31e408272b
|
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
ingewortel/2022-Tcell-evolution
|
0459ff6cbac815da0f3ba1244e5b29fd35060873
|
db24a7710037c7f951cd7dd72e5f5489f5cd5711
|
refs/heads/master
| 2023-08-04T08:45:35.267447
| 2023-07-24T18:36:56
| 2023-07-24T18:36:56
| 511,920,528
| 0
| 0
|
MIT
| 2023-07-24T18:36:58
| 2022-07-08T14:18:04
|
JavaScript
|
UTF-8
|
R
| false
| false
| 2,518
|
r
|
plot-traject-speed-persistence.R
|
library( ggplot2 )
library( dplyr, quietly = TRUE, warn.conflicts = FALSE )
require( cowplot, quietly = TRUE, warn.conflicts = FALSE )
source("../scripts/plotting/mytheme.R")
argv <- commandArgs( trailingOnly = TRUE )
datafile <- argv[1]
paramfile <- argv[2] # Must be an ordered list of mact/lact params, ordered over the trajectory points.
outfile <- argv[3]
# Read datafile
d <- read.table( datafile, header=TRUE )
# Read file with params to check
parms <- read.table( paramfile )
colnames(parms) <- c("mact","lact" )
parms$id <- paste0(parms$mact,"-",parms$lact)
parms$point <- seq(1,nrow(parms))
print(parms)
# Filter these params from data
dsum <- d %>%
group_by( mact, lact ) %>%
summarise( m_speed = mean(speed), m_persistence = mean(phalf,na.rm=TRUE), sd_speed = sd(speed), sd_persistence = sd(phalf,na.rm=TRUE) ) %>%
as.data.frame()
rownames(dsum) <- paste0(dsum$mact,"-",dsum$lact)
print(dsum)
dtraject <- dsum[ parms$id, ]
dtraject$point <- seq(1,nrow(parms))
print( dtraject )
# Plotting
pspeed <- ggplot( dtraject, aes( x = point, y = m_speed ) ) +
geom_ribbon( aes( ymin = m_speed - sd_speed, ymax = m_speed + sd_speed ), alpha = 0.3, color =NA ) +
geom_point( size = 0.8) +
geom_path() +
labs( x = "trajectory point", y = "mean speed\n(pixels/MCS)" ) +
scale_y_continuous( expand=c(0,0), limits=c(0,1.05*max(dtraject$m_speed)) ) +
mytheme + theme(
axis.title.x = element_blank(),
plot.margin = unit(c(0.3, 0.5, 0, 0.3), "cm")
)
ppersis <- ggplot( dtraject, aes( x = point, y = m_persistence ) ) +
geom_ribbon( aes( ymin = m_persistence - sd_persistence, ymax = m_persistence + sd_persistence ), alpha = 0.3, color = NA ) +
geom_point( size = 0.8 ) +
geom_path() +
labs( x = "trajectory point", y = "persistence\ntime (MCS)" ) +
scale_y_log10( expand=c(0,0), limits=c(5,10000)) +
mytheme + theme(
plot.margin = unit(c(0, 0.5, 0.3, 0.3), "cm")
)
p <- plot_grid( plotlist = list(pspeed,ppersis), labels = NULL, align = "v", ncol = 1, rel_heights=c(0.9,1))
ggsave( outfile, width = 4.5, height=6, units="cm" )
#ax.breaks <- c( seq(0.01,0.1,0.01), seq(0.2,1,0.1), seq(2,10), seq(20,100,10), seq(200, 1000, 100), seq(2000,10000,1000) )
#ax.labels <- as.character( ax.breaks )
#ax.labels[ ( seq_along( ax.breaks ) - 1 ) %% 9 != 0 ] <- ""
# align multiple plots underneath each other
#cols <- c("1"="black","2"="red","3"="forestgreen","4"="blue","5"="maroon3")
# plot.margin = unit(c(0.3, 0.5, 0, 0.3), "cm") )
#plot.margin = unit(c(0, 0.5, 0.3, 0.3), "cm")
|
1def96b9fbd43ac9ff29abc9c49d4e674d2cd2dd
|
9e2d033ee6dac8d3ff4f8f6ced1f6c7b8f383136
|
/raw_data/NetDecoder_utils.R
|
9ecdee676df52341beedbdd4f6c14281d58c0848
|
[] |
no_license
|
HuLiLab/NetDecoder_Example
|
148dab89acbaa71e55859c2e4c934252dbac77ca
|
a4cfc7c1fdf38f0f8573780e29595bce3981c7b8
|
refs/heads/main
| 2022-12-22T22:52:41.878344
| 2020-09-29T22:47:56
| 2020-09-29T22:47:56
| 299,754,702
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,775
|
r
|
NetDecoder_utils.R
|
#code from CellNet:Network biology applied to stem cell engineering, Cahan P, et al, 2014, Cell.
#In the RStudio environment, copy and paste the functions Norm_cleanExp and Norm_loadAnnotation in the R command line.
#Then, go the line starting with ## START HERE -> .CEL files processing
Norm_cleanExp<-function
### returns the gene averaged matrix
(expDat,
### exp matrix
geneTab,
### gene annotation
nameCol="symbol"
### gene ann table column name to average over
){
if(!is.matrix(expDat)){
expDat<-as.matrix(expDat);
}
# Make sure the ann table and expDat have same rows:
## altered 05-03-13
## rownames(geneTab)<-as.vector(geneTab$probe_id);
## sameProbes<-intersect(rownames(expDat), as.vector(geneTab$probe_id));
rownames(geneTab)<-as.character(geneTab$probe_id);
sameProbes<-intersect(rownames(expDat), as.character(geneTab$probe_id));
expDat<-expDat[sameProbes,];
## cat(length(sameProbes),"\n");
geneTab<-geneTab[sameProbes,];
## cat(length(sameProbes),"\n");
eids<-unique(as.vector(geneTab[,nameCol]));
uSymbols<-vector(length=length(eids));
## cat(length(eids),"\n");
ans<-matrix(nrow=length(eids), ncol=ncol(expDat));
for(i in seq(length(eids))){
eid<-eids[i];
#cat(".");
xi <- which( geneTab[,nameCol]==eid );
## desProbes <- as.vector(geneTab[xi,]$probe_id);
desProbes <- as.character(geneTab[xi,]$probe_id);
if(length(xi)>1){
ans[i,]<- apply(expDat[desProbes,], 2, mean);
}
else{
ans[i,]<-expDat[desProbes,];
}
uSymbols[i]<-as.vector(geneTab[ xi[1] ,nameCol]);
}
rownames(ans)<-uSymbols;
colnames(ans)<-colnames(expDat);
ans;
### gene averaged matrix
}
Norm_loadAnnotation<-function
### load the specified gene annotation table
(pName
### platform name
){
# Need to add code to specifically download and install proper library versions.
geneTab<-'';
if(pName=="mogene10stv1"){
pName<-"mogene10sttranscriptcluster";
}
if(pName=="hugene10stv1"){
pName<-"hugene10sttranscriptcluster";
}
libName<-paste(pName, ".db",sep='');
## cat("Loading ", libName,"\n");
x<-require(libName, character.only=TRUE);
if(FALSE){
if(!x){
source("http://bioconductor.org/biocLite.R")
biocLite(libName);
x<-require(libName, character.only=TRUE);
}
}
if(!x){
cat(".loadAnnotation\tUnable to install ",libName,"\n")
return;
}
else{
entrezCmd<-paste(pName, "ENTREZID", sep='');
idType<-'entrezgeneid';
probes<-eval(parse(text=entrezCmd));
probeTable<-links(probes);
symbolsCmd<-paste(pName,"SYMBOL", sep='');
symbols<-eval(parse(text=symbolsCmd));
symbols<-links(symbols);
geneTab<-merge(probeTable, symbols);
}
geneTab;
### table of probe_id, entrez id and gene symbols
}
|
7e4c19cc03ee80889027f27f303c93f65f5a4968
|
e7d6f4c26535a283b2a705ad95dbc5cdf175d86c
|
/Code/summarizeDataQuality.R
|
7bfc4b5da54dca5b255211dfdeaf78258e0f257e
|
[] |
no_license
|
nlenssen/cruInvestigation
|
8d7d79a4d71abe73d6a8ce17bed146c9f0b4e0d2
|
af2792166ffdf24c059d831b09094f0d0fb3e560
|
refs/heads/master
| 2020-03-29T20:23:02.846434
| 2018-10-09T01:27:06
| 2018-10-09T01:27:06
| 150,309,760
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,132
|
r
|
summarizeDataQuality.R
|
source("Code/namelist.Rnl")
###############################################################################
# Load in the relevant count Data
###############################################################################
# load in the ncdf metadata
handle <- nc_open(sprintf('%s/Raw/%s',ddir,pptFile))
lon <- ncvar_get(handle,'lon')
lat <- ncvar_get(handle,'lat')
# set up all of the necessary time objects to properly subset the data
time <- ncvar_get(handle,'time')
tFull <- seq(1901,endYear+1,length=length(time)+1)[1:length(time)]
year <- floor(tFull)
month <- rep(1:12,length(time)/12)
fullTimeMat <- cbind(year,month)
# subset based on starting year and make all of the necessary time objects
timeInds <- which(year >= startYear)
timeMat <- fullTimeMat[timeInds,]
continuousTime <- tFull[timeInds]
# grab the ppt monthly time series for the single location
counts <- ncvar_get(handle, 'stn', start=c(1,1,timeInds[1]),
count=c(-1,-1,length(timeInds)) )
nc_close(handle)
# Fill array with NAs
counts[counts==-999] <- NA
###################
# mean/median plots
###################
totalMean <- apply(counts,c(1,2),mean)
totalMedian <- apply(counts,c(1,2),median)
skew <- totalMedian - totalMean
pdf(sprintf('%s/totalMean.pdf',plotdir),10,7)
image.plot(lon,lat,totalMean,ylim=c(-60,90),main="Mean Station Count")
world(add=T)
dev.off()
pdf(sprintf('%s/totalMedian.pdf',plotdir),10,7)
image.plot(lon,lat,totalMedian,ylim=c(-60,90),main="Median Station Count")
world(add=T)
dev.off()
pdf(sprintf('%s/totalSkewness.pdf',plotdir),10,7)
zmax <- max(skew,na.rm=T)
zr <- c(-zmax,zmax)
image.plot(lon,lat,skew,ylim=c(-60,90),zlim=zr,main="Median - Mean",col=redBlue())
world(add=T)
dev.off()
##############################################
# Linear trend (over entire time period) plots
##############################################
regSlope <- function(ts,time = continuousTime){
if(!all(is.na(ts))){
fit <- lm(ts ~ time)
return(coef(fit)[2])
} else{
return(NA)
}
}
fullSlope <- apply(counts,c(1,2),regSlope)
# draw the plot
pdf(sprintf('%s/fullSlope.pdf',plotdir),10,7)
zmax <- max(abs(fullSlope),na.rm=T)
zr <- c(-zmax,zmax)
image.plot(lon,lat,fullSlope,ylim=c(-60,90),zlim=zr,main="Linear Trend of Station Count (1950-2016)",col=redBlue())
world(add=T)
dev.off()
##############################################
# Make a plot for each decade
##############################################
decadeStart <- seq(1950,2010,by=10)
decadalMean <- decadalMedian <- decadalSlope <-
array(NA, dim=c(length(lon),length(lat),length(decadeStart)))
for(i in 1:(length(decadeStart))){
timeStart <- which(continuousTime==decadeStart[i])
if(decadeStart[i] < 2010){
timeEnd <- which(continuousTime==decadeStart[i+1])-1
} else{
timeEnd <- length(continuousTime)
}
subInds <- timeStart:timeEnd
# subset the data and compute the necessary functions
subDat <- counts[,,subInds]
decadalMean[,,i] <- apply(subDat,c(1,2),mean)
decadalMedian[,,i] <- apply(subDat,c(1,2),median)
decadalSlope[,,i] <- apply(subDat,c(1,2),regSlope,time=continuousTime[subInds])
}
# make all the plots
for(i in 1:length(decadeStart)){
pdf(sprintf('%s/decadal/totalMean%1.0f.pdf',plotdir,i),10,7)
image.plot(lon,lat,decadalMean[,,i],ylim=c(-60,90),
main=sprintf("Mean Station Count (%4.0fs)",decadeStart[i]))
world(add=T)
dev.off()
pdf(sprintf('%s/decadal/totalMedian%1.0f.pdf',plotdir,i),10,7)
image.plot(lon,lat,decadalMedian[,,i] ,ylim=c(-60,90),
main=sprintf("Median Station Count (%4.0fs)",decadeStart[i]))
world(add=T)
dev.off()
pdf(sprintf('%s/decadal/totalSkewness%1.0f.pdf',plotdir,i),10,7)
skew <- decadalMedian[,,i] - decadalMean[,,i]
zmax <- max(skew,na.rm=T)
zr <- c(-zmax,zmax)
image.plot(lon,lat,skew,ylim=c(-60,90),zlim=zr,
main=sprintf("Median - Mean (%4.0fs)",decadeStart[i]),col=redBlue())
world(add=T)
dev.off()
pdf(sprintf('%s/decadal/fullSlope%1.0f.pdf',plotdir,i),10,7)
zmax <- max(abs(decadalSlope[,,i]),na.rm=T)
zr <- c(-zmax,zmax)
image.plot(lon,lat,decadalSlope[,,i],ylim=c(-60,90),zlim=zr,
main=sprintf("Linear Trend of Station Count (%4.0fs)",decadeStart[i]),col=redBlue())
world(add=T)
dev.off()
}
# Make the difference plot between the 1970s and the 1990s 2000s and 2010s
meanDiff90 <- decadalMean[,,5] - decadalMean[,,3]
meanDiff00 <- decadalMean[,,6] - decadalMean[,,3]
meanDiff10 <- decadalMean[,,7] - decadalMean[,,3]
zmax <- max(abs(meanDiff90),abs(meanDiff00),abs(meanDiff10),na.rm=T)
zr <- c(-zmax,zmax)
pdf(sprintf('%s/decadalDifference1990.pdf',plotdir,i),10,7)
image.plot(lon,lat,meanDiff90,ylim=c(-60,90),zlim=zr,
main="Change in Mean Coverage from 1970s to 1990s",col=redBlue())
world(add=T)
dev.off()
pdf(sprintf('%s/decadalDifference2000.pdf',plotdir,i),10,7)
image.plot(lon,lat,meanDiff00,ylim=c(-60,90),zlim=zr,
main="Change in Mean Coverage from 1970s to 2000s",col=redBlue())
world(add=T)
dev.off()
pdf(sprintf('%s/decadalDifference2010.pdf',plotdir,i),10,7)
image.plot(lon,lat,meanDiff10,ylim=c(-60,90),zlim=zr,
main="Change in Mean Coverage from 1970s to 2010s",col=redBlue())
world(add=T)
dev.off()
|
aee4ca9dbbea4a992878d85d62a7cca4abc1708f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/driftR/examples/dr_correctOne.Rd.R
|
69193acd97fa96910bfe0de6ff56f519600cc414
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 675
|
r
|
dr_correctOne.Rd.R
|
library(driftR)
### Name: dr_correctOne
### Title: One-point drift correction
### Aliases: dr_correctOne
### ** Examples
testData <- data.frame(
Date = c("9/18/2015", "9/18/2015", "9/18/2015", "9/18/2015", "9/18/2015", "9/18/2015"),
Time = c("12:10:49", "12:15:50", "12:20:51", "12:25:51", "12:30:51", "12:35:51"),
Temp = c(14.76, 14.64, 14.57, 14.51, 14.50, 14.63),
SpCond = c(0.754, 0.750, 0.750, 0.749, 0.749, 0.749),
corrFac = c(0.0000000, 0.2003995, 0.4007989, 0.6005326, 0.8002663, 1.0000000),
stringsAsFactors = FALSE
)
dr_correctOne(testData, sourceVar = SpCond, cleanVar = SpCond_Corr,
calVal = 1.05, calStd = 1, factorVar = corrFac)
|
8671b552240b602a76b77cd12df3504da16799f1
|
a59c25d1665ca92ac8a149d5ad26bd4573cc51d3
|
/bigdata-playcount_knn.R
|
d24d80ff4a684ab84f3b3e16d1dd701da3e05981
|
[] |
no_license
|
sachsm53/msd_mmt_emotion
|
7e596daae97141b04b2b7b0eca0fae145ffc412e
|
51fff629ffe38ff5da89b2ab6323a42e402a8684
|
refs/heads/master
| 2021-07-25T21:12:05.758140
| 2017-11-07T06:05:31
| 2017-11-07T06:05:31
| 106,644,720
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,018
|
r
|
bigdata-playcount_knn.R
|
#Big Data Project - Full Dataset
# Exploratory analysis of Million Song Dataset mood tags ("sad", "happy",
# "beautiful"): data cleaning, class balancing, SVM / KNN classification of
# the "sad" tag, and linear models predicting playcount.
# NOTE(review): this is an exploratory scratch script; several lines reference
# objects never created in this file (data, msd.train, msd.smote, sigDist,
# plsClasses, tweets, msdSnorm.test) and will error if run top-to-bottom.

# ---- Load and clean ----
raw = read.csv(file.choose())
# NOTE(review): `data` is not defined here (the file is read into `raw`) —
# the next four lines presumably targeted `raw`; verify before running.
attach(data)
sad <- as.factor(sad)
data$sad <- as.factor(data$sad)
data$playcount = 0
#take only complete cases
raw.comp = raw[complete.cases(raw),]
dim(raw.comp) #number of complete cases: 340,826
dim(raw.comp[raw.comp$sad == 1,]) #number of sad songs: 13,762
#find unique cases and duplicates
msd <- raw.comp[!duplicated(raw.comp[,3]),] #remove all cases where the song ID is the same
dim(msd) #now the number of cases is 340,192
dim(msd[msd$sad == 1,]) #now, the number of sad songs is 13,617
#make sure that variables have the correct types (tag/mode/key columns are categorical)
class(msd$sad)
msd$sad <- as.factor(msd$sad)
msd$beautiful <- as.factor(msd$beautiful)
msd$happy <- as.factor(msd$happy)
msd$key <- as.factor(msd$key)
msd$mode <- as.factor(msd$mode)
summary(msd)
#find and remove cases where tempo = 0 (invalid measurements)
length(which(msd$tempo == 0))
msd[237,]
subset = which(msd$tempo == 0)
#row_sub = apply(msd, 1, function(row) all(row !=0 ))
#msd.sub = msd[apply(msd[,9],1,function(x) !all(x==0)),]
#msd.sub = apply(msd,1,function(x)msd[!subset,]
#msd.sub = msd[rowSums(msd[, -1] > 0) != 0, ]
msd.sub = msd[-subset,]
length(which(msd.sub$tempo == 0))
msd <- msd.sub
summary(msd)
#find cases where familiarity is 0
length(which(msd$familiarity == 0))
which(msd$familiarity == 0)
msd[718,]

# ---- Balance classes by undersampling the majority ("not sad") class ----
indx <- which(msd$sad == 0)
sad_num <- dim(msd[msd$sad == 1,])[1]
set.seed(12)
msdNS <- msd[sample(indx,sad_num),]
msdS <- msd[msd$sad == 1,]
summary(msdNS)
dim(msdS)
dim(msdNS)
msdSamp <- rbind(msdNS,msdS)
dim(msdSamp)
prop.table(table(msdSamp$sad))
summary(msdSamp)
#divide the data into training and test based on an 80/20 split
num = round((dim(msdSamp)[1])*(.8))
num #training = 271,595
set.seed(13)
train = sample(dim(msdSamp)[1],num)
msdSamp.train = msdSamp[train,]
table(msdSamp.train$sad) #there are 10,900 sad songs in the training set
sad_num <- dim(msdSamp.train[msdSamp.train$sad == 1,])[1]
msdSamp.test = msdSamp[-train,]
dim(msdSamp.test) #test split = 67,899
class(msdSamp.test$sad)
prop.table(table(msdSamp.train$sad))
prop.table(table(msdSamp.test$sad))

# ---- Alternative balancing: SMOTE oversampling ----
#Dealing with unbalanced data
#the number of songs classified as sad is 83/3169
#therefore, we need to oversample the underrepresented cases
#One way of doing this is to use SMOTE
install.packages("DMwR")
library("DMwR")
# NOTE(review): msd.train is never defined in this script (only msdSamp.train);
# the next block will error as written — verify the intended object.
msdSmote <- msd.train
ncols<-ncol(msdSmote)
msdSmote<-cbind(msdSmote[2:ncols],msdSmote[1])
msdSmote.train<-SMOTE(sad~familiarity+hotness+tempo+duration+mode+key+loudness,msd.train,perc.over = 100,perc.under=200) #undersample the non-sad cases
#msd.smote<-SMOTE(sad~familiarity+hotness+tempo+duration+mode+key+loudness,msdSmote,k=5,perc.over = 1400,perc.under=140) #oversample the sad cases
table(msd.smote$sad) #now the data is 21,802 not sad,
table(msdSmote.train$sad)
dim(msd.smote)
msdSmote<-cbind(msd.smote[ncols],msd.smote[1:ncols-1]) #now there are equal number of sad and not sad
#Another way of dealing with asymmetric class size
#is to increase the weight of certain classes
# Illustrative only: x and y are not defined anywhere in this script.
m <- svm(x, y, class.weights = c(A = 0.3, B = 0.7)) #this weights B more than A

# ---- SVM, linear kernel, raw features ----
install.packages("e1071")
library("e1071")
set.seed(1234)
lsvm.sad=svm(sad~familiarity+hotness+tempo+duration+mode+key+loudness,data=msdSamp.train, kernel="linear",cost=1)
plot(svm.sad,msdSmote.train)
summary(lsvm.sad)
train.pred = predict(lsvm.sad,msdSamp.train)
table(msdSamp.train$sad,train.pred) #Percent correct 15949/21,766 = 73.27% (WORSE THAN RADIAL)
test.pred = predict(lsvm.sad,msdSamp.test)
table(msdSamp.test$sad,test.pred) #Percent correct: 3958/5442, 72.7% (WORSE THAN RADIAL)
# Obtain feature weights (w = coefs' * support vectors for a linear kernel)
weights.sad.linear = t(lsvm.sad$coefs) %*% lsvm.sad$SV
weights.sad.linear

# ---- SVM, linear kernel, scaled/centered features ----
#run SVM using linear kernel with SCALED and CENTERED DATA
install.packages("e1071")
library("e1071")
set.seed(1234)
install.packages("caret")
library(caret)
msdSamp.train$tempo <-scale(msdSamp.train$tempo) #Method 1
# NOTE(review): msdSamp.train2 is only created further below ("#make the
# training set..." section); these lines error unless that section runs first.
preObj1 <- preProcess(msdSamp.train2[,c(0:4,7)], method=c("center", "scale")) #Method 2
msdNorm.train <- predict(preObj1,msdSamp.train2[,c(0:4,7)])
sd(msdNorm.train$loudness)
summary(msdNorm.train)
msdNorm.train <- cbind(msdNorm.train,msdSamp.train2$mode,msdSamp.train2$key,msdSamp.train2$sad)
names(msdNorm.train)[8] <- "sad"
names(msdNorm.train)[7] <- "key"
names(msdNorm.train)[6] <- "mode"
preObj2 <- preProcess(msdSamp.test[,c(6,8,9,11,12)], method=c("center", "scale")) #Method 2
msdNorm.test <- predict(preObj2,msdSamp.test[,c(6,8,9,11,12)])
sd(msdNorm.test$loudness)
summary(msdNorm.test)
dim(msdNorm.test)
msdNorm.test <- cbind(msdNorm.test,msdSamp.test$mode,msdSamp.test$key,msdSamp.test$sad)
names(msdNorm.test)[8] <- "sad"
names(msdNorm.test)[7] <- "key"
names(msdNorm.test)[6] <- "mode"
class(msdNorm.train$sad)
class(msdNorm.test$sad)
lsvm.norm=svm(sad~familiarity+hotness+tempo+duration+mode+key+loudness,data=msdNorm.train, kernel="linear",cost=100)
summary(lsvm.norm)
train.pred = predict(lsvm.norm,msdNorm.train)
table(msdNorm.train$sad,train.pred) #Percent correct 15947/21,766 = 73.27% (SAME AS BEFORE NORM)
test.pred = predict(lsvm.norm,msdNorm.test)
table(msdNorm.test$sad,test.pred) #Percent correct: 3957/5442, 72.7% (SAME AS BEFORE NORM)
# Obtain feature weights
weights.norm.linear = t(lsvm.norm$coefs) %*% lsvm.norm$SV
weights.norm.linear <- as.data.frame(weights.norm.linear)

# ---- ROC curves for the radial-kernel SVM ----
#ROC Curves
install.packages("pROC")
library(pROC)
auc <- roc(msdSamp.test$sad,test.pred)
library (ROCR)
# Helper: plot an ROC curve (TPR vs FPR) from scores and true labels.
rocplot =function (pred , truth , ...){
predob = prediction(pred,truth)
perf = performance(predob,"tpr","fpr")
plot(perf ,...)}
svmsad.opt=svm(sad~familiarity+hotness+tempo+duration+mode+key+loudness,data=msdSamp.train, kernel="radial",gamma=1,cost=1,decision.values=T)
fitted = attributes(predict(svmsad.opt,msdSamp.train,decision.values =TRUE))$decision.values
par(mfrow =c(1,2))
rocplot(fitted,msdSamp.train$sad, main="Training Data")
fitted = attributes(predict(svmsad.opt,msdSamp.test,decision.values =TRUE))$decision.values
# NOTE(review): `fitted` now holds TEST-set scores but is plotted against the
# TRAINING labels on the next line — verify which comparison was intended.
rocplot(fitted,msdSamp.train$sad,main=" Training Data")
rocplot(fitted,msdSamp.test$sad,add=T,col="red")
#make the training set with only the variables that we are interested in
keeps <- c("familiarity","hotness","tempo","duration","mode","key","loudness","sad")
msdSamp.train2 <- msdSamp.train[keeps]
# NOTE(review): the "test2" set is subset from msdSamp.train here;
# was msdSamp.test intended? Verify.
msdSamp.test2 <- msdSamp.train[keeps]
head(msdSamp.train2)

# ---- Cross-validation for cost/gamma ----
#cross validation to find the best choice for gamma and cost
set.seed(11)
tune.rad = tune(svm,sad~.,data=msdSamp.train,kernel="radial",ranges = list(cost=c(0.1,1,10,100),gamma=c(0.5,1,2,3)))
tune.out = tune.svm(sad~., data = msdSmote.train, kernel = "radial", ranges = list(cost=c(0.1,1,10,100,1000),gamma=c(0.5,1,2,3,4)))
summary(tune.out)
svmbest=svm(sad~familiarity+hotness+tempo+duration+mode+key+playcount,data=msdSamp.train, kernel="radial",gamma=tune.out$best.parameters$gamma,cost=tune.out$best.parameters$cost, cross = 10)
summary(svmbest)
train.pred = predict(svmbest,msdSamp.train)
table(msdSamp.train$sad,train.pred)
test.pred = predict(svmbest,msdSamp.test)
table(msdSamp.test$sad,test.pred)

# ---- SVM via caret ----
#Try SVM using Caret Package
install.packages('caret')
library(caret)
# NOTE(review): sigDist (and later plsClasses/testing) are never defined here.
tunegrid <- data.frame(.sigma = sigDist[1], .C = 2^(-2:7))
control <- trainControl(method="repeatedcv", number=10, repeats=3,classProbs=TRUE,summaryFunction=twoClassSummary)
set.seed(825)
svm.caret <- train(sad~familiarity+hotness+tempo+duration+mode+key+loudness, data=msdSamp.train, method="svmRadial", trControl=control, tunelength = 8, metric = "ROC")
testProbs <- predict(svm.caret, newdata = msdSamp.test, type = "prob")
confusionMatrix(data = plsClasses, testing$Class)

# ---- KNN ----
#KNN
library(class)
attach(msdSamp.train)
attach(msdSamp.test)
train.x = msdSamp.train[,keeps]
train.x <- train.x[,0:7]
train.sad <- msdSamp.train$sad
test.x = msdSamp.test[,keeps]
test.x <- test.x[,0:7]
# The next five lines overwrite the raw-feature sets with the normalized ones.
train.x = msdNorm.train[,keeps]
train.x <- train.x[,0:7]
train.sad <- msdNorm.train$sad
test.x = msdNorm.test[,keeps]
test.x <- test.x[,0:7]
set.seed(12345)
knn.pred1=knn(train.x,test.x,train.sad,k=1)
# NOTE(review): `knn.pred` (no suffix) is never assigned — the fit above is
# knn.pred1; the next two lines will error as written.
table(knn.pred,msdNorm.test$sad)
mean(knn.pred==msdNorm.test$sad)
knn.pred2=knn(train.x,test.x,train.sad,k=2)
table(knn.pred2,msdSamp.test$sad)
mean(knn.pred2==msdSamp.test$sad)
knn.pred3=knn(train.x,test.x,train.sad,k=3)
table(knn.pred3,msdSamp.test$sad)
mean(knn.pred3==msdSamp.test$sad)
knn.pred5=knn(train.x,test.x,train.sad,k=5)
table(knn.pred5,msdSamp.test$sad)
mean(knn.pred5==msdSamp.test$sad)
knn.pred10=knn(train.x,test.x,train.sad,k=200) #best appears to be with k=200 and its only 57% accurate
table(knn.pred10,msdSamp.test$sad)
mean(knn.pred10==msdSamp.test$sad)

# ---- Regressions on playcount ----
#Linear Regression/Logistic Regression with playcount for sad songs
install.packages('languageR')
library(languageR)
install.packages('lme4')
library(lme4)
s.h = which(msd$sad==1 & msd$happy ==1)
summary(msd)
#take only the ones that are sad and not sad and happy together
#find examples of both sad and happy
s.h = which(msd$sad==1 & msd$happy ==1)
# NOTE(review): `tweets` / `tweets.hs` appear to be pasted from another
# analysis and are undefined in this script.
sad_hap = tweets[s.h,2:3]
msd.hs = msd[-s.h,] #take only cases that are not both sad and happy
msd.hs$valence <- ifelse(msd.hs$sad == 1, "1",
ifelse(msd.hs$happy == 1, "0","NA"))
msd.hs$valence <- as.factor(msd.hs$valence)
summary(msd.hs)
which(msd.hs$sad==1 & msd.hs$happy ==1)
tweets.hs = tweets.hs[(tweets.hs$valence==0 | tweets.hs$valence == 1),]
dim(tweets.hs)
#Predicting playcount
lm.fit <- lm(playcount~sad, data = msd.hs)
summary(lm.fit)
lm.fit <- lm(playcount~beautiful, data = msd.hs)
summary(lm.fit)
lm.fit <- lm(playcount~duration+mode+tempo+loudness, data = msd.hs)
summary(lm.fit)
lm.fit <- lm(playcount~duration+mode+tempo+loudness, data = msd.hs)
summary(lm.fit)
lm.fit1 <- lm(playcount~familiarity+hotness+duration+mode+tempo+loudness, data = msd.hs)
summary(lm.fit1)
lm.fit2 <- lm(playcount~sad+beautiful+happy, data = msd.hs)
summary(lm.fit2) #all three are significant, suggesting that just tagging a song as something is associated with higher play count
anova(lm.fit,lm.fit1)
#select only sad songs, and look at the linear regression for play count
sad <- which(msd.hs$sad == 1)
head(sad)
msd.sad <- msd.hs[sad,]
summary(msd.hs$sad)
summary(msd.sad)
lm.sad <- lm(playcount~familiarity+hotness+duration+mode+tempo+loudness, data = msd.sad)
summary(lm.sad) #only loudness was significant with playcount
lm.sad <- lm(playcount~duration+mode+tempo+loudness+key, data = msd.sad)
summary(lm.sad)
lm.sad <- lm(playcount~familiarity+hotness+beautiful+loudness+duration+tempo, data = msd.sad)
summary(lm.sad)
lm.sad <- lm(playcount~beautiful, data = msd.sad)
summary(lm.sad) #songs tagged beautiful are more likely to have higher play count (when already tagged sad)
lm.sadall <- lm(playcount~beautiful+duration+mode+tempo+loudness, data =msd.sad)
summary(lm.sadall)
#with key included: Multiple R-squared: 0.007287, Adjusted R-squared: 0.005974
#with key not included: Multiple R-squared: 0.006845, Adjusted R-squared: 0.006435
anova(lm.sadall,lm.sad)
anova(lm.sad,lm.sadall)

# ---- Random forest regression on playcount for sad songs ----
#run a SVM regression and regression tree
#split msd.sad into train and test
num = round((dim(msd.sad)[1])*(.8))
num #training = 9,690
set.seed(13)
train = sample(dim(msd.sad)[1],num)
msdSad.train = msd.sad[train,]
msdSad.test = msd.sad[-train,]
dim(msdSad.test) #test split = 2,422
class(msdSnorm.test$beautiful)
prop.table(table(msdSad.train$beautiful))
prop.table(table(msdSad.test$beautiful))
summary(msdSad.train)
str(msdSad.train)
str(msdSad.test)
library(randomForest)
set.seed (1)
bag.count = randomForest(playcount~familiarity+hotness+tempo+duration+mode+key+loudness+beautiful,data=msdSad.train,mtry=3,importance =TRUE)
plot(bag.count) #out of bag (OOB) error estimate
importance(bag.count)
yhat.bagsad = predict(bag.count,newdata=msdSad.test)
# Test-set mean squared error of the random-forest predictions.
mean((yhat.bagsad - msdSad.test$playcount)^2)
par(mfrow=c(2,1))
par(pty="s")
varImpPlot(bag.count, type=1, pch=19, col=1, cex=.5, main="")
varImpPlot(bag.count, type=2, pch=19, col=1, cex=.5, main="")
|
4d0f91c44dfdc4da0e4ed739e38e11b738632b85
|
b1403a419dc4687ca20a7ef5f0bfa2416604ae9e
|
/CTDLGT_R/BT06_CaiDatDanhSachLienKetDon.R
|
2b888c1a1f9d7d7f26e4b1a93a78c66c1f1a5c9b
|
[] |
no_license
|
NguyenPhuoc1207/Do_An_CTDL
|
0ed0e46960f03bea1588d27592afb7683de96bbc
|
293b92a755c8d9db8a60c308a7e5c5022a7551fb
|
refs/heads/master
| 2023-06-22T03:48:55.147042
| 2021-07-17T10:04:56
| 2021-07-17T10:04:56
| 386,885,364
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 230
|
r
|
BT06_CaiDatDanhSachLienKetDon.R
|
# Demonstrate list construction: start empty and grow by position.
lst <- list()          # length-zero list
lst[[1]] <- 3          # assigning past the end extends the list
lst[[2]] <- 4
lst

# A named list can mix arbitrary element types.
elist <- list(
  vec  = 1:4,
  df   = data.frame(a = 1:3, b = 4:6),
  mat  = matrix(1:4, nrow = 2),
  name = "pks"
)
elist[["vec"]]         # extract one element by name
|
7422cd9eed8ed956083c8557a3ff17d571ee21f4
|
c970de6ba932cb4322acf4da4250a4ebc955b266
|
/R/regex-methods.R
|
91dfb75a06c2fe9ef35aa2028112ad07b234f3f8
|
[] |
no_license
|
trinker/regex
|
bfd7798f671726122d03db0a8a63836466dee8dc
|
22245e85853692c8597653e936263a99fe81cf28
|
refs/heads/master
| 2021-01-24T20:53:23.954837
| 2014-12-22T12:33:21
| 2014-12-22T12:33:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,464
|
r
|
regex-methods.R
|
#' Convert or test for regex objects
#'
#' \code{as.regex} gives objects the class \code{"regex"}. \code{is.regex} tests
#' for objects of class \code{"regex"}.
#' @param x An object to test or convert.
#' @return \code{as.regex} returns the input object, with class
#' \code{c("regex", "character")}.
#' \code{is.regex} returns \code{TRUE} when the input inherits from class
#' \code{"regex"} and \code{FALSE} otherwise.
#' @export
as.regex <- function(x)
{
  # Stamp the "regex" class on x, keeping "character" for method fallback;
  # all other attributes of x are preserved.
  class(x) <- c("regex", "character")
  x
}
#' @rdname as.regex
#' @export
is.regex <- function(x) inherits(x, "regex")
#' Create a regex
#'
#' Creates a regex object.
#' @param ... Passed to \code{paste0}.
#' @return An object of class \code{regex}.
#' @note This works like \code{paste0}, but the return value has class
#' \code{c("regex", "character")}.
#' @seealso \code{\link[base]{paste0}}, \code{\link{as.regex}}
#' @export
regex <- function(...)
{
  # Concatenate the pieces first, then tag the result with the regex class.
  pattern <- paste0(...)
  as.regex(pattern)
}
#' Print or format regex objects
#'
#' Prints/formats objects of class \code{regex}.
#' @param x A regex object.
#' @param ... Passed from other format methods. Currently ignored.
#' @return \code{format.regex} returns a character vector. \code{print.regex}
#' is invoked for the side effect of printing the regex object.
#' @export
format.regex <- function(x, ...)
{
  # Use `<-` for consistency with the rest of the file; prefix each pattern
  # with a "<regex> " tag so output is distinct from a plain character vector.
  paste0("<regex> ", x)
}
#' @rdname format.regex
#' @export
print.regex <- function(x, ...)
{
  cat(format(x, ...), sep = "\n")
  # Standard print-method contract: return the object invisibly so the
  # result can be assigned or piped without being re-printed.
  invisible(x)
}
|
0fc835fabfa4ebb22d41a7522d65d059d471f6c7
|
60154d631a8d50b54fe6a83b3d5911d7ef14ca1a
|
/man/schisto_mod.Rd
|
08219c4864eaf13843fb3ca049b1ac87720f5162
|
[
"MIT"
] |
permissive
|
cmhoove14/DDNTD
|
afe2176b2634c6f2a5702ad05bca52d5dd1cfb58
|
55523725be8d5b560d36395ee1bee0bced9ce8a7
|
refs/heads/master
| 2020-06-08T04:42:56.671145
| 2019-11-20T19:24:43
| 2019-11-20T19:24:43
| 193,160,044
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 959
|
rd
|
schisto_mod.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/schisto_dynamic_models.R
\name{schisto_mod}
\alias{schisto_mod}
\title{Barebones schistosomiasis model}
\usage{
schisto_mod(t, n, pars)
}
\arguments{
\item{t}{Vector of timepoints at which to return state variable estimates}
\item{n}{Vector of state variable initial conditions}
\item{pars}{Named vector of model parameters}
}
\value{
A matrix of the state variables at all requested time points
}
\description{
A dynamic schistosomiasis model with SEI snail infection dynamics, a single mean worm burden population,
negative (crowding induced reductions in fecundity)
and positive (mating limitation) density dependencies and functionality
to simulate mass drug administration, snail control, and other interventions.
Note this is a function that is wrapped into `sim_schisto_base_mod`, which
should be used to simulate the model, this function is fed into the ode solver
from deSolve
}
|
e6db9a276fbf2bf9342b7eb7497c790f01d35fd4
|
384c3dbc571be91c6f743d1427dec00f13e0d8ae
|
/r/kernels/revolution4all-titanic-machine-learning-from-disaster-3/script/titanic-machine-learning-from-disaster-3.R
|
0ba0b28e4e1eb464a6eaaa802e778e1f77600ad6
|
[] |
no_license
|
helenaK/trustworthy-titanic
|
b9acdd8ca94f2fa3f7eb965596eed4a62821b21e
|
ade0e487820cf38974561da2403ebe0da9de8bc6
|
refs/heads/master
| 2022-12-09T20:56:30.700809
| 2020-09-10T14:22:24
| 2020-09-10T14:22:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,108
|
r
|
titanic-machine-learning-from-disaster-3.R
|
#Read data into R environment
# Titanic survival prediction: impute missing values, split 80/20,
# fit two random forests (with and without a family-size feature),
# compare accuracy, and write a submission file plus an importance plot.
# NOTE(review): no set.seed() before sample(), so the split (and hence the
# reported accuracies) is not reproducible — confirm whether that matters.
titanicData<-read.csv(file="../input/train.csv",header = TRUE,sep = "," )
#explicitly declare attribute "Survived" as factor; by default R treats this as numeric
titanicData$Survived<-factor(titanicData$Survived)
#load library randomForest to access built-in functions
library(randomForest)
#perform missing value treatment on the data (median/mode imputation)
titanicData2<-na.roughfix(titanicData)
#split the data into training and evaluation sets
ind <- sample(2, nrow(titanicData2),replace = TRUE,prob = c(0.8,0.2))
#training data - 80% sample of the original data
tdata <-titanicData2[ind==1,]
#validation data - 20% sample of the original data
vdata <-titanicData2[ind==2,]
#declare prediction variable to train on the training data
#assumption has been made that Pclass, Age and Sex are important attributes that helped survival
fcn=randomForest(Survived~Pclass+Sex+Age ,data=tdata,mtry=3,ntree=100)
#make the prediction on the validation set based on learning from training data
predicted<- predict(fcn,newdata=vdata,type="class")
#convert predicted from atomic vector into data frame
predicted <- as.data.frame(predicted)
#add predicted column to vdata to compare the prediction with actuals
vdata <- cbind(vdata, predicted)
#compute accuracy as no. of correct predictions over total number of predictions
accuracy_rf <- sum(predicted$predicted == vdata$Survived)/NROW(vdata)
accuracy_rf
#identify the important factors that helped in survival
importance(fcn)
#the accuracy was 72.88% and the most important factors helping in survival
#were identified to be "Sex" followed by "Age" and "Pclass"
#Approach 2 - since the accuracy was only 72.88%, try adding
#more attributes to the above to make the predictions more accurate
#define variable fmem as number of family members (siblings + parents)
#add it to training and validation sets
tdata2<-cbind(tdata,fmem=tdata$SibSp+tdata$Parch)
vdata2<-cbind(vdata,fmem=vdata$SibSp+vdata$Parch)
#define the second prediction model, adding fmem
fcn2=randomForest(Survived~Pclass+Sex+Age+fmem ,data=tdata2,mtry=4,ntree=100,importance=TRUE)
predicted2<- predict(fcn2,newdata=vdata2,type="class")
predicted2 <- as.data.frame(predicted2)
vdata2 <- cbind(vdata2, predicted2)
accuracy_rf2 <- sum(predicted2$predicted2 == vdata2$Survived)/NROW(vdata2)
accuracy_rf2
importance(fcn2)
#the accuracy improved slightly from 72.88% to 74% by adding
#the family member attribute to the prediction
#the most important factors helping in survival
#were identified to be "Sex" followed by "Age", "Pclass" and fmem
#make submission to titanic contest
# NOTE(review): this writes the validation rows, not test-set predictions —
# verify this is the intended submission format.
write.csv(vdata2, file = "random_forest_r_submission.csv", row.names=FALSE)
# type=1: mean decrease in accuracy per feature
imp <- importance(fcn2, type=1)
featureImportance <- data.frame(Feature=row.names(imp), Importance=imp[,1])
library(ggplot2)
# Horizontal bar chart of feature importance, sorted by importance.
p <- ggplot(featureImportance, aes(x=reorder(Feature, Importance), y=Importance)) +
geom_bar(stat="identity", fill="#53cfff") +
coord_flip() +
theme_light(base_size=20) +
xlab("") +
ylab("Importance") +
ggtitle("Random Forest Feature Importance\n") +
theme(plot.title=element_text(size=18))
ggsave("2_feature_importance.png", p)
|
cdd083939127d02a50f91c0f6a724379fc177242
|
f441b443f1e959ce33009b9d9c6573ad07163991
|
/viz/prepare_truth_data.R
|
18b26be8f00bdf2f927661c6ea77022dba06a0c0
|
[
"MIT"
] |
permissive
|
pmarcosa/covid19-forecast-hub-europe
|
58a786c4c09561271d792de5eff1ff8598ceb7be
|
30273785d2a3393ff133c5b1fff7cac034e83aa2
|
refs/heads/main
| 2023-08-15T22:17:02.326313
| 2021-09-21T21:31:00
| 2021-09-21T21:31:00
| 365,716,002
| 0
| 0
|
NOASSERTION
| 2021-05-09T09:35:42
| 2021-05-09T09:35:42
| null |
UTF-8
|
R
| false
| false
| 1,227
|
r
|
prepare_truth_data.R
|
# Build the weekly truth data used by the visualisation: combine JHU
# case/death truth with ECDC hospitalisation truth, aggregate daily values
# to epidemiological weeks, and keep only week-ending Saturdays.
library(dplyr)
library(here)
library(readr)
# Fix: the original bound the *Deaths* file to `df_case` and the *Cases* file
# to `df_death`. The column renames were correct, so results were right, but
# the variable names were swapped; names now match their content.
df_death <- read_csv(here("data-truth/JHU/truth_JHU-Incident Deaths.csv")) %>%
  rename(inc_death = value)
df_case <- read_csv(here("data-truth/JHU/truth_JHU-Incident Cases.csv")) %>%
  rename(inc_case = value)
df_hosp <- read_csv(here("data-truth/ECDC/truth_ECDC-Incident Hospitalizations.csv")) %>%
  rename(inc_hosp = value)
# Full joins keep every location/date present in any of the three sources
# (deaths joined first, then cases, preserving the original column order).
df <- full_join(df_death, df_case, by = c("date", "location", "location_name"))
df <- full_join(df, df_hosp, by = c("date", "location", "location_name"))
df <- df %>%
  # add epi weeks for aggregation
  mutate(date = lubridate::ymd(date),
         epi_week = lubridate::epiweek(date),
         epi_year = lubridate::epiyear(date)) %>%
  group_by(location, location_name, epi_year, epi_week) %>%
  # aggregate to weekly incidence; max(date) is the last day observed that week
  summarise(date = max(date),
            inc_death = sum(inc_death),
            inc_case = sum(inc_case),
            inc_hosp = sum(inc_hosp)) %>%
  ungroup() %>%
  # only keep Saturdays (the epi-week ending day)
  filter(lubridate::wday(date, label = TRUE) == "Sat") %>%
  # reformat
  select(date, location, location_name, inc_case, inc_death, inc_hosp) %>%
  arrange(date, location)
write_csv(df, "viz/truth_to_plot.csv", quote = "needed")
|
8b06f1f509c215110d3ee0cd54c8c15a14920a70
|
573611c1c4fb728749abc203bcb6ebdd229bf394
|
/Calculate_AlternateSumPCBs.R
|
bcc3f38f233f23881482cea3f22b33b1533fcbfc
|
[] |
no_license
|
ppgibson/MSQ_DataPaper_Anl
|
6dc96edd3dbb76aaec8960e1008e67cf58b14ce9
|
b1e66642d7172907d56f6c4cedaa2cb26e92897e
|
refs/heads/master
| 2021-01-10T04:39:42.958811
| 2016-02-19T21:48:17
| 2016-02-19T21:48:17
| 48,077,958
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,235
|
r
|
Calculate_AlternateSumPCBs.R
|
##################################################################
## MSQ Data paper, data analysis: ##
## Generate sum-PCB values for each sample according to several ##
## different approaches to handling censored data ##
##################################################################
#
# Four different approaches to handling censored data:
# 1. substitute zero (standard approach)
# 2. substitute half the detection limit
# 3. estimate a sample sum-PCB value using the Kaplan-Meier approach,
# as described by Helsel (2009)
# 4. estimate concentrations for censored data based on the concentration
# of a set of common congeners
# (*for now, this method is applied to 2012-13 spider samples only)
#
# Input: \congeners\ df from setup
#
# Output - in the Output folder, a .csv table with a sum-PCB conc for
# each sample according to the four methods:
# - AllSampleData_CompareSums.csv
# Packages
library(NADA) #Helsel's package for estimating summary statistics including censored data
# NOTE(review): this script also relies on dplyr (mutate/filter/group_by/...),
# reshape2 (melt), and the DirOut path being loaded/defined by the setup script.
#### A. CALCULATE KAPLAN-MEIER SUMS ####
## As described by Helsel (2009), use the non-parametric Kaplan Meier approach
## to estimate a sum for each sample containing nondetects -
## despite the issues, in our data, that
## (a) Many samples have < 0.5 rate of detection across all congeners.
## (b) Concentrations of congeners are not independent from one another-
## they occur according, loosely, to a characteristic congener profile.
# To calculate KM, need ss.MDL value for each nondetect result
congeners <- mutate(congeners, resval.km=result_value) #For all detected obs, use the reported value.
congeners$resval.km[congeners$censor.ind==1] <- congeners$mdl.ss[congeners$censor.ind==1]
# Convert censor.ind to a TRUE/FALSE for use in the NADA function
congeners$censor.ind <- as.logical(congeners$censor.ind)
# Split the cong-level data into separate dfs, one for each sample
smp.list <- split(x=congeners, f=congeners$sys_sample_code) #n=307 congeners; shd be 119 or 162 rows (congeners) per df.
# Function to calculate the KM sum for a given data set:
# KM-estimated mean concentration times the number of congeners in the sample.
kmsum <- function(df){
kmfit.cur <- cenfit(obs=df$resval.km, censored=df$censor.ind)
mean.cur <- mean(kmfit.cur)[1]
n.cur <- kmfit.cur@survfit$n
est.sum <- mean.cur*n.cur
return(est.sum)
}
# Calculate KM-sum for each sample
kmsums.l <- lapply(X=smp.list, FUN=kmsum)
kmsums <- melt(unlist(kmsums.l))
kmsums$sys_sample_code <- names(kmsums.l)
kmsums <- rename(kmsums, sum.km=value)
rownames(kmsums) <- NULL
# Clean up
rm(kmsums.l); rm(smp.list)
#### B. ESTIMATE VALUES FOR CENSORED DATA USING CONGENER PROFILES ####
# Calculate rate-of-detection by congener
# (i.e., which are the most frequently detected congeners, across 2012-13 spid smp?)
bycon <- group_by(filter(congeners, sample_year!=2011 & category!="Sediment"), chemical_name)
consum <- summarize(bycon,
n.det=sum(!is.na(result_value)),
n.smp=n())
consum <- arrange(consum, desc(n.det))
rm(bycon)
# 6 congeners appear in all (n=119) or all but one (n=118) of the eligible samples
# (134 total samples minus 15 complete nondetects = 119 eligible samples).
# These 6 congeners will be the reference congeners.
refcons <- consum$chemical_name[consum$n.det>=118]
# Calculate the sum total concentration of the refcons in each 2012-13 sample
# A new column with result values only for the ref cons
spid.cons <- filter(congeners, category!="Sediment" & sample_year!=2011) #Separate data frame for 2012-13 spider data only.
spid.cons$resval.ref <- NA #most congeners should have NA in this column...
spid.cons$resval.ref[spid.cons$chemical_name %in% refcons] <- spid.cons$result_value[spid.cons$chemical_name %in% refcons] #...but the ref cons should have a value (if anything was detected).
# Now sum the refcon result values for each sample
bysmp <- group_by(spid.cons, sys_sample_code)
refsums <- summarize(bysmp,
sum.ref.cons = sum(resval.ref, na.rm=TRUE))
# Clean up
rm(bysmp); rm(consum); rm(spid.cons); rm(refcons)
#### C. CALCULATE PCB-SUMS ####
# New columns containing the desired type of result value for each sum-type
# (Remove the current 'resval.km' (replaces res_val with ss.mdl for censored),
# as this is not for summing and it is just confusing)
congeners <- select(congeners, -resval.km)
# Substitute zero
# (censor.ind is now logical; the ==1 comparisons below still work via TRUE==1)
congeners <- mutate(congeners, resval.zero=result_value) #For all detected obs, use the reported value.
congeners$resval.zero[congeners$censor.ind==1] <- 0 #For all censored obs (nondetect or below ssMDL), substitute zero.
# Substitute half det lim
congeners <- mutate(congeners, resval.halfdl=result_value) #For all detected obs, use the reported value.
congeners$resval.halfdl[congeners$censor.ind==1] <- 0.5 * (congeners$mdl.ss[congeners$censor.ind==1]) #For all censored obs, substitute half the ssMDL.
# Calculate sums
bysmp <- group_by(congeners, sample_year, category, site_number, stn_id, sys_sample_code)
smp.sums <- summarize(bysmp,
n.det = sum(!is.na(result_value)),
pct.det = (sum(!is.na(result_value)))/n(),
avg.mdl = mean(mdl.ss),
smp.mass = min(smp.mass), #all smp.mass values for a given sample should be same
sum.zero = sum(resval.zero), #sum of all observations above MDL
sum.halfdl = sum(resval.halfdl) ) #substitute half det lim for censored obs #substitute zero for censored obs, but sum concentrations for good cons only.
# Add in KM-estimated sums and refcon sums
smp.sums <- merge(smp.sums, kmsums, by="sys_sample_code", all=TRUE)
smp.sums <- merge(smp.sums, refsums, by="sys_sample_code", all=TRUE) #only applies to 2012-13 spider smp, others will be NA.
#### D. WRITE STANDARD OUTPUT TABLE ####
smp.sums <- arrange(ungroup(smp.sums), sample_year, category, site_number, stn_id)
write.csv(smp.sums, paste(DirOut, "AllSampleData_CompareSums.csv", sep=""), row.names=FALSE)
##### END SCRIPT ####
|
fd6a0e942025006ca2287e9082ee6cb3eab3391f
|
11005f482340db087b59269c32b9060719ce4f4c
|
/glmm/tests/basicBHtest.R
|
b462751df69d794953f1c4db7f74f7b4d02bb88b
|
[] |
no_license
|
bensonsyd/GLMMwithParallel
|
4ff2445ced15088c76ab6d9d5a6848f6d6598655
|
338ad34cf2eae96015ef706dc7d3f6e9edde6048
|
refs/heads/master
| 2020-03-18T21:59:58.693749
| 2018-08-03T18:20:16
| 2018-08-03T18:20:16
| 128,808,926
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 193
|
r
|
basicBHtest.R
|
# Smoke test: fit a Monte Carlo maximum-likelihood GLMM to the Booth-Hobert
# example data and print the fit summary.
library(glmm)
data(BoothHobert)
set.seed(123)  # the Monte Carlo likelihood approximation is stochastic; fix the seed
# One fixed effect (x1, no intercept), one variance component (z1),
# Bernoulli response, m = 1000 Monte Carlo samples, PQL starting values.
mod.mcml1<-glmm(y~0+x1, list(y~0+z1), varcomps.names=c("z1"), data=BoothHobert, family.glmm=bernoulli.glmm, m=1000, doPQL=TRUE)
summary(mod.mcml1)
|
3debc4b1708bc601847eb36b171b9970ef6105ea
|
d75a1e1e95ae70ce048a0c26fb0f9c283fd5dd70
|
/man/BREC_2A.Rd
|
f55758f3f599bf2025cf74b67da447d5d97c1a93
|
[] |
no_license
|
Owain-S/kmdata
|
49d65b279e7e84e170550f7d1fbdc8573f28784c
|
22569373a88f64ef480ea895c8ef7b7b5ced260e
|
refs/heads/master
| 2023-05-25T22:58:06.758825
| 2021-06-01T19:36:49
| 2021-06-01T19:36:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 909
|
rd
|
BREC_2A.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{BREC_2A}
\alias{BREC_2A}
\title{BREC, figure 2A}
\format{
A data frame of 277 observations and 3 variables:
\tabular{lll}{
\tab \code{time} \tab event time (in months) \cr
\tab \code{event} \tab PFS event indicator (\code{0}: no event, \code{1}: event) \cr
\tab \code{arm} \tab treatment arms (control, experimental) \cr
}
}
\source{
Moran T, Wei J, Cobo M, et al. Two biomarker-directed randomized
trials in European and Chinese patients with nonsmall-cell lung
cancer: the BRCA1-RAP80 Expression Customization (BREC) studies. Ann
Oncol 2014; 25: 2147–55.
}
\usage{
BREC_2A
}
\description{
Kaplan-Meier digitized data from BREC, figure 2A (PMID 25164908). A reported sample size of 279 for a primary endpoint of PFS in lung cancer.
}
\examples{
summary(BREC_2A)
kmplot(BREC_2A)
}
\keyword{datasets}
|
a5c155e53d131720c854a21f6301986a440a003f
|
fed5e62eae92e827585b85e144cd75f83225d0fa
|
/R/bin_filtered_results.R
|
3026140ad940cb1724ddccc6a6dae22dfc390e8f
|
[
"MIT"
] |
permissive
|
benjamincrary/CrossRefEDNA
|
7fcf3dbae3cb313c205f64689d8aee7fdf0e0dbf
|
9de9c1843a2f79fca54d43b12e90560587d389b4
|
refs/heads/master
| 2023-04-14T07:59:57.553022
| 2021-05-04T04:04:05
| 2021-05-04T04:04:05
| 280,018,107
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,902
|
r
|
bin_filtered_results.R
|
#' bin_filtered_results
#'
#' Bin filtered CrossRef eDNA publication records into taxonomy and category
#' groups, build Shiny-ready summary tables, and save them as RDS files
#' ("Input.RDS" and "AllPubs.RDS") in \code{input_destination}.
#'
#' @param filtered_edna A data frame of filtered CrossRef publication records
#'   (expects CrossRef columns such as \code{title}, \code{author},
#'   \code{container-title}, \code{link}, \code{issued.date-parts}).
#' @param input_destination Directory path (including any trailing separator)
#'   where the output RDS files are written.
#' @param data_location Location passed to \code{load_taxonomies()} and
#'   \code{load_categories()} to read the binning keyword tables.
#'
#' @importFrom magrittr %>%
#'
#' @return Invisibly, the value of the final \code{saveRDS()} call; the
#'   function is used for its side effect of writing the two RDS files.
#' @export
#'
#' @examples
bin_filtered_results <- function(filtered_edna, input_destination, data_location) {
print("Binning filtered results...")
taxonomies <- load_taxonomies(data_location)
categories <- load_categories(data_location)
# bin and consolidate: flag each record TRUE/FALSE per taxonomy/category,
# with an "Other" column for records matching none of them
taxResults <- listConstruct(filtered_edna, taxonomies) %>%
dplyr::mutate(Other=ifelse(rowSums(.==F) == ncol(taxonomies), TRUE, FALSE))
catResults <- listConstruct(filtered_edna, categories) %>%
dplyr::mutate(Other=ifelse(rowSums(.==F) == ncol(categories), TRUE, FALSE))
# One row per (record, matched taxonomy) pair, with author/URL/journal cleaned up
taxSummary <- filtered_edna %>%
dplyr::select(one_of("title", "author", "container-title", "link", "issued", "is-referenced-by-count", "issued.date-parts")) %>%
dplyr::mutate(date = extractDateFromParts(`issued.date-parts`)) %>%
dplyr::select(one_of("title", "author", "container-title", "link", "is-referenced-by-count", "date")) %>%
dplyr::bind_cols(., taxResults) %>%
tidyr::gather("taxonomy", "tax", 7:21) %>%
dplyr::filter(tax=="TRUE") %>%
dplyr::mutate_all(dplyr::funs(nullToNA(.))) %>%
dplyr::mutate(leadAuthor=extractLeadAuthor(author)) %>%
dplyr::mutate(URL = unlist(purrr::map(link, extractURL))) %>%
dplyr::mutate(`container-title` = purrr::map(`container-title`,1)) %>%
dplyr::mutate_at(dplyr::vars(title, `container-title`, `is-referenced-by-count`), dplyr::funs(unlist(.))) %>%
dplyr::select(-tax, -author, -link)
# Same reshaping, but per matched category
catSummary <- filtered_edna %>%
dplyr::select(one_of("title", "author", "container-title", "link", "issued", "is-referenced-by-count", "issued.date-parts")) %>%
dplyr::mutate(date = extractDateFromParts(`issued.date-parts`)) %>%
dplyr::select(one_of("title", "author", "container-title", "link", "is-referenced-by-count", "date")) %>%
dplyr::bind_cols(., catResults) %>%
tidyr::gather("category", "cat", 7:15) %>%
dplyr::filter(cat=="TRUE") %>%
dplyr::mutate_all(dplyr::funs(nullToNA(.))) %>%
dplyr::mutate(leadAuthor=extractLeadAuthor(author)) %>%
dplyr::mutate(URL = unlist(purrr::map(link, extractURL))) %>%
dplyr::mutate(`container-title` = purrr::map(`container-title`,1)) %>%
dplyr::mutate_at(dplyr::vars(title, `container-title`, `is-referenced-by-count`), dplyr::funs(unlist(.))) %>%
dplyr::select(-cat, -author,-link)
##### Format for input into Shiny App: join the two views, tidy bin labels,
##### and derive publication year/month columns
Summary <- dplyr::full_join(catSummary,taxSummary) %>%
dplyr::mutate_at(dplyr::vars(taxonomy), stringr::str_replace, pattern="Marine\\.Fish..", replace="Marine Fish") %>%
dplyr::mutate_at(dplyr::vars(taxonomy, category), stringr::str_replace, pattern="\\.", replace=" ") %>%
dplyr::mutate_at(dplyr::vars(taxonomy, category), stringr::str_replace, pattern="\\.and\\.", replace=" and ") %>%
dplyr::mutate_at(dplyr::vars(taxonomy, category), stringr::str_replace, pattern="Field\\.", replace="Field ") %>%
dplyr::mutate_at(dplyr::vars(taxonomy, category), dplyr::funs(replace(.,is.na(.), "Other"))) %>%
dplyr::mutate_if(is.character, dplyr::funs(stringr::str_replace(.,pattern="[\r\n]", replace=""))) %>%
dplyr::rename(TaxonomyBin=taxonomy, CategoryBin=category, Journal = `container-title`, Title=title) %>%
dplyr::rename(`Cited by` = `is-referenced-by-count`) %>%
dplyr::mutate_at(dplyr::vars(date), as.Date, origin="1970-01-01") %>%
dplyr::mutate(PublicationYear = lubridate::year(date), PublicationMonth = lubridate::month(date), Access="subscription")
# De-duplicated publication listing (one row per publication)
AllPubs <- Summary %>%
dplyr::select(Title, leadAuthor, Journal, date, `Cited by`, leadAuthor, URL) %>%
dplyr::rename(`Publication Date` = date, `Lead Author`=leadAuthor) %>%
dplyr::distinct()
saveRDS(Summary,paste0(input_destination, "Input.RDS"))
saveRDS(AllPubs,paste0(input_destination, "AllPubs.RDS"))
}
|
34cdce974d1d90762b91e900b630c2bf5424aa2a
|
88c4ab90e349646f4bba4ac81a5d810baacc9f50
|
/ICA.R
|
d52a2fc21e983b9b2a478b517021f8cc52561d1a
|
[] |
no_license
|
amlerario/ACC
|
d9a1ca067f23b35acb19fd239e03376c917ba77f
|
3e0da9687fab2474534f588edc19d476535f1533
|
refs/heads/master
| 2020-04-06T06:57:52.764576
| 2014-07-31T03:50:46
| 2014-07-31T03:50:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,225
|
r
|
ICA.R
|
#source("http://bioconductor.org/biocLite.R")
#biocLite(pkgs=c("Rsubread","limma"))
library(Biobase)
library(plyr)
library(ggplot2)
library(foreach)
library(xtable)
library(biomaRt)
library(GOstats)
library(cluster)
library(marray)
library(mclust)
library(RColorBrewer)
library(igraph)
library(Rgraphviz)
library(graph)
library(colorspace)
library(annotate)
library(scales)
library(gtools)
library(MineICA)
library(genefilter)
library(org.Hs.eg.db)
library(impute)
library(xlsx)
## --- Load expression matrix + sample annotations, annotate, filter -------
## NOTE(review): setwd() in a script is fragile; all paths below assume this
## directory exists -- confirm before rerunning elsewhere.
setwd("/home/amlerario/Projects/TCGA/EXPRESSION")
### edgeR.log2.txt
# xx: gene x sample matrix of edgeR log2 expression values
xx <- as.matrix(read.table("edgeR.log2.txt", header=TRUE, sep="\t", row.names=1, as.is=TRUE))
# pData: per-sample phenotype table (one row per sample, same order as xx columns)
pData <- read.table("pData.txt", header=T,sep="\t", row.names=1, as.is=TRUE)
### RESCALING TO EXCLUDE NEGATIVE VALUES (disabled)
#xxantilog <- apply(xx,2,function(x){ 2^x})
#constant<-1.001-range(as.vector(xxantilog), na.rm=T)[1]
#xxantilog <- apply(xxantilog,2,function(x){constant+x})
#xx <- apply(xxantilog,2,log2)
#xx[is.na(xx)] <- 1.001 # remove NA - attribute the minimal value to NA
## ANNOTATING: map row ids (presumably Entrez, the org.Hs.eg primary key --
## TODO confirm) to gene symbols and full gene names
symbol<-unlist(lookUp(rownames(xx), 'org.Hs.eg', 'SYMBOL'))
symbol<-unname(symbol, force=FALSE)
name<-unlist(lookUp(rownames(xx), 'org.Hs.eg', 'GENENAME'))
name<-unname(name, force=FALSE)
# feature annotation table, keyed by symbol
assayData<-cbind(symbol,name)
rownames(assayData)<-symbol
assayData <- assayData[!is.na(rownames(assayData)),]
assayData <- data.frame(assayData)
rownames(xx)<-symbol
rownames(pData) <- colnames(xx)
xx <- xx[!is.na(rownames(xx)),] ## remove rows without a SYMBOL annotation
rownames(xx)<-as.factor(rownames(xx))
### imputation - eliminate NAs from the table (disabled)
#xx<-impute.knn(xx)
### create an eset object bundling expression, phenotype and feature data
phenoData <- new("AnnotatedDataFrame", data=pData)
assayData<-new("AnnotatedDataFrame", data=assayData)
#eset <- ExpressionSet(assayData=xx$data, phenoData=phenoData, annotation="SYMBOL",featureData=assayData)
eset <- ExpressionSet(assayData=xx, phenoData=phenoData, annotation="SYMBOL",featureData=assayData)
eset_var<-eset
library(genefilter)
## work on the linear scale for the expression-level filter below
exprs(eset_var) <- 2^exprs(eset_var)
#Filter #1 - exclude genes with very low expression values - consider 2.5 to 5 inferior percentile as "zero"
## NAs are replaced by the 1st-percentile expression value
exprs(eset_var)[is.na(exprs(eset_var))] <- quantile(exprs(eset_var),0.01,na.rm=T) ## eliminate NA by substituting them by the p1 value
#eset_var <- selectFeatures_IQR(eset_var,10000)
## keep genes above the global 2.5th-percentile level in at least 80% of samples
ffun <- filterfun(pOverA(p=.8,A=quantile(exprs(eset_var),0.025,na.rm=T)))
#ffun <- filterfun(pOverA(p=0, A=-8))
t.fil <- genefilter(exprs(eset_var),ffun)
##################t.fil <- apply(e.mat,2,allNA)
# apply filter, and put expression back on log scale
exprs(eset_var) <- log2(exprs(eset_var)[t.fil,])
#zz<-impute.knn(exprs(eset_var))
#exprs(eset_var)<-zz$data
#ACC_RNASEQ_PRADA_RefGene
#xx <- as.matrix(read.table("ACC_RNASEQ_PRADA_RefGene.txt", header=TRUE, sep="\t", row.names=1, as.is=TRUE))
#isexpr <- rowSums(xx >= 0.5) >= length(xx[1,])*0.5
#xx <- xx[isexpr,]
#constant<-1.001-range(as.vector(xx), na.rm=T)[1]
#xx <- apply(xx,2,function(x){constant+x})
#xx[is.na(xx)] <- 1.001 # remove NA - attribute the minimal value to NA
#ffun <- filterfun(cv(0.7,10))
#t.fil <- genefilter(e.mat,ffun)
# apply filter, and put expression back on log scale
#xx <- log2(e.mat[t.fil,])
#xx <- apply(xx,2,log2)
## ANNOTATING PRADA
#xx <- xx[!is.na(rownames(xx)),]##remove those rows without SYMBOL
#rownames(pData) <- colnames(xx)
## Adding phenotype data and building an eSet object
#phenoData <- new("AnnotatedDataFrame", data=pData)
#eset <- ExpressionSet(assayData=xx, phenoData=phenoData, annotation="hgnc_symbol")
# obtain expression estimates on the UN-LOGGED scale
#e.mat <- 2^exprs(eset)
# look at mean, sd, & cv for each gene across arrays
#gene.mean <- apply(e.mat,1,mean)
#gene.sd <- apply(e.mat,1,sd)
#gene.cv <- gene.sd/gene.mean
# make plots
#library(geneplotter); library(RColorBrewer)
#blues.ramp <- colorRampPalette(brewer.pal(9,"Blues")[-1])
#dCol <- densCols(log(gene.mean),log(gene.sd),colramp=blues.ramp)
#par(mfrow=c(2,2))
#plot(gene.mean,gene.sd,log='xy',col=dCol,pch=16,cex=0.1)
#abline(v=5,lwd=3,col='red')
#hist(log(gene.cv),main=NA)
#abline(v=log(.7),lwd=3,col='red')
## Filtering by variance
#eset_var <- selectFeatures_IQR(eset,round(dim(exprs(eset))[1]*0.5)) # IQR p50
## restrict the number of genes to 10000 (as suggested by Anne Biton)
#eset_var <- selectFeatures_IQR(eset_var,10000)
########################################################################
## DEFINE HERE THE CUTOFF VALUE FOR IQR (0 disables the IQR filter) ####
cutoff_iqr=0 #########################################################
########################################################################
#if(cutoff_iqr != 0) {
# eset_var <- nsFilter(eset_var, var.cutoff=cutoff_iqr/100, require.entrez=F, remove.dupEntrez=F)
# eset_var <- eset_var$eset
#}
library(JADE)
## Features (genes) are mean-centered before ICA computation; scale=FALSE
## keeps the variance untouched.
exprs(eset_var) <- t(apply(exprs(eset_var),1,scale,scale=FALSE))#scale=FALSE
colnames(exprs(eset_var)) <- sampleNames(eset_var)
## Run ICA-JADE
########################################################################
## DEFINE HERE THE NUMBER OF INDEPENDENT COMPONENTS ####################
ncomp=8 ###############################################################
########################################################################
########################################################################
## DEFINE HERE THE PROJECTION CUTOFF USED FOR GENE SELECTION ###########
selCutoff=3.5 #########################################################
########################################################################
resJade <- runICA(X=exprs(eset_var), nbComp=ncomp, method="JADE", maxit=dim(exprs(eset_var))[1])
## if an error message appears here, try to reduce the ncomp variable
## biomaRt connection used by MineICA for gene annotation
mart <- useMart(biomart="ensembl", dataset="hsapiens_gene_ensembl")
typeIDeset_var <-c(geneID_annotation="SYMBOL", geneID_biomart="hgnc_symbol")#"hgnc_symbol"entrezgene
## results are written under ICA.com<ncomp>_iqr<cutoff_iqr>_<selCutoff>/
params <- buildMineICAParams(resPath=paste("ICA.com",ncomp,"_iqr",cutoff_iqr,"_",selCutoff, "/",sep=""), selCutoff=selCutoff, pvalCutoff=0.01)
refSamplesMainz <- character(0)
resBuild <- buildIcaSet(params=params, A=data.frame(resJade$A), S=data.frame(resJade$S), dat=exprs(eset_var),
                        pData=pData(eset_var),refSamples=refSamplesMainz, typeID=typeIDeset_var,mart=mart)
icaSeteset_var <- resBuild$icaSet
params <- resBuild$params
## see sample projections of specific components - in this example, component 1
#comp1<-getComp(icaSeteset_var, level="genes", ind=1)
#comp18<-getComp(icaSeteset_var, level="genes", ind=18)
## phenotype variables to keep in the MineICA association report
keepVar <- c("TP53","CTNNB1","ZNRF3_ALT","weiss","Mitosis_50","Grade","hormone_excess","histological_type","age_diagnosis","ENSAT", "mRNA_K5", "mRNA_K2", "Methy_CLS", "miRNA_CLS", "SCNA_CLS", "LOH", "n_mutation", "gender", "vital_status", "PKA")
runAn(params=params, icaSet=icaSeteset_var, writeGenesByComp=TRUE, keepVar=keepVar,mart=mart,selCutoffWrite=selCutoff)
#resW <- writeProjByComp(icaSet=icaSeteset_var,params=params,mart=mart,level="genes",selCutoffWrite=3)
#[1] "mRNA_K5" "mRNA_K2" "ENSAT" "Methy_CLS"
#[5] "MethyLevel_CLS" "miRNA_CLS" "SCNA_CLS" "PKA"
#[9] "LOH" "purity" "ploidy" "n_mutation"
#[13] "hypermut" "histological_type" "gender" "vital_status"
#[17] "weiss" "Mitosis_50" "hormone_excess" "age_diagnosis"
#[21] "days_to_birth" "days_to_death" "days_to_last_followup" "TP53_mut"
#[25] "TP53_SCNA" "CTNNB1_mut" "CTNNB1_SCNA" "MEN1_mut"
#[29] "MEN1_SCNA" "PRKAR1A_mut" "PRKAR1A_SCNA" "RPL22_mut"
#[33] "RPL22_SCNA" "X12q14_CDK4" "X16q22_TERF2" "X17q25_RFNG"
#[37] "X19p13_BRD4_.AKAP8" "X19q12_CCNE1" "X1q22_EFNA3_4" "X5p15_TERT"
#[41] "xq28_PNMA6A" "X22q12_ZNRF3" "ZNRF3_mut" "X9p21_CDKN2A"
#[45] "CDKN2A_methelation" "APC" "ZNRF3" "TP53"
## --- STRING protein-interaction networks per component -------------------
library(STRINGdb)
## STRING v9.1, human (taxon 9606), no interaction-score threshold
string_db <- STRINGdb$new(version="9_1", species=9606, score_threshold=0, input_directory="")
## genes contributing to each component (|projection| > 3), one list entry per component
contrib <- selectContrib(icaSeteset_var, cutoff=3, level="genes")
#genes18<-sort(contrib[[18]],decreasing=TRUE)#[1:10]
#genes18<-data.frame(names(genes18))
### evaluating interactions of the positive side of the components
dir.create(paste(getwd(),"/ICA.com",ncomp,"_iqr",cutoff_iqr,"_",selCutoff,"/string",sep=""),showWarnings=F)
for(i in 1:ncomp) {
  ## positive-tail contributing genes of component i, mapped to STRING ids
  genes<-sort(contrib[[i]][contrib[[i]]>0],decreasing=TRUE)
  genes<-data.frame(names(genes))
  mapped <- string_db$map(genes,"names.genes.", removeUnmappedRows = TRUE)
  hits <- mapped$STRING_id
  string_db$plot_network(hits)
  ## copy the on-screen network plot into a per-component pdf
  dev.copy(pdf,paste(getwd(),"/ICA.com",ncomp,"_iqr",cutoff_iqr,"_",selCutoff,"/string/","comppos",i,".pdf",sep=""))
  dev.off()
}
### evaluating interactions of the negative side of the components
### (mirror of the positive-side loop above, output files "compneg<i>.pdf")
for(i in 1:ncomp) {
  ## negative-tail contributing genes of component i, mapped to STRING ids
  genes<-sort(contrib[[i]][contrib[[i]]<0],decreasing=TRUE)
  genes<-data.frame(names(genes))
  mapped <- string_db$map(genes,"names.genes.", removeUnmappedRows = TRUE)
  hits <- mapped$STRING_id
  string_db$plot_network(hits)
  ## copy the on-screen network plot into a per-component pdf
  dev.copy(pdf,paste(getwd(),"/ICA.com",ncomp,"_iqr",cutoff_iqr,"_",selCutoff,"/string/","compneg",i,".pdf",sep=""))
  dev.off()
}
## --- GO / KEGG enrichment per component ----------------------------------
## For each component, compute "Process" (GO biological process) and KEGG
## enrichment for three gene sets -- negative tail, positive tail, and all
## contributing genes -- and write each result as a separate sheet of one
## xlsx workbook per component. Replaces three copy-pasted mapping stanzas
## and a six-branch if-chain; sheet order (GO_NEG, KEGG_NEG, GO_POS,
## KEGG_POS, GO_ALL, KEGG_ALL) and file names are unchanged.
for(i in 1:ncomp) {
  ## map a named contribution vector to STRING ids (sorted by contribution,
  ## unmapped genes dropped -- same as the original per-set stanzas)
  mapHits <- function(v) {
    genes.df <- data.frame(gene = names(sort(v, decreasing = TRUE)))
    string_db$map(genes.df, "gene", removeUnmappedRows = TRUE)$STRING_id
  }
  hit.sets <- list(
    NEG = mapHits(contrib[[i]][contrib[[i]] < 0]),
    POS = mapHits(contrib[[i]][contrib[[i]] > 0]),
    ALL = mapHits(contrib[[i]])
  )
  out.file <- paste(getwd(), "/ICA.com", ncomp, "_iqr", cutoff_iqr, "_", selCutoff,
                    "/string/", "comp", i, ".xlsx", sep = "")
  for (set.name in names(hit.sets)) {
    for (category in c("Process", "KEGG")) {
      enrichment <- string_db$get_enrichment(hit.sets[[set.name]],
                                             category = category,
                                             methodMT = "fdr", iea = TRUE)
      ## sheet names follow the original convention, e.g. "GO_NEG", "KEGG_ALL"
      sheet <- paste(ifelse(category == "Process", "GO", "KEGG"), set.name, sep = "_")
      write.xlsx(data.frame(enrichment), file = out.file, sheetName = sheet,
                 col.names = TRUE, row.names = TRUE, append = TRUE, showNA = TRUE)
    }
  }
}
#geneint=c("GATA4")
#closeg <- genefinder(exprs(eset), geneint, 200, method="maximum", scale="zscore")
#rownames(exprs(eset))[closeg[[1]]$indices]
#resEnrich <- runEnrich(params=params,icaSet=icaSeteset_var[,,3],dbs="GO", ontos="BP", cond=T)
#head(icaSeteset_var[,1:20])
### WTINESS GENES OF EACH COMPONENT
#witGenes(icaSeteset_var)
#1 2 3 4 5 6 7 8 9 10 11 12 13
#"RPS4Y1" "GSTA1" "SPRR1A" "NDN" "CTNNA2" "DLGAP5" "NKAIN4" "LUM" "TDGF1P3" "RASGRF1" "PROK1" "MAOB" "FAM166B"
#14 15 16 17 18 19 20 21 22
#"B3GNT7" "FCGBP" "FAM19A5" "ZFR2" "SLC4A10" "ORM1" "EGLN3" "MGAT5" "SNCB"
|
6ca491ad10b41c9ad740ff259ebd900c9bdc6830
|
1f9a964c779f440be2b88fbe7226ceef8cb5b335
|
/R/AllClasses.R
|
ee4a353225580cafe0cda09a6b742ec71923573d
|
[] |
no_license
|
phycomlab/RISC
|
e7b1bc07fdc22d2656bcf62ab08142d539b1e545
|
94fcf7f6e82f7089285a8086b4e001202420c48d
|
refs/heads/master
| 2023-06-08T18:11:16.582266
| 2021-06-27T17:53:39
| 2021-06-27T17:53:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,113
|
r
|
AllClasses.R
|
####################################################################################
#' Import single cell data
####################################################################################
#'
#' The single cell RNA-seq (scRNA-seq) data can be imported in three different ways.
#' Primarily, we could import from 10X Genomics output directly by using
#' "read10Xgenomics". The user only need to provide the folder path. Secondly,
#' we could read data from HT-seq output by "readHTSeqdata", the user have to input
#' the folder path. Lastly, we could input matrix, cell and genes mannually
#' using "readscdata".
#'
#' @useDynLib RISC
#' @importFrom methods as new
#' @importFrom utils head read.table
#' @importFrom Matrix readMM colSums rowSums
#' @rdname SingleCellData
#' @return SingleCellData
#' @param assay The list of gene counts.
#' @param coldata The data.frame with cell information.
#' @param rowdata The data.frame with gene information.
#' @name SingleCellData
SingleCellData <- function(assay, coldata, rowdata){
  ## Construct and return the RISCdata S4 object directly. The previous body
  ## assigned the object to a local variable, and since an assignment was the
  ## function's last expression, the object was returned *invisibly*;
  ## returning the new() call itself makes the result visible and drops the
  ## dead local binding.
  new(Class = 'RISCdata', assay = assay, coldata = coldata, rowdata = rowdata)
}
####################################################################################
#' Example data
####################################################################################
#'
#' @docType data
#' @usage data(raw.mat)
#' @format A list including a simulated cell-gene matrix (columns for cells,
#' rows for genes), a cell-group vector, and batch information.
"raw.mat"
####################################################################################
#' Import data from matrix, cell and genes directly.
####################################################################################
#'
#' Import data set from matrix, cell and genes directly, the customer needs three
#' files: a matrix file including gene expression values: raw counts/UMIs (rows for
#' genes while columns for cells), a cell file (whose row.name are equal to the
#' col.name of the matrix), and a gene file whose row.name are the same as the
#' row.name of the matrix. If row.names of the gene matrix are Ensembl ID, the
#' customer need to transfer them to gene symbols manually.
#'
#' @rdname Import-Matrix
#' @param count Matrix with raw counts/UMIs.
#' @param cell Data.frame with cell Barcode, whose row.name are equal to the
#' col.name of the matrix.
#' @param gene Data.frame with gene symbol, whose row.name are the same as the
#' row.name of the matrix.
#' @param is.filter Remove not expressed genes.
#' @return RISC single cell dataset, including count, coldata, and rowdata.
#' @name readscdata
#' @export
#' @examples
#' mat0 = as.matrix(raw.mat[[1]])
#' coldata0 = as.data.frame(raw.mat[[2]])
#' coldata.obj = coldata0[coldata0$Batch0 == 'Batch3',]
#' matrix.obj = mat0[,rownames(coldata.obj)]
#' obj0 = readscdata(count = matrix.obj, cell = coldata.obj,
#' gene = data.frame(Symbol = rownames(matrix.obj),
#' row.names = rownames(matrix.obj)), is.filter = FALSE)
readscdata <- function(
  count,
  cell,
  gene,
  is.filter = TRUE
) {
  ## Validate that all three inputs were actually supplied. The previous
  ## exists() guards always evaluated TRUE for formal arguments (the
  ## argument's promise exists even when no value was passed), so a missing
  ## argument surfaced later as an obscure evaluation error instead of this
  ## message.
  if (missing(count) || missing(cell) || missing(gene)) {
    stop('No matrix, cell or gene is found here')
  }
  ## Cells (matrix columns) and genes (matrix rows) must line up with their
  ## annotation tables.
  if (!(all(colnames(count) == rownames(cell)) && all(rownames(count) == rownames(gene)))) {
    stop('Matrix colnames or rownames are not equal to cell name or gene name')
  }
  run.data0 <- as.matrix(count)
  ## Mitochondrial genes ("mt-" prefix, any case) drive the per-cell mito rate.
  mito.gene <- grep(pattern = '^mt-', x = rownames(run.data0), ignore.case = TRUE, value = TRUE)
  ## Per-cell QC metrics: barcode, total counts/UMIs, detected genes.
  run.cell <- data.frame(
    scBarcode = rownames(cell),
    scUMI = Matrix::colSums(run.data0),
    ngene = Matrix::colSums(run.data0 > 0),
    row.names = rownames(cell),
    stringsAsFactors = FALSE
  )
  ## drop = FALSE keeps the subset a matrix even when exactly one mito gene
  ## matches (a single-row subset would otherwise collapse to a vector and
  ## break colSums).
  run.cell$mito <- Matrix::colSums(run.data0[rownames(run.data0) %in% mito.gene, , drop = FALSE]) / run.cell$scUMI
  run.cell <- cbind.data.frame(run.cell, cell)
  ## Per-gene annotation: symbol, assay type, number of cells expressing it.
  run.gene <- data.frame(
    Symbol = rownames(run.data0),
    RNA = "Gene Expression",
    row.names = rownames(run.data0),
    stringsAsFactors = FALSE
  )
  run.gene$nCell <- Matrix::rowSums(run.data0 > 0)
  ## Optionally drop genes detected in no cell at all.
  if (is.filter) {
    run.gene <- run.gene[run.gene$nCell > 0, ]
  }
  run.data0 <- run.data0[rownames(run.data0) %in% run.gene$Symbol, , drop = FALSE]
  SingleCellData(
    assay = list(count = as(run.data0, 'dgCMatrix')),
    rowdata = data.frame(run.gene, stringsAsFactors = FALSE),
    coldata = data.frame(run.cell, stringsAsFactors = FALSE)
  )
}
####################################################################################
#' Import data from 10X Genomics output.
####################################################################################
#'
#' Import data directly from 10X Genomics output, usually using filtered gene
#' matrices which contains three files: matrix.mtx, barcode.tsv and gene.tsv.
#' The user only need to input the directory into "data.path". If not the original
#' 10X Genomics output, the user have to make sure the barcode.tsv and gene.tsv
#' without col.names, the barcode.tsv at least contains one column for cell
#' barcode, and the gene.tsv has two columns for gene Ensembl ID and Symbol.
#'
#' @rdname Import-10X-Genomics
#' @param data.path Directory containing the filtered 10X Genomics output,
#' including three files: matrix.mtx, barcode.tsv (without colnames) and gene.tsv
#' (without colnames).
#' @param sep The sep can be changed by the users
#' @param is.filter Remove not expressed genes.
#' @return RISC single cell dataset, including count, coldata, and rowdata.
#' @name read10Xgenomics
#' @export
read10Xgenomics <- function(
  data.path,
  sep = '\t',
  is.filter = TRUE
) {
  ## NOTE(review): exists("data.path") is TRUE even when the argument is
  ## missing (the formal's promise exists), so a missing argument errors at
  ## as.character() below rather than producing this message -- confirm.
  if(!exists("data.path")){
    stop('Please input data.path')
  } else {
    data.path = as.character(data.path)
  }
  sep0 = sep
  ## Locate the three 10X output files by name pattern; exactly one of each
  ## is required.
  files = list.files(path = data.path, full.names = TRUE)
  file.matrix = grep('matrix', files, ignore.case = TRUE, value = TRUE)
  file.gene = grep(pattern = 'features|genes', files, ignore.case = TRUE, value = TRUE)
  file.cell = grep('barcodes', files, ignore.case = TRUE, value = TRUE)
  if(length(file.matrix) == 1 & length(file.gene) == 1 & length(file.cell) == 1){
    ## sparse gene x cell count matrix
    run.matrix = readMM(file = file.matrix)
    run.matrix = as(run.matrix, 'dgCMatrix')
    ## gene table: 2 columns (Ensembl, Symbol) or 3 (plus assay type)
    run.gene = read.table(file = file.gene, header = FALSE, sep = sep0, stringsAsFactors = FALSE)
    run.gene = data.frame(run.gene, stringsAsFactors = FALSE)
    if(ncol(run.gene) > 2){
      colnames(run.gene) = c('Ensembl', 'Symbol', 'RNA')
    } else {
      colnames(run.gene) = c('Ensembl', 'Symbol')
      run.gene$RNA = "Gene Expression"
    }
    run.gene$nCell = Matrix::rowSums(run.matrix > 0)
    ## deduplicate symbols so they can be used as unique rownames
    run.gene$Symbol = make.unique(run.gene$Symbol)
    rownames(run.matrix) = rownames(run.gene) = run.gene$Symbol
    ## optionally drop genes detected in no cell
    if(is.filter){
      run.gene = run.gene[run.gene$nCell > 0,]
    } else {
      run.gene = run.gene
    }
    run.matrix = run.matrix[rownames(run.matrix) %in% run.gene$Symbol,]
    ## mitochondrial genes ("mt-" prefix, any case) for the per-cell mito rate
    mito.gene = grep(pattern = '^mt-', x = rownames(run.matrix), ignore.case = TRUE, value = TRUE)
    run.cell0 = read.table(file = file.cell, header = FALSE, sep = sep0, stringsAsFactors = FALSE)
    # run.cell = sapply(run.cell$V1, function(x){strsplit(x, '-', fixed = T)[[1]][[1]]})
    ## per-cell QC metrics: barcode, total counts/UMIs, detected genes
    run.cell = data.frame(scBarcode = as.character(run.cell0$V1), scUMI = Matrix::colSums(run.matrix), ngene = Matrix::colSums(run.matrix > 0), stringsAsFactors = FALSE)
    ## NOTE(review): a single-row mito subset collapses to a vector and would
    ## break colSums -- consider drop = FALSE.
    run.cell$mito = Matrix::colSums(run.matrix[rownames(run.matrix) %in% mito.gene,]) / run.cell$scUMI
    colnames(run.matrix) = rownames(run.cell) = run.cell$scBarcode
  } else {
    stop('The direcotry is invalid, please input the dir including files: "barcodes.tsv", "features(genes).tsv", "matrix.mtx"')
  }
  SingleCellData(assay = list(count = as(run.matrix, 'dgCMatrix')), rowdata = data.frame(run.gene, stringsAsFactors = FALSE), coldata = data.frame(run.cell, stringsAsFactors = FALSE))
}
####################################################################################
#' Import data from HT-Seq output.
####################################################################################
#'
#' Import data directly from HT-Seq output, but each HTSeq.output.txt need to have
#' the same length of genes. If genes annotated by Ensembl ID, the customer need to
#' transfer them to gene symbols manually. The user need to make a folder to contain
#' all HT-Seq outputs, and input the directory into "data.path".
#'
#' @rdname Import-HT-Seq
#' @param data.path Directory containing all the HT-Seq outputs, each HT-Seq has the
#' same length of genes.
#' @param is.filter Remove not expressed genes.
#' @importFrom data.table fread
#' @return RISC single cell dataset, including count, coldata, and rowdata.
#' @name readHTSeqdata
#' @export
readHTSeqdata <- function(
  data.path,
  is.filter = TRUE
) {
  ## NOTE(review): exists("data.path") is TRUE even when the argument is
  ## missing (the formal's promise exists); a missing argument errors at
  ## as.character() below instead -- confirm intended.
  if(!exists("data.path")){
    stop('Please input data.path')
  } else {
    data.path = as.character(data.path)
  }
  ## One HT-Seq output file per cell; the file name before the first "."
  ## becomes the cell id.
  files = list.files(path = data.path, full.names = TRUE)
  names = list.files(path = data.path, full.names = FALSE)
  names0 = sapply(names, function(x){strsplit(x, '.', fixed = TRUE)[[1]][1]})
  sam = data.frame(file = files, name = names0, stringsAsFactors = FALSE)
  ## NOTE(review): length() never returns NULL, so this branch is unreachable
  ## and an empty directory is not caught here -- confirm intended.
  if(is.null(length(files))){
    stop('The directory is invalid, no files there')
  } else {
    ## column-bind the count column (V2) of every file; readHT strips the
    ## "__*" summary rows (e.g. "__no_feature")
    run.data0 = do.call(cbind, lapply(sam$file, function(x){readHT(x)}))
  }
  ## gene ids come from the first file (all files must share the same gene order)
  run1 = fread(file = files[1], sep = '\t', header = FALSE, stringsAsFactors = FALSE)
  run2 = run1[!run1$V1 %in% grep('__', run1$V1, value = TRUE),]
  rownames(run.data0) = make.unique(as.character(run2$V1))
  colnames(run.data0) = make.unique(sam$name)
  ## mitochondrial genes ("mt-" prefix, any case) for the per-cell mito rate
  mito.gene = grep(pattern = '^mt-', x = rownames(run.data0), ignore.case = TRUE, value = TRUE)
  ## per-cell QC metrics: id, total counts, detected genes
  run.cell = data.frame(scBarcode = colnames(run.data0), scUMI = colSums(run.data0), ngene = Matrix::colSums(run.data0 > 0), stringsAsFactors = FALSE)
  run.cell$mito = Matrix::colSums(run.data0[rownames(run.data0) %in% mito.gene,]) / run.cell$scUMI
  colnames(run.data0) = rownames(run.cell) = run.cell$scBarcode
  ## per-gene annotation: symbol, assay type, number of cells expressing it
  run.gene = data.frame(Symbol = rownames(run.data0), RNA = "Gene Expression", stringsAsFactors = FALSE)
  run.gene$nCell = Matrix::rowSums(run.data0 > 0)
  ## optionally drop genes detected in no cell
  if(is.filter){
    run.gene = run.gene[run.gene$nCell > 0,]
  } else {
    run.gene = run.gene
  }
  run.data0 = run.data0[rownames(run.data0) %in% run.gene$Symbol,]
  rownames(run.data0) = rownames(run.gene) = run.gene$Symbol
  SingleCellData(assay = list(count = as(run.data0, 'dgCMatrix')), rowdata = data.frame(run.gene, stringsAsFactors = FALSE), coldata = data.frame(run.cell, stringsAsFactors = FALSE))
}
####################################################################################
####################################################################################
## Read one HT-Seq output file and return its count column (V2), with the
## trailing summary rows (feature names containing "__", e.g. "__no_feature",
## "__ambiguous") removed.
readHT = function(x){
  counts <- fread(x, sep = '\t', header = FALSE, stringsAsFactors = FALSE)
  keep <- !grepl('__', counts$V1, fixed = TRUE)
  counts$V2[keep]
}
|
478cea1f8f9a7ac937e93d857c1a2442e4111ac7
|
7ef1d508442e1638ab124011a44b66bec95c5349
|
/R/extract.singleComparison.R
|
3273b4c48ab0d8b3759401569d7665f2e431f691
|
[] |
no_license
|
APAFbioinformatics/TMTPrePro
|
ce1c02b39bb289a21f2aff7a1641beea621a4dba
|
dc76dd7a6bf4a38851355a74eba742e00a8eed8e
|
refs/heads/master
| 2022-05-08T22:25:05.472112
| 2022-04-29T04:09:07
| 2022-04-29T04:09:07
| 159,422,005
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 426
|
r
|
extract.singleComparison.R
|
## Extract the columns belonging to one labelled comparison (ratio, count,
## variability) from a TMT data frame, together with the leading annotation
## columns.
##
## Args:
##   tmt.df:       data frame whose data columns embed
##                 "<numerator>.<denominator>" labels in their names,
##                 preceded by annotation columns.
##   numera_label: numerator channel label (e.g. "114").
##   denom_label:  denominator channel label (e.g. "113").
## Returns: the annotation columns plus every column matching the label,
##   always as a data frame.
extract.singleComparison=
function(tmt.df,numera_label,denom_label)
{
  label.name <- paste(numera_label, denom_label, sep = ".")
  ## NOTE(review): the "." in label.name acts as a regex wildcard in grep();
  ## assumes channel labels never collide with other column names -- confirm.
  col.nums <- grep(label.name, colnames(tmt.df))
  ## Annotation columns are everything before the first column whose name
  ## contains "1" (taken to be the first channel/data column).
  start.col <- min(grep("1", colnames(tmt.df)))
  ## seq_len() handles start.col == 1 correctly; the previous
  ## 1:(start.col-1) produced c(1, 0) in that case and selected a spurious
  ## duplicate of column 1.
  extract.cols <- c(seq_len(start.col - 1), col.nums)
  ## drop = FALSE keeps a data frame even when only one column is selected.
  tmt.df[, extract.cols, drop = FALSE]
}
|
b896b754657c2cb15da8e12269b44dbd1a1790fd
|
5a7611075079f277e02c52aeb6cbef1ad189ea96
|
/ImpactTrip_Charts.R
|
a4b5edbdd316dcb13ee0e562fa5f0f9168a54b5f
|
[] |
no_license
|
scottmmiller1/P4H
|
0138196ad227bb01a7296f21e1a7e292edc7cc2e
|
a4377572f4f88ada51a3df7874e41b5165e8d415
|
refs/heads/master
| 2021-06-29T17:06:28.557166
| 2020-09-29T13:57:11
| 2020-09-29T13:57:11
| 148,031,254
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,935
|
r
|
ImpactTrip_Charts.R
|
## NOTE(review): setwd() in a script is fragile; png files below land here.
setwd("/Users/scottmiller/Desktop/P4H Global/Evaluation/Analysis/ImpactTrip/ImpactTrip_UF_Haiti_ALL/Rplots")
### Run ImpactTrip.R prior to this section (provides the `dem` table)
# -------------------------------------------------------------------------
##########################
## Dem Charts ##
##########################
# Gender: pie of row 1, columns 2:3 of `dem`, as rounded percentages
gender <- round(c(dem[1,2:3]) / sum(dem[1,2:3]), digits = 2)*100
png("dem1.png", width = 700)
pie(gender, labels = paste(gender,"%",sep=""), col= c("royalblue3","firebrick2"),
    cex = 2, radius = .7, clockwise = T)
legend("top", legend = c("Male","Female"),
       fill = c("royalblue3","firebrick2"),
       horiz = T, cex = 1.2, yjust = 2)
dev.off()
# College Status: pie of row 2, columns 2:6 (5 categories)
# NOTE(review): 6 fill colors are supplied for 5 legend labels, and "Gradute"
# is a typo in a plotted label (runtime string, left as-is) -- confirm intent.
college <- round(c(dem[2,2:6]) / sum(dem[2,2:6]), digits = 2)*100
png("dem2.png", width = 700)
pie(college, labels = paste(college,"%",sep=""),
    col= c("darkorchid4","grey","firebrick2","royalblue3","gray48","green4"),
    cex = 2, radius = .7, clockwise = T)
legend("top", legend = c("Freshman","Sophomore","Junior","Senior","Gradute"),
       fill = c("darkorchid4","grey","firebrick2","royalblue3","gray48","green4"),
       horiz = T, cex = 1,yjust = 1)
dev.off()
# Race: pie of row 4, columns 3:6 (4 categories)
race <- round(c(dem[4,c(3:6)]) / sum(dem[4,c(3:6)]), digits = 2)*100
png("dem3.png", width = 700)
pie(race, labels = paste(race,"%",sep=""),
    col= c("grey","royalblue3","gray48","firebrick2"),
    cex = 2, radius = .7, clockwise = T)
legend("top", legend = c("Asian","African American / Black","Other","White"),
       fill = c("grey","royalblue3","gray48","firebrick2"),
       horiz = T, cex = .9)
dev.off()
# Travel: bar chart of prior international trips (0..6+), row 6
travel <- round(c(dem[6,1:7]) / sum(dem[6,1:7]), digits = 2)*100
png("dem4.png", width = 800)
op <- par(mar = c(4,4,4,2) + 1)
dem4 <- barplot(travel, names.arg = c("0","1","2","3","4","5","6+"), las=1, col= c("royalblue3"), border="white",
                main = "International Travel", cex.main = 2,
                font.main = 1, font.axis=1, space = 1,
                ylim = c(0,72), font.lab=1, cex.axis = 1.5, cex.names = 1.5)
# percentage labels above each bar
text(dem4, travel + 4, paste(travel,"%",sep="") ,cex=1.9)
par(op) ## reset
dev.off()
# P4H Travel: bar chart of prior P4H trips (0..4), row 7
p4h <- round(c(dem[7,1:5]) / sum(dem[7,1:5]), digits = 2)*100
png("dem5.png", width = 800)
op <- par(mar = c(4,4,4,2) + 1)
dem5 <- barplot(p4h, names.arg = c("0","1","2","3","4"), las=1, col= c("royalblue3"), border="white",
                main = "P4H Travel", cex.main = 2,
                font.main = 1, font.axis=1, space = 1,
                ylim = c(0,60), font.lab=1, cex.axis = 1.5, cex.names = 1.5)
text(dem5, p4h + 5, paste(p4h,"%",sep="") ,cex=1.9)
par(op) ## reset
dev.off()
# -------------------------------------------------------------------------
##########################
## BA Charts ##
##########################
# Summary ------------------------------------------------------------------
## Avg. growth: horizontal bars of the mean before->after change per item
## (column 3 of `avg`), labelled with the rounded values.
avg1 <- round(avg, digits = 2)
rownames(avg1) <- c("Culture","Aid","Poverty","Operate","Critical Thinking","Career","Perform","Team Connection","Local Connection")
png("avg_growth.png", width = 700)
op <- par(mar = c(4,11,4,4) + 1)
avg_growth <- barplot(avg1[,3], names.arg = rownames(avg1), las=1, cex.names= 1.5, xlim = c(0,2.7),
                      font.lab=1, font.main = 1, font.axis = 1, horiz = TRUE, space = 1,
                      col= c("royalblue3"), border="white", main = "Avg. Growth", cex.main=2)
text(y = avg_growth, avg1[,3] + .13, paste(avg1[,3]), cex=1.5)
par(op) ## reset
dev.off()
## Pct. with positive growth: share of respondents per item whose score rose
## (1 minus the mass in the first five columns of diff1).
pos <- rep(0, 9)  # fix: was c(rep(0,6)); the loop fills 9 items and silently grew the vector
for (i in 1:9) {
  pos[i] <- round(1 - (sum(diff1[i,1:5]) / sum(diff1[i,])), digits = 2)*100
}
png("pos_growth.png", width = 700)
op <- par(mar = c(10,4,2,1) + 1)
pos_growth <- barplot(pos, names.arg = rownames(avg1), las=2, cex.names= 1.4, ylim = c(0,115),
                      font.lab=1, font.main = 1, font.axis = 1, space = 1,
                      col= c("royalblue3"), border="white", main = "% with Positive Growth", cex.main=2)
text(pos_growth, pos + 8, paste(pos,"%",sep=""), cex=1.9)
par(op) ## reset
dev.off()
## Adjusted version: stack on top the respondents who already gave the
## highest response before the trip (column 5 of BA1 on the odd "Before"
## rows -- presumably the "Very High" count, TODO confirm against BA1).
v.h <- rep(0, 18)
for (i in seq(1, 17, by = 2)) {
  v.h[i] <- round(BA1[i,5] / sum(BA1[i,]), digits = 2)*100
}
v.h <- v.h[-seq(2, 18, by = 2)]  # keep only the "Before" rows -> 9 values
adj.tot <- pos + v.h             # vectorized; replaces the element-wise loop
adj <- rbind(pos, v.h)
colnames(adj) <- c("Culture","Aid","Poverty","Operate","Critical Thinking","Career","Perform","Team Connection","Local Connection")
png("adj_growth.png", width = 700)
op <- par(mar = c(10,4,2,1) + 1)
pos_growth <- barplot(adj, names.arg = colnames(adj), las=2, cex.names= 1.4, ylim = c(0,115),
                      font.lab=1, font.main = 1, font.axis = 1, space = 1,
                      col= c("royalblue3", "firebrick2"), border="white", main = "Adj. % with Positive Growth", cex.main=2)
text(pos_growth, adj.tot + 8, paste(adj.tot,"%",sep=""), cex=1.9)
par(op) ## reset
dev.off()
# ---------------------------------------------------
# BA Charts
## BA holds 18 rows (before/after pairs for 9 survey items); diff holds the
## per-item change distributions. Convert both to row percentages for
## plotting. NOTE(review): duplicate rownames below imply BA is a matrix,
## not a data frame -- confirm upstream.
BA.questions <- c(1:18)
diff.questions <- c(1:9)
# convert each BA row (response-category counts) to percentages
for (i in BA.questions) {
  BA[i,] <- (BA[i,]/sum(BA[i,]))*100
}
colnames(BA) <- c("Very Low","Low","Medium","High","Very High")
rownames(BA) <- rep(c("Before","After"),length(diff.questions))
## d: per-item change distribution (columns 4:9 of diff) as percentages,
## one row per item
d <- matrix(0,length(diff.questions),6)
for (i in diff.questions) {
  d[i,] <- (diff[i,4:9]/sum(diff[i,4:9]))*100
}
colnames(d) <- c("-1 Unit","0 Units","1 Unit","2 Units","3 Units","4 Units")
## Paired before/after and change-distribution charts for BA items 1-7.
## For item k: "b<k>.png" shows the Before vs After response distributions
## (rows 2k-1 and 2k of BA) as a grouped bar chart; "c<k>.png" shows the
## distribution of unit changes (row k of d) with percentage labels.
## Replaces seven copy-pasted chart blocks; plot parameters, titles and
## output file names are unchanged. The bar-midpoint vectors are still
## published as globals b1..b7 / c1..c7 via assign() for compatibility.
ba.titles <- c("History & Culture", "Effective Aid Practices",
               "Understanding of Poverty", "Ability to Operate",
               "Critical Thinking", "Career Trajectory", "Ability to Perform")
for (k in seq_along(ba.titles)) {
  rows <- c(2 * k - 1, 2 * k)  # Before / After rows of BA for item k
  ## Before/After grouped bar chart
  png(paste0("b", k, ".png"), width = 800)
  op <- par(mar = c(4, 4, 4, 1) + 1)
  mids.b <- barplot(BA[rows, ], col = c("royalblue3", "firebrick2"), border = "white",
                    main = ba.titles[k], cex.main = 2,
                    font.main = 1, font.axis = 1, beside = TRUE,
                    ylim = c(0, 80), font.lab = 1, cex.axis = 1.5, cex.names = 1.5)
  legend("top", legend = rownames(BA[rows, ]),
         fill = c("royalblue3", "firebrick2"),
         horiz = TRUE, cex = 1.5)
  par(op)  ## reset margins
  dev.off()
  assign(paste0("b", k), mids.b)
  ## Change-in-units bar chart with percentage labels above the bars
  png(paste0("c", k, ".png"), width = 800)
  op <- par(mar = c(4, 4, 4, 2) + 1)
  mids.c <- barplot(d[k, ], names.arg = colnames(d), las = 1, col = c("royalblue3"), border = "white",
                    main = paste("Change in", ba.titles[k]), cex.main = 2,
                    font.main = 1, font.axis = 1, beside = TRUE, space = 1,
                    ylim = c(0, 80), font.lab = 1, cex.axis = 1.5, cex.names = 1.5)
  text(mids.c, d[k, ] + 4, paste(round(d[k, ], digits = 0), "%", sep = ""), cex = 1.9)
  par(op)  ## reset margins
  dev.off()
  assign(paste0("c", k), mids.c)
}
#8
png("b8.png", width = 800)
op <- par(mar = c(4,4,4,1) + 1)
b8 <- barplot(BA[15:16,], col= c("royalblue3","firebrick2"), border="white",
main = "Team Connection", cex.main = 2,
font.main = 1, font.axis=1, beside=T,
ylim = c(0,80), font.lab=1, cex.axis = 1.5, cex.names = 1.5)
legend("top", legend = rownames(BA[15:16,]),
fill = c("royalblue3","firebrick2"),
horiz = T, cex = 1.5)
par(op) ## reset
dev.off()
png("c8.png", width = 800)
op <- par(mar = c(4,4,4,2) + 1)
c8 <- barplot(d[8,], names.arg = colnames(d), las=1, col= c("royalblue3"), border="white",
main = "Change in Team Connection", cex.main = 2,
font.main = 1, font.axis=1, beside=T, space = 1,
ylim = c(0,80), font.lab=1, cex.axis = 1.5, cex.names = 1.5)
text(c8, d[8,] + 4, paste(round(d[8,],digits = 0),"%",sep="") ,cex=1.9)
par(op) ## reset
dev.off()
#9
png("b9.png", width = 800)
op <- par(mar = c(4,4,4,1) + 1)
b9 <- barplot(BA[17:18,], col= c("royalblue3","firebrick2"), border="white",
main = "Haitian Connection", cex.main = 2,
font.main = 1, font.axis=1, beside=T,
ylim = c(0,80), font.lab=1, cex.axis = 1.5, cex.names = 1.5)
legend("top", legend = rownames(BA[17:18,]),
fill = c("royalblue3","firebrick2"),
horiz = T, cex = 1.5)
par(op) ## reset
dev.off()
png("c9.png", width = 800)
op <- par(mar = c(4,4,4,2) + 1)
c9 <- barplot(d[9,], names.arg = colnames(d), las=1, col= c("royalblue3"), border="white",
main = "Change in Haitian Connection", cex.main = 2,
font.main = 1, font.axis=1, beside=T, space = 1,
ylim = c(0,80), font.lab=1, cex.axis = 1.5, cex.names = 1.5)
text(c9, d[9,] + 4, paste(round(d[9,],digits = 0),"%",sep="") ,cex=1.9)
par(op) ## reset
dev.off()
###################
##  Eval Charts  ##
###################
# Trip-evaluation Likert charts: one horizontal bar chart per question
# (rows 1-6 of `trip`), each bar a response category scaled to row percent.
# The original repeated the same block six times and, by copy-paste error,
# assigned the midpoints of charts 5 and 6 to `e1` (harmless, since the value
# was used immediately); the loop removes both the duplication and the defect.
colnames(trip) <- c("Strongly Disagree","Disagree","Neutral","Agree","Strongly Agree")
for (k in 1:6) {
  pct <- (trip[k, ] / sum(trip[k, ])) * 100  # row percentages for question k
  png(paste0("e", k, ".png"), width = 800)
  op <- par(mar = c(4, 11, 2, 2) + 1)
  mids <- barplot(pct, names.arg = colnames(trip), las = 1, col = c("royalblue3"),
                  border = "white",
                  main = "", cex.main = 1.6, horiz = T,
                  font.main = 1, font.axis = 1, space = 1,
                  xlim = c(0, 100), font.lab = 1, cex.axis = 1.5, cex.names = 1.5)
  # Horizontal chart: y = bar midpoints, x = value plus a 5-unit label offset.
  text(y = mids, pct + 5, paste0(round(pct, digits = 0), "%"), cex = 1.9)
  par(op)  ## reset
  dev.off()
}
# --------------
# Pre-loaded data: export observation counts and summary tables as
# comma-separated text files (no headers, no row names) for later reuse.
# NOTE(review): `diff` and `eval` here are data objects defined earlier in the
# file, shadowing base::diff / base::eval within this script.
write_plain <- function(x, file) {
  # All exports below share the same plain CSV format; keep it in one place.
  write.table(x, file, sep = ",", row.names = FALSE, col.names = FALSE)
}
# Observation counts: col 1 = BA (diff) rows, col 2 = eval rows, col 3 = dem rows.
n.obs <- matrix(0, 9, 3)
for (i in seq_len(9)) n.obs[i, 1] <- sum(diff[i, ])  # BA
for (i in seq_len(6)) n.obs[i, 2] <- sum(eval[i, ])  # eval
for (i in seq_len(7)) n.obs[i, 3] <- sum(dem[i, ])   # dem
write_plain(n.obs, "nobs.txt")
# difference data, rounded to whole percentages
d <- round(d, digits = 0)
write_plain(d, "diff.txt")
# avg growth data (third column only), two decimal places
avg <- round(avg[, 3], digits = 2)
write_plain(avg, "avg.txt")
# eval data, scaled to whole percentages
evalpct <- round(evalpct * 100, digits = 0)
write_plain(evalpct, "eval.txt")
# dem data: convert each row to row-percentages before export
for (i in seq_len(7)) {
  dem[i, ] <- round((dem[i, ] / sum(dem[i, ])) * 100, digits = 0)
}
write_plain(dem, "dem.txt")
|
c0f48e23de292b1ec878d83660c300ec0644aeea
|
ed1920915c1f7070c7cec39de8ca82672be18cc5
|
/source/CRTpredict/dclean.R
|
53ae7ec6979ea46ec8456ca90c401a08f3fe8155
|
[] |
no_license
|
sthallor/miscRscripts
|
d28f7a9cdbc53fc7c7994c6000d1753b3679236d
|
c3a5a206c35cdbbb15f07a4ea9250ff861b2e7f1
|
refs/heads/master
| 2022-11-06T03:39:03.951539
| 2020-06-21T23:21:47
| 2020-06-21T23:21:47
| 273,998,540
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,521
|
r
|
dclean.R
|
#######################################################################################
# dclean.R - clean drill predictor data according to parameters
# Ensign Energy Services Inc. retains all rights to this software
# FHS, Nov 28, 2017
#######################################################################################
dclean <- function(dt,prm.dc) {
# dclean: clean a drill-predictor data frame `dt` according to the parameter
# list `prm.dc`. Steps, in order: validate the timestamp column, standardize
# column names via prm.dc$namefixp, optionally convert units of measure,
# parse (and optionally UTC-shift) timestamps, apply per-column NA limits /
# floor / ceiling with counts tracked in convertCount, optionally aggregate to
# fixed time intervals (inserting blank rows for small gaps), and impute
# remaining NAs via the external impute() helper.
# Returns the cleaned (and possibly time-aggregated) data frame.
epsilon <- 1e-5
# Check that date time column is present
if (is.null(dt[[prm.dc$timeColName]])) {
stop(sprintf('FATAL ERROR - dclean - MUST HAVE DATE TIME COLUMN "%s"',prm.dc$timeColName))
}
# initialize numeric value conversion count table
convertCount <- data.frame(name=c('Other_Continuous','Other_Discrete',prm.dc$valueLimits$name,prm.dc$discrete$name))
convertCount$startingNA <- 0
convertCount$minNA <- 0
convertCount$floor <- 0
convertCount$ceiling <- 0
convertCount$maxNA <- 0
convertCount$totalIn <- 0
convertCount$interpolated <- 0
convertCount$extrapolated <- 0
convertCount$endingNA <- 0
convertCount$totalOut <- 0
# bring column names to standard names based on lookup table prm.dc$namefixp
for (i in 1:nrow(prm.dc$namefixp)) {
if (!is.null(dt[[prm.dc$namefixp$nonStandardName[i]]])) { # Non-standard name i found
j <- which(names(dt)==prm.dc$namefixp$nonStandardName[i])
# Convert any NA number codes to NAs prior to possible linear transformations
dt[dt[!(is.na(dt[,j])),j]==prm.dc$globalNumberCodeNA,j] <- NA
if (is.null(dt[[prm.dc$namefixp$standardName[i]]])) {
if (prm.dc$verbose) cat('\nChanging predictor name=',names(dt)[j],' to ',prm.dc$namefixp$standardName[i])
names(dt)[j] <- prm.dc$namefixp$standardName[i]
} else if (prm.dc$namefixp$overwrite[i]==TRUE) {
if (prm.dc$verbose) cat('\nOverwriting existing predictor ',prm.dc$namefixp$standardName[i],
' with ',prm.dc$namefixp$nonStandardName[i])
dt[[prm.dc$namefixp$standardName[i]]] <- NULL
names(dt)[j] <- prm.dc$namefixp$standardName[i]
} else {
if (prm.dc$verbose) cat('\nStandard predictor ',prm.dc$namefixp$standardName[i],
' already exists, NOT overwriting with ', prm.dc$namefixp$nonStandardName[i])
}
}
}
if (prm.dc$verbose) cat('\n\n')
# Perform any unit conversions here if option chosen and historian reshape option not chosen
if (prm.dc$convertUOM==TRUE & !prm.dc$reshapeHistorianData) {
prm.dc$standardUOM$changeFlag <- FALSE
for (i in 1:nrow(prm.dc$standardUOM)) {
if (!is.null(dt[[prm.dc$standardUOM$dColName[i]]]) &
!is.null(dt[[prm.dc$standardUOM$uomColName[i]]])) {
uomConvertCount <- length(which(dt[[prm.dc$standardUOM$uomColName[i]]]==prm.dc$standardUOM$inputUnit[i]))
if (uomConvertCount>0) {
# We have at least some values to convert from inputUnit to outputUnit
# Any NAs in uomColName are set to inputUnit
dt[[prm.dc$standardUOM$uomColName[i]]][is.na(dt[[prm.dc$standardUOM$uomColName[i]]])] <-
prm.dc$standardUOM$inputUnit[i]
uomConvertCount <- length(which(dt[[prm.dc$standardUOM$uomColName[i]]]==prm.dc$standardUOM$inputUnit[i]))
# makes temporary copy of conversion formula and checks for inconsistencies'
formula <- prm.dc$standardUOM$formula[i]
if (substr(formula,1,nchar(prm.dc$standardUOM$outputUnit[i])) != prm.dc$standardUOM$outputUnit[i] |
is.na(grep(prm.dc$standardUOM$inputUnit[i],formula)[1])) {
if (prm.dc$verbose) cat('\n\ndclean WARNING unit conversion formula ',prm.dc$standardUOM$formula[i],
' inconsistent with inputUnit=',prm.dc$standardUOM$inputUnit[i],
' and outputUnit=',prm.dc$standardUOM$outputUnit[i])
} else {
# Some units in dColName are being converted, sets flag so unit indicator uomColName is also updated
prm.dc$standardUOM$changeFlag[i] <- TRUE
# fixes formula with 'output' as a function of 'input'
formula <- paste0(gsub(prm.dc$standardUOM$outputUnit[i],'output',substr(formula,1,nchar(prm.dc$standardUOM$outputUnit[i]))),
substr(formula,(nchar(prm.dc$standardUOM$outputUnit[i])+1),nchar(formula)))
formula <- gsub(prm.dc$standardUOM$inputUnit[i],'input',formula)
# copies input
input <- dt[[prm.dc$standardUOM$dColName[i]]][dt[[prm.dc$standardUOM$uomColName[i]]]==prm.dc$standardUOM$inputUnit[i]]
# calculates output from input+formula
# NOTE(review): eval(parse()) executes the config-supplied conversion formula
# string and is expected to define `output`; parameter files are assumed trusted.
eval(parse(text=formula))
# replaces input uoms with output uoms
dt[[prm.dc$standardUOM$dColName[i]]][dt[[prm.dc$standardUOM$uomColName[i]]]==prm.dc$standardUOM$inputUnit[i]] <- output
if (prm.dc$verbose) cat('\nConverted ',uomConvertCount,prm.dc$standardUOM$uomColName[i],' values in ',
prm.dc$standardUOM$dColName[i],' from ',
prm.dc$standardUOM$inputUnit[i],' to ',prm.dc$standardUOM$outputUnit[i],
' with ',prm.dc$standardUOM$formula[i])
}
}
} else {
if (is.null(dt[[prm.dc$standardUOM$uomColName[i]]]) & prm.dc$verbose) cat('\ndclean WARNING ... UOM column ',
prm.dc$standardUOM$uomColName[i],' not found for testing data column ',
prm.dc$standardUOM$dColName[i],' for possible unit conversion of ',prm.dc$standardUOM$inputUnit[i],
' to ',prm.dc$standardUOM$outputUnit[i])
if (is.null(dt[[prm.dc$standardUOM$dColName[i]]]) & prm.dc$verbose) cat('\ndclean WARNING ... data column ',
prm.dc$standardUOM$dColName[i],' not found for testing with UOM column ',
prm.dc$standardUOM$uomColName[i],' for possible unit conversion of ',prm.dc$standardUOM$inputUnit[i],
' to ',prm.dc$standardUOM$outputUnit[i])
}
}
# For dColName columns with unit conversions, update the uomColName unit values accordingly
for (i in 1:nrow(prm.dc$standardUOM)) {
if (prm.dc$standardUOM$changeFlag[i]==TRUE) {
dt[[prm.dc$standardUOM$uomColName[i]]] <- as.character(dt[[prm.dc$standardUOM$uomColName[i]]])
dt[[prm.dc$standardUOM$uomColName[i]]][dt[[prm.dc$standardUOM$uomColName[i]]]==prm.dc$standardUOM$inputUnit[i]] <-
prm.dc$standardUOM$outputUnit[i]
}
}
}
if (prm.dc$verbose) cat('\n\n')
# Clean up potential stray character between date and time
# Convert to UTC time if option selected
if (prm.dc$timeAvg==TRUE) {
dt[[prm.dc$timeColName]] <- paste0(substr(dt[[prm.dc$timeColName]],1,10),' ',substr(dt[[prm.dc$timeColName]],12,19))
} else {
# with no time averaging, truncates last digit to round to nearest 10 seconds
dt[[prm.dc$timeColName]] <- paste0(substr(dt[[prm.dc$timeColName]],1,10),' ',substr(dt[[prm.dc$timeColName]],12,18),'0')
}
# NOTE(review): the offset parsing below assumes UTC_Offset strings shaped like
# '+HH:MM' (chars 2-3 = hours, chars 5-6 = minutes) -- confirm against source data.
if(prm.dc$UTCOffset==TRUE & sum(colnames(dt) %in% "UTC_Offset")>0) {
if (prm.dc$verbose) cat('\nUsing UTC_Offset to convert EDR_DateTime to UTC times.\n')
dt$UTC_Offset <- as.character(dt$UTC_Offset)
dt$time <- as.POSIXlt((as.numeric(dt$time)+
as.integer(substr(dt$UTC_Offset,2,3))*3600+
as.integer(substr(dt$UTC_Offset,5,6))*60),
origin="1970-01-01 00:00:00")
} else {
dt$time <- as.POSIXlt(dt[[prm.dc$timeColName]],"%Y-%m-%d %H:%M:%S",tz="")
}
if (sum(is.na(dt$time))>0) {
if (prm.dc$verbose) cat('\n\ndclean WARNING ... deleting ',sum(is.na(dt$time)),' observations with invalid time stamp.')
if (prm.dc$verbose) cat('\nFirst 3 time values = ',head(dt[[prm.dc$timeColName]][is.na(dt$time)],n=3))
if (prm.dc$verbose) cat('\nThis could be caused by daylight savings time ...\n')
dt <- dt[!is.na(dt$time),]
}
# If save all option not used, reduces columns to only those explicitly selected
if (!prm.dc$saveAll) {
dt <- dt[,colnames(dt) %in% c(prm.dc$timeColName,'time',prm.dc$valueLimits$name,prm.dc$discrete$name)]
if (prm.dc$verbose) cat('\nSave all option not used, only selected columns will be cleaned and returned : \n')
savedColumns <- data.frame(colname=colnames(dt))
savedColumns$class <- 'Blank'
for (i in 1:length(colnames(dt))) { savedColumns$class[i] <- class(dt[[colnames(dt)[i]]])[1] }
print(savedColumns)
}
# Loop through the columns, set NAs, floor & ceiling, track statistics
for (c in 1:ncol(dt)) {
if (class(dt[,c])[1]=='numeric' | class(dt[,c])[1]=='integer') {
if (colnames(dt)[c] %in% prm.dc$valueLimits$name) {
# Selected continuous column: apply its per-column limits from valueLimits.
vli <- which(prm.dc$valueLimits$name %in% colnames(dt)[c]) # valueLimits index
cci <- which(convertCount$name %in% colnames(dt)[c]) # convertCount index
# Adjusts total incoming observations count
convertCount$totalIn[cci] <- convertCount$totalIn[cci] + nrow(dt)
# Sets values equal to globalNumberCodeNA to NA
dt[which(dt[,c]==prm.dc$globalNumberCodeNA),c] <- NA
# Starting NA count
convertCount$startingNA[cci] <- sum(is.na(dt[,c]))
# Sets values below minNA to NA
convertCount$minNA[cci] <- convertCount$minNA[cci] + sum(dt[,c]<prm.dc$valueLimits$minNA[vli],na.rm=T)
dt[which(dt[,c]<prm.dc$valueLimits$minNA[vli]),c] <- NA
# Sets values above maxNA to NA
convertCount$maxNA[cci] <- convertCount$maxNA[cci] + sum(dt[,c]>prm.dc$valueLimits$maxNA[vli],na.rm=T)
dt[which(dt[,c]>prm.dc$valueLimits$maxNA[vli]),c] <- NA
# Sets values below floor to floor
convertCount$floor[cci] <- convertCount$floor[cci] + sum(dt[,c]<prm.dc$valueLimits$floor[vli],na.rm=T)
dt[which(dt[,c]<prm.dc$valueLimits$floor[vli]),c] <- prm.dc$valueLimits$floor[vli]
# Sets values above ceiling to ceiling
convertCount$ceiling[cci] <- convertCount$ceiling[cci] + sum(dt[,c]>prm.dc$valueLimits$ceiling[vli],na.rm=T)
dt[which(dt[,c]>prm.dc$valueLimits$ceiling[vli]),c] <- prm.dc$valueLimits$ceiling[vli]
# Trap for all NAs and numeric value assigned for NAs, FHS Nov 6, 2016
if (sum(!is.na(dt[,c]))==0) {
if(suppressWarnings(!is.na(as.numeric(prm.dc$valueLimits$NANum[vli])))) {
dt[,c] <- as.numeric(prm.dc$valueLimits$NANum[vli])
}
}
} else { # Numeric but not one of the continuous selected columns, apply global parameters
if (colnames(dt)[c] %in% prm.dc$discrete$name) {
cci <- which(convertCount$name %in% colnames(dt)[c]) # convertCount index
} else { # not a selected column
# Test for NA limits whether continuous or discrete
# (cci 1 = Other_Continuous, cci 2 = Other_Discrete, per convertCount row order)
cci <- if (length(unique(dt[,c]))>=prm.dc$contMinUnique & sum(!is.na(dt[,c]))/nrow(dt)>prm.dc$contMinDensity) 1 else 2
}
# Adjusts total observations count
convertCount$totalIn[cci] <- convertCount$totalIn[cci] + nrow(dt)
# Sets values equal to globalNumberCodeNA to NA
dt[which(dt[,c]==prm.dc$globalNumberCodeNA),c] <- NA
# Starting NA count
convertCount$startingNA[cci] <- convertCount$startingNA[cci] + sum(is.na(dt[,c]))
# Sets values below globalMinNA to NA
convertCount$minNA[cci] <- convertCount$minNA[cci] + sum(dt[,c]<prm.dc$globalMinNA,na.rm=T)
dt[which(dt[,c]<prm.dc$globalMinNA),c] <- NA
# Sets values above globalMaxNA to NA
convertCount$maxNA[cci] <- convertCount$maxNA[cci] + sum(dt[,c]>prm.dc$globalMaxNA,na.rm=T)
dt[which(dt[,c]>prm.dc$globalMaxNA),c] <- NA
}
}
}
# If option selected, Group to time intervals, average/sample values
# NOTE(review): this tests against the string 'TRUE' while the time-string
# cleanup above tests timeAvg==TRUE; both succeed via coercion when timeAvg is
# a logical, but the inconsistency is worth unifying.
if (prm.dc$timeAvg=='TRUE') {
library(lubridate)
library(data.table)
dt$timeSec <- as.factor(as.character(as.POSIXlt(
# Removed force_tz because of error in R3.4.2 FHS, Nov 27, 2017
# Seems to work fine in R3.2.2
# round((as.numeric(force_tz(dt$time,tzone=""))-prm.dc$timeOffset+epsilon)/prm.dc$timeInterval)*prm.dc$timeInterval+prm.dc$timeOffset,
# origin="1970-01-01 00:00:00")))
round((as.numeric(dt$time)-prm.dc$timeOffset+epsilon)/prm.dc$timeInterval)*prm.dc$timeInterval+prm.dc$timeOffset,
origin="1970-01-01 00:00:00")))
dt1 <- data.frame(time=unique(dt$timeSec)) # unique time records rounded to nearest time interval
for (c in 1:ncol(dt)) {
if (sum(!is.na(dt[,c]))>0) { # Must have at least one value to aggregate
if (prm.dc$verbose) cat('\nAggregating ',colnames(dt)[c],class(dt[,c]))
if (class(dt[,c])[1]=='numeric' | class(dt[,c])[1]=='integer') {
if (length(table(dt[,c]))>=prm.dc$contMinUnique) {
# Takes average for continuous data
# dataSec <- aggregate(.~timeSec, data=dt[,c(c,ncol(dt))], FUN=function(x){mean(x,na.rm=T)})
dtable <- data.table(dt[!is.na(dt[,c]),c(c,ncol(dt))])
colnames(dtable) <- c('v1','timeSec')
dataSec <- dtable[,list(v1=mean(v1)),by='timeSec']
if (prm.dc$verbose) cat(' continuous average ')
} else {
# Takes the first non NA value for discrete data
# dataSec <- aggregate(.~timeSec, data=dt[,c(c,ncol(dt))], FUN=function(x){as.numeric(names(table(x))[1])})
dtable <- data.table(dt[!is.na(dt[,c]),c(c,ncol(dt))])
colnames(dtable) <- c('v1','timeSec')
dataSec <- dtable[,list(v1=head(v1,n=1)),by='timeSec']
if (prm.dc$verbose) cat(' discrete numeric first values ')
}
if (prm.dc$verbose) cat(' with ',nrow(dataSec),' values.')
colnames(dataSec) <- c("time",names(dt)[c])
dataSec <- dataSec[!is.na(dataSec$time),]
dt1 <- merge(dt1,dataSec,by='time',all.x=T)
} else { # Non-numeric columns, such as driller remarks
if (!(colnames(dt)[c] %in% c(prm.dc$timeColName,'time','timeSec'))) {
# dt[,c] <- as.character(dt[,c])
# dataSec <- aggregate(.~timeSec, data=dt[,c(c,ncol(dt))], FUN=function(x){names(sort(table(x),decreasing=TRUE)[1])})
dtable <- data.table(dt[!is.na(dt[,c]),c(c,ncol(dt))])
colnames(dtable) <- c('v1','timeSec')
dataSec <- dtable[,list(v1=head(v1,n=1)),by='timeSec']
if (prm.dc$verbose) cat(' non-numeric first values with ',nrow(dataSec),' values.')
colnames(dataSec) <- c("time",names(dt)[c])
dataSec <- dataSec[!is.na(dataSec$time),]
dt1 <- merge(dt1,dataSec,by='time',all.x=T)
} else {
if (prm.dc$verbose) cat(' skipping...')
}
}
}
}
dt1$time <- as.POSIXlt(dt1$time)
if (prm.dc$verbose) cat('\n\nAggregating to ',prm.dc$timeInterval,
' second time intervals has reduced row count from ',nrow(dt),' to ',nrow(dt1))
# Insert any missing time intervals into dt1
dt1$timespanAfter <- 0
dt1$timespanAfter[1:(nrow(dt1)-1)] <- as.numeric(difftime(dt1$time[2:nrow(dt1)],dt1$time[1:(nrow(dt1)-1)],units='secs'))
newtimes <- NULL
for (i in which(dt1$timespanAfter > (prm.dc$timeInterval+1) & dt1$timespanAfter < prm.dc$insertMaxTimespan)) {
# Use force_tz() to avoid problems when converting back to POSIXct
# Removed force_tz because of error in R3.4.2 FHS, Nov 28, 2017
# Seems to work fine in R3.2.2
# timeValue <- as.numeric(force_tz(as.POSIXlt(dt1$time[i]),tzone=""))
timeValue <- as.numeric(as.POSIXlt(dt1$time[i]))
for (j in seq(timeValue+prm.dc$timeInterval,timeValue+dt1$timespanAfter[i]-1,by=prm.dc$timeInterval)) {
newtimes <- append(newtimes, as.POSIXct(j,origin="1970-01-01 00:00:00"))
}
}
newtimes <- data.frame(time=newtimes)
if (nrow(newtimes)>0) {
if (prm.dc$verbose) cat('\n\nInserting ',nrow(newtimes),' blank observations with ',
prm.dc$timeInterval, ' second time intervals to fill in gaps.')
dt1 <- merge(dt1,newtimes,by='time',all.x=T,all.y=T)
}
dt1$timespanAfter <- NULL
} else {
dt1 <- dt
}
# Loop through the columns, impute NAs (interpolate/extrapolate), track statistics
for (c in 1:ncol(dt1)) {
if (class(dt1[,c])[1]=='numeric' | class(dt1[,c])[1]=='integer') {
# Attempt to impute any missing values
# NOTE(review): impute() is an external helper (not defined in this file); it
# returns the imputed column plus the updated convertCount bookkeeping.
res <- impute(dt1[,c],colnames(dt1)[c],prm.dc,convertCount)
dt1[,c] <- res$dt.col
convertCount <- res$convertCount
} else {
dt1[,c] <- as.character(dt1[,c])
}
}
if (prm.dc$verbose) cat('\n\n')
if (prm.dc$verbose) print(convertCount)
return(dt1)
}
|
c84637d9f60d114d70309b3b579e66e69371b381
|
4ab21a0fc1a84067e0a1b83c2932087a78d9f1d0
|
/submissions/01_cj-data-transformation-kz2na.R
|
0ef0e476ea6aacd5ec88a5bfcfc41afb1063dbe5
|
[] |
no_license
|
GCOM7140/completejourney-exercises
|
af7a58d0d0afb980e8fcf7fc7c442dd06b746a02
|
e6685585d55646a74ef0e3493aade5eb0c2690d0
|
refs/heads/master
| 2021-03-19T13:06:04.568094
| 2019-07-30T00:11:13
| 2019-07-30T00:11:13
| 121,028,827
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,919
|
r
|
01_cj-data-transformation-kz2na.R
|
# One-time setup: install the tidyverse and the completejourney data package
# (grocery-transaction sample data), then attach them.
# NOTE(review): install.packages()/install_github() re-run on every execution;
# consider guarding with requireNamespace() checks.
install.packages("tidyverse")
install.packages("devtools")
devtools::install_github("bradleyboehmke/completejourney")
library(tidyverse)
library(completejourney)
library(dplyr)  # already attached by library(tidyverse); redundant but harmless
# ---- Question 1 ----
# Change the discount variables (retail_disc, coupon_disc, coupon_match_disc)
# from negative to positive.
# NOTE: the original used a bare `---` line as a section separator, which is
# not valid R syntax (it fuses with the next assignment); comment headers are
# used instead throughout.
# mutate() adds/overwrites columns; assigning back to `transactions` keeps the fix.
# %>% pipes left to right: x %>% f %>% g is equivalent to g(f(x)).
transactions <- transactions %>%
  mutate(
    retail_disc = abs(retail_disc),
    coupon_disc = abs(coupon_disc),
    coupon_match_disc = abs(coupon_match_disc)
  )
transactions
# ---- Question 2 ----
# Create three new price variables (per-unit, derived from discounts):
#   regular_price = (sales_value + retail_disc + coupon_match_disc) / quantity
#   loyalty_price = (sales_value + coupon_match_disc) / quantity
#   coupon_price  = (sales_value - coupon_disc) / quantity
# (The bare `---` separator was invalid R syntax; replaced with a comment.)
transactions <- transactions %>%
  mutate(
    regular_price = (sales_value + retail_disc + coupon_match_disc) / quantity,
    loyalty_price = (sales_value + coupon_match_disc) / quantity,
    coupon_price  = (sales_value - coupon_disc) / quantity
  )
transactions
# ---- Question 3 ----
# The transactions dataset includes 68,509 unique product IDs. How many of
# these products (not transactions!) had a regular price of one dollar or
# less? Same count for loyalty price and coupon price.
# Equivalent nested form:
#   n_distinct(select(filter(transactions, regular_price <= 1), product_id))
# (The bare `---` separator was invalid R syntax; replaced with a comment.)
transactions %>% filter(regular_price <= 1) %>% select(product_id) %>% n_distinct()
transactions %>% filter(loyalty_price <= 1) %>% select(product_id) %>% n_distinct()
transactions %>% filter(coupon_price <= 1) %>% select(product_id) %>% n_distinct()
# ---- Question 4 ----
# What proportion of baskets are over $10 in sales value?
# Per the hint: total each basket with group_by()/summarize(), ungroup(), then
# take mean(basket_value > 10) -- the mean of TRUE/FALSE values is a proportion.
# (The original chunk summarized without aggregating and ended in a dangling
# `%>%`, so it never computed the proportion; completed here.)
transactions %>%
  group_by(basket_id) %>%
  summarize(basket_value = sum(sales_value)) %>%
  ungroup() %>%
  summarize(proportion_over_10 = mean(basket_value > 10))
# ---- Question 5 ----
# Which store with over $10K in total sales_value discounts its products the
# most for loyal customers?
# (The bare `---` separator was invalid R syntax; replaced with a comment.)
transactions %>%
  # Keep rows where both prices are usable and regular_price is positive,
  # so the discount ratio below is well-defined.
  filter(
    is.finite(regular_price),
    is.finite(loyalty_price),
    regular_price > 0
  ) %>%
  # Fractional loyalty discount per transaction line.
  mutate(
    pct_loyalty_disc = 1 - (loyalty_price / regular_price)
  ) %>%
  group_by(store_id) %>%
  summarize(
    total_sales_value = sum(sales_value),
    avg_pct_loyalty_disc = mean(pct_loyalty_disc)
  ) %>%
  # Restrict to stores with over $10K total sales, deepest discounters first.
  filter(total_sales_value > 10000) %>%
  arrange(desc(avg_pct_loyalty_disc))
|
5dc2681e37b6ef2ab0c9be9ed80fc9c4dd99f1e1
|
96bb1dca9df61f0cddd526921f071e403169b53c
|
/man/addgraph.Rd
|
59f8938e844d4ed0a88dff005dc3edd6ddb88ec5
|
[] |
no_license
|
byadu/libcubolap
|
c3f8d582ca2ee1d52147022c8e2c5c640b1beafc
|
aab240405b1868dfdbf23c4f618b7fcd8e84db9f
|
refs/heads/master
| 2022-11-19T16:07:50.921510
| 2020-07-17T13:09:06
| 2020-07-17T13:09:06
| 278,988,102
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 592
|
rd
|
addgraph.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/olapwrt.R
\name{addgraph}
\alias{addgraph}
\title{addgraph}
\usage{
addgraph(repname, foldid, g, currfilt, M)
}
\arguments{
\item{repname}{is name of report being saved}
\item{foldid}{is the folder id under which this report is being saved}
\item{g}{is the graph being saved}
\item{currfilt}{are the filters in use}
\item{M}{is the metadata/config database connection}
}
\description{
addgraph saves a new graph into the database
}
\details{
add graph_series
add graph_def
add folder_tree
add graph_props
}
|
c63386218d97746dc42d9f1161c77469746e3e3b
|
cb84ffe686ad920e254a24b2bc381d758c2fb406
|
/man/Constitutional.Rd
|
38b32b08862b72e583062331e82dbc22a74496f5
|
[] |
no_license
|
jedison-github/BioMedR-generating-various-molecular-representations-for-chemicals-proteins-DNAs-RNAs-and-their-in
|
893f9acb6e140b2a90da0e5e90c5289241ecb374
|
07d67e01aae12137cc8e380e3521e6cb7b60127f
|
refs/heads/master
| 2020-04-11T07:59:02.395625
| 2018-04-16T15:02:17
| 2018-04-16T15:02:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,723
|
rd
|
Constitutional.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/404-extractDrugAminoAcidCount.R
\name{Constitutional}
\alias{Constitutional}
\alias{extrDrugAminoAcidCount}
\alias{extrDrugAromaticAtomsCount}
\alias{extrDrugAromaticBondsCount}
\alias{extrDrugAtomCount}
\alias{extrDrugBondCount}
\alias{extrDrugLargestChain}
\alias{extrDrugLargestPiSystem}
\alias{extrDrugLongestAliphaticChain}
\alias{extrDrugRotatableBondsCount}
\title{Calculates the Number of Amino Acids Descriptor}
\usage{
extrDrugAminoAcidCount(molecules, silent = TRUE)
extrDrugAromaticAtomsCount(molecules, silent = TRUE)
extrDrugAromaticBondsCount(molecules, silent = TRUE)
extrDrugAtomCount(molecules, silent = TRUE)
extrDrugBondCount(molecules, silent = TRUE)
extrDrugLargestChain(molecules, silent = TRUE)
extrDrugLargestPiSystem(molecules, silent = TRUE)
extrDrugLongestAliphaticChain(molecules, silent = TRUE)
extrDrugRotatableBondsCount(molecules, silent = TRUE)
}
\arguments{
\item{molecules}{Parsed molecule object.}
\item{silent}{Logical. Whether the calculating process
should be shown or not, default is \code{TRUE}.}
}
\value{
A data frame, each row represents one of the molecules,
each column represents one feature.
This function returns 20 columns named
\code{nA}, \code{nR}, \code{nN}, \code{nD}, \code{nC},
\code{nF}, \code{nQ}, \code{nE}, \code{nG}, \code{nH},
\code{nI}, \code{nP}, \code{nL} \code{nK}, \code{nM},
\code{nS}, \code{nT}, \code{nY} \code{nV}, \code{nW}.
A data frame, each row represents one of the molecules,
each column represents one feature.
This function returns one column named \code{naAromAtom}.
}
\description{
Calculates the Number of Amino Acids Descriptor
Calculates the Number of Aromatic Atoms Descriptor
Calculates the Number of Aromatic Bonds Descriptor
Calculates the Number of Atom Descriptor
Calculates the Descriptor Based on the Number of Bonds of a
Certain Bond Order
Descriptor that Calculates the Number of Atoms in the Largest Chain
Descriptor that Calculates the Number of Atoms in the Largest Pi Chain
Descriptor that Calculates the Number of Atoms in the Longest Aliphatic Chain
Descriptor that Calculates the Number of Nonrotatable Bonds on A Molecule
}
\details{
Calculates the number of each amino acids (total 20 types)
found in the molecues.
Calculates the number of aromatic atoms of a molecule.
Calculates the number of aromatic bonds of a molecule.
Calculates the number of atoms of a certain element type in a molecule.
By default it returns the count of all atoms.
Calculates the descriptor based on the number of bonds of a
certain bond order.
This descriptor calculates the number of atoms in the largest chain.
Note that a chain exists if there are two or more atoms.
Thus single atom molecules will return \code{0}.
This descriptor calculates the number of atoms in the largest pi chain.
This descriptor calculates the number of atoms in the longest aliphatic chain.
The number of rotatable bonds is given by the SMARTS specified by
Daylight on SMARTS tutorial
(\url{http://www.daylight.com/dayhtml_tutorials/languages/smarts/smarts_examples.html#EXMPL})
}
\examples{
# Calculates the Number of Amino Acids Descriptor
smi = system.file('vignettedata/test.smi', package = 'BioMedR')
mol = readMolFromSmi(smi, type = 'mol')
dat = extrDrugAminoAcidCount(mol)
head(dat)
# Calculates the Number of Aromatic Atoms Descriptor
smi = system.file('vignettedata/test.smi', package = 'BioMedR')
mol = readMolFromSmi(smi, type = 'mol')
dat = extrDrugAromaticAtomsCount(mol)
head(dat)
# Calculates the Number of Aromatic Bonds Descriptor
smi = system.file('vignettedata/test.smi', package = 'BioMedR')
mol = readMolFromSmi(smi, type = 'mol')
dat = extrDrugAromaticBondsCount(mol)
head(dat)
# Calculates the Number of Atom Descriptor
smi = system.file('vignettedata/test.smi', package = 'BioMedR')
mol = readMolFromSmi(smi, type = 'mol')
dat = extrDrugAtomCount(mol)
head(dat)
# Calculates the Descriptor Based on the Number of Bonds of a
# Certain Bond Order
smi = system.file('vignettedata/test.smi', package = 'BioMedR')
mol = readMolFromSmi(smi, type = 'mol')
dat = extrDrugBondCount(mol)
head(dat)
# Descriptor that Calculates the Number of Atoms in the Largest Chain
smi = system.file('vignettedata/test.smi', package = 'BioMedR')
mol = readMolFromSmi(smi, type = 'mol')
dat = extrDrugLargestChain(mol)
head(dat)
# Descriptor that Calculates the Number of Atoms in the Largest Pi Chain
smi = system.file('vignettedata/test.smi', package = 'BioMedR')
mol = readMolFromSmi(smi, type = 'mol')
dat = extrDrugLargestPiSystem(mol)
head(dat)
# Descriptor that Calculates the Number of Atoms in the Longest Aliphatic Chain
smi = system.file('vignettedata/test.smi', package = 'BioMedR')
mol = readMolFromSmi(smi, type = 'mol')
dat = extrDrugLongestAliphaticChain(mol)
head(dat)
# Descriptor that Calculates the Number of Nonrotatable Bonds on A Molecule
smi = system.file('vignettedata/test.smi', package = 'BioMedR')
mol = readMolFromSmi(smi, type = 'mol')
dat = extrDrugRotatableBondsCount(mol)
head(dat)
}
\author{
Min-feng Zhu <\email{wind2zhu@163.com}>,
Nan Xiao <\url{http://r2s.name}>
}
\keyword{Acid}
\keyword{Aliphatic}
\keyword{Amino}
\keyword{Aromatic}
\keyword{Atom}
\keyword{Atoms}
\keyword{Bond}
\keyword{Bonds}
\keyword{Chain}
\keyword{Count}
\keyword{Largest}
\keyword{Longest}
\keyword{Pi}
\keyword{Rotatable}
\keyword{extrDrugAminoAcidCount}
\keyword{extrDrugAromaticAtomsCount}
\keyword{extrDrugAromaticBondsCount}
\keyword{extrDrugAtomCount}
\keyword{extrDrugBondCount}
\keyword{extrDrugLargestChain}
\keyword{extrDrugLargestPiSystem}
\keyword{extrDrugLongestAliphaticChain}
\keyword{extrDrugRotatableBondsCount}
|
eeb929051200988553be9d444f028e6ebfd3036a
|
8c5062a3c839a744a0bb72be1bd81ccf04b6901e
|
/[GO enrichment]Colon_study_GOenrichment_wsw.R
|
6d1276cec86afac6606cf4cbba9e3478b798fd49
|
[] |
no_license
|
zhichai0120/RNAseqAnalysis
|
60e910040e9d5b755c030f6d00b426d95536d94f
|
3a81c8f1695730111bcf77dac1aeb079b6297d5e
|
refs/heads/master
| 2023-07-02T23:59:12.524753
| 2021-08-05T14:26:05
| 2021-08-05T14:26:05
| 277,416,327
| 0
| 0
| null | null | null | null |
GB18030
|
R
| false
| false
| 11,810
|
r
|
[GO enrichment]Colon_study_GOenrichment_wsw.R
|
# GO enrichment analysis of differentially expressed genes (mouse).
# This section loads the annotation packages, reads the DE gene tables, and
# prepares the Entrez ID vectors and the log2-fold-change ranking used by the
# enrichment/plotting sections below.
source("http://Bioconductor.org/biocLite.R")
# Load the legacy Bioconductor installer (biocLite; superseded by BiocManager).
setwd("c:/R_work/res_R11/");
# Set the working directory; all input/output files below live here.
library(clusterProfiler)
#biocLite("org.Hs.eg.db")
#library(org.Hs.eg.db)
##bioconductor website, Genome wide annotation for Human, primarily based on mapping using Entrez Gene identifiers.
#biocLite("org.Mm.eg.db")
library(org.Mm.eg.db)
# Genome wide annotation for Mouse, primarily based on mapping using Entrez Gene identifiers.
allgene3<-read.csv("20181208allgene3.csv",header=T,row.names=1)
# Read the table of all tested genes into 'allgene3'.
gene_dif<-read.csv("20181208gene_dif.csv",header=T,row.names=1)
# Read the table of differentially expressed genes into 'gene_dif'.
gene_up<-read.csv("20181208gene_up.csv",header=T,row.names=1)
# Read the table of up-regulated genes into 'gene_up'.
gene_down<-read.csv("20181208gene_down.csv",header=T,row.names=1)
# Read the table of down-regulated genes into 'gene_down'.
EGU<-allgene3$entrez
# Entrez IDs of all tested genes (the enrichment universe), assigned to 'EGU'.
EG_dif<-gene_dif$entrez
# Entrez IDs of the differentially expressed genes, assigned to 'EG_dif'.
EG_up<-gene_up$entrez
# Entrez IDs of the up-regulated genes, assigned to 'EG_up'.
EG_down<-gene_down$entrez
# Entrez IDs of the down-regulated genes, assigned to 'EG_down'.
mode(EGU)
mode(EG_dif)
EGU<-as.character(EGU)
# Convert 'EGU' to character (enrichGO expects character gene IDs).
EG_dif<-as.character(EG_dif)
# Convert 'EG_dif' to character.
EG_up<-as.character(EG_up)
# Convert 'EG_up' to character.
EG_down<-as.character(EG_down)
# Convert 'EG_down' to character.
mode(EG_dif)
length(EGU)
length(EG_up)
length(EG_down)
length(EG_dif)
# Add a 'FoldChange' column to allgene3, computed as 2^log2FoldChange.
allgene3$FoldChange<-2^(allgene3$log2FoldChange)
genelist<-allgene3$log2FoldChange
# Extract the log2 fold change of every gene into 'genelist'.
names(genelist)<-allgene3$entrez
# Name the fold-change vector by Entrez ID (needed by cnetplot's foldChange).
genelist<-sort(genelist,decreasing=T)
# Sort the values from largest to smallest.
#=======================================up
# GO enrichment and plots for the UP-regulated gene set.
up_ego_CC<-enrichGO(gene=EG_up,universe= EGU,OrgDb= org.Mm.eg.db,ont= "CC",pAdjustMethod = "BH",pvalueCutoff= 0.01,qvalueCutoff= 0.05,readable= TRUE)
# GO enrichment of the up-regulated genes in the Cellular Component ontology.
up_ego_MF<- enrichGO(gene=EG_up,universe= EGU,OrgDb= org.Mm.eg.db,ont= "MF",pAdjustMethod = "BH",pvalueCutoff= 0.01,qvalueCutoff= 0.05,readable= TRUE)
# GO enrichment of the up-regulated genes in the Molecular Function ontology.
up_ego_BP<- enrichGO(gene=EG_up,universe= EGU,OrgDb= org.Mm.eg.db,ont= "BP",pAdjustMethod = "BH",pvalueCutoff= 0.01,qvalueCutoff= 0.05,readable= TRUE)
# GO enrichment of the up-regulated genes in the Biological Process ontology.
write.table(up_ego_CC,file="up_ego_CC.txt",sep="\t",quote=F,row.names=F)
# Save the CC enrichment result table.
write.table(up_ego_MF,file="up_ego_MF.txt",sep="\t",quote=F,row.names=F)
# Save the MF enrichment result table.
write.table(up_ego_BP,file="up_ego_BP.txt",sep="\t",quote=F,row.names=F)
# Save the BP enrichment result table.
tiff(file="up_BP_barplot.tif",res=100,units='in',width=10,height=10)
# Open a tiff graphics device and name the image file.
barplot(up_ego_BP, showCategory=10)
# GO enrichment bar plot.
dev.off()
# Close the device, saving the image.
tiff(file="up_BP_dotplot.tif",res=100,units='in',width=10,height=10)
# Open a tiff graphics device and name the image file.
dotplot(up_ego_BP,showCategory=10)
# GO enrichment dot plot.
dev.off()
# Close the device, saving the image.
#https://guangchuangyu.github.io/2015/06/dotplot-for-enrichment-result/
# NOTE(review): 'do' is not defined anywhere in this script; the next line is a
# pasted example from the blog post above and will error if executed -- confirm
# whether it should be removed or changed to 'up_ego_BP'.
dotplot(do, x="count", showCategory=20, colorBy="qvalue")
#We can set the x-axis to use gene count and dot color by one of ‘pvalue’, ‘p.adjust’ or ‘qvalue’.
#x- "GeneRatio"(default) or "Count"
#color- "p/adjust" or "pvalue" or "qvalue"
tiff(file="up_BP_enrichMap.tif",res=100,units='in',width=20,height=20)
# Open a tiff graphics device and name the image file.
enrichMap(up_ego_BP)
# Relationship map between the enriched GO terms.
dev.off()
# Close the device, saving the image.
tiff(file="up_BP_cnetplot2.tif",res=150,units='in',width=20,height=15)
# Open a tiff graphics device and name the image file.
# NOTE(review): 'vertext.label.font' looks like a typo for 'vertex.label.font'
# and is silently swallowed by '...' -- confirm and fix the spelling.
cnetplot(up_ego_BP,showCategory =5,categorySize="pvalue", foldChange=genelist,vertex.label.cex=1,vertext.label.font=20)
# Network plot linking enriched GO terms to their genes.
dev.off()
# Close the device, saving the image.
## categorySize can be scaled by 'pvalue' or 'geneNum'
# NOTE(review): this re-installs topGO on every run; install once instead.
biocLite("topGO")
library(topGO)
tiff(file="dif_CC_topGO.tif",res=400,units='in',width=10,height=10)
# Open a tiff graphics device.
# NOTE(review): 'dif_ego_CC' is only defined later in this file, so this call
# fails when the script is run top to bottom; given the surrounding "up"
# section it probably should plot an 'up_ego_*' object -- confirm intent.
plotGOgraph(dif_ego_CC,firstSigNodes =5)
# topGO directed acyclic graph (DAG) of the top significant nodes.
dev.off()
# Close the device, saving the image.
#============================down
# GO enrichment and plots for the DOWN-regulated gene set.
down_ego_CC<-enrichGO(gene=EG_down,universe=EGU,OrgDb= org.Mm.eg.db,ont= "CC",pAdjustMethod = "BH",pvalueCutoff= 0.01,qvalueCutoff= 0.05,readable= TRUE)
# GO enrichment of the down-regulated genes in the Cellular Component ontology.
down_ego_MF<-enrichGO(gene=EG_down,universe=EGU,OrgDb= org.Mm.eg.db,ont= "MF",pAdjustMethod = "BH",pvalueCutoff= 0.01,qvalueCutoff= 0.05,readable= TRUE)
# GO enrichment of the down-regulated genes in the Molecular Function ontology.
down_ego_BP<-enrichGO(gene=EG_down,universe=EGU,OrgDb= org.Mm.eg.db,ont= "BP",pAdjustMethod = "BH",pvalueCutoff= 0.01,qvalueCutoff= 0.05,readable= TRUE)
# GO enrichment of the down-regulated genes in the Biological Process ontology.
write.table(down_ego_CC,file="down_ego_CC.txt",sep="\t",quote=F,row.names=F)
# Save the CC enrichment result table.
write.table(down_ego_MF,file="down_ego_MF.txt",sep="\t",quote=F,row.names=F)
# Save the MF enrichment result table.
write.table(down_ego_BP,file="down_ego_BP.txt",sep="\t",quote=F,row.names=F)
# Save the BP enrichment result table.
tiff(file="down_MF_barplot.tif",res=100,units='in',width=10,height=10)
# Open a tiff graphics device and name the image file.
barplot(down_ego_MF, showCategory=10)
# GO enrichment bar plot.
dev.off()
# Close the device, saving the image.
tiff(file="down_MF_dotplot.tif",res=100,units='in',width=10,height=10)
# Open a tiff graphics device and name the image file.
dotplot(down_ego_MF,showCategory=10)
# GO enrichment dot plot.
dev.off()
# Close the device, saving the image.
#https://guangchuangyu.github.io/2015/06/dotplot-for-enrichment-result/
# NOTE(review): 'do' is not defined anywhere in this script; the next line is a
# pasted example from the blog post above and will error if executed.
dotplot(do, x="count", showCategory=20, colorBy="qvalue")
#We can set the x-axis to use gene count and dot color by one of ‘pvalue’, ‘p.adjust’ or ‘qvalue’.
#x- "GeneRatio"(default) or "Count"
#color- "p/adjust" or "pvalue" or "qvalue"
tiff(file="down_MF_enrichMap.tif",res=100,units='in',width=20,height=20)
# Open a tiff graphics device and name the image file.
enrichMap(down_ego_MF)
# Relationship map between the enriched GO terms.
dev.off()
# Close the device, saving the image.
tiff(file="down_MF_cnetplot.tif",res=150,units='in',width=20,height=15)
# Open a tiff graphics device and name the image file.
cnetplot(down_ego_MF,showCategory =5,categorySize="pvalue", foldChange=genelist,vertex.label.cex=1)
# Network plot linking enriched GO terms to their genes.
dev.off()
# Close the device, saving the image.
## categorySize can be scaled by 'pvalue' or 'geneNum'
# NOTE(review): this re-installs topGO on every run; install once instead.
biocLite("topGO")
library(topGO)
tiff(file="down_MF_topGO.tif",res=400,units='in',width=10,height=10)
# Open a tiff graphics device.
plotGOgraph(down_ego_MF,firstSigNodes =5)
# topGO directed acyclic graph (DAG) of the top significant nodes.
dev.off()
# Close the device, saving the image.
#====================both up and down====
# GO enrichment and plots for ALL differentially expressed genes (up + down).
# Note: cutoffs here use pvalueCutoff=0.05, looser than the 0.01 used above.
dif_ego_CC<-enrichGO(gene=EG_dif,universe= EGU,OrgDb= org.Mm.eg.db,ont= "CC",pAdjustMethod = "BH",pvalueCutoff= 0.05,qvalueCutoff= 0.05,readable= TRUE)
# GO enrichment of the DE genes in the Cellular Component ontology.
dif_ego_MF<- enrichGO(gene=EG_dif,universe= EGU,OrgDb= org.Mm.eg.db,ont= "MF",pAdjustMethod = "BH",pvalueCutoff= 0.05,qvalueCutoff= 0.05,readable= TRUE)
# GO enrichment of the DE genes in the Molecular Function ontology.
dif_ego_BP<- enrichGO(gene=EG_dif,universe= EGU,OrgDb= org.Mm.eg.db,ont= "BP",pAdjustMethod = "BH",pvalueCutoff= 0.05,qvalueCutoff= 0.05,readable= TRUE)
# GO enrichment of the DE genes in the Biological Process ontology.
write.table(dif_ego_CC,file="dif_ego_CC.txt",sep="\t",quote=F,row.names=F)
# Save the CC enrichment result table.
write.table(dif_ego_MF,file="dif_ego_MF.txt",sep="\t",quote=F,row.names=F)
# Save the MF enrichment result table.
write.table(dif_ego_BP,file="dif_ego_BP.txt",sep="\t",quote=F,row.names=F)
# Save the BP enrichment result table.
tiff(file="dif_MF_barplot.tif",res=100,units='in',width=10,height=10)
# Open a tiff graphics device and name the image file.
barplot(dif_ego_MF, showCategory=10)
#font.size=26
# GO enrichment bar plot.
dev.off()
# Close the device, saving the image.
tiff(file="dif_MF_dotplot.tif",res=100,units='in',width=10,height=10)
# Open a tiff graphics device and name the image file.
dotplot(dif_ego_MF,showCategory=10,font.size =32)
# GO enrichment dot plot.
dev.off()
# Close the device, saving the image.
#https://guangchuangyu.github.io/2015/06/dotplot-for-enrichment-result/
# NOTE(review): 'do' is not defined anywhere in this script; the next line is a
# pasted example from the blog post above and will error if executed.
dotplot(do, x="count", showCategory=20, colorBy="qvalue")
#We can set the x-axis to use gene count and dot color by one of ‘pvalue’, ‘p.adjust’ or ‘qvalue’.
#x- "GeneRatio"(default) or "Count"
#color- "p/adjust" or "pvalue" or "qvalue"
tiff(file="dif_MF_enrichMap.tif",res=100,units='in',width=20,height=20)
# Open a tiff graphics device and name the image file.
enrichMap(dif_ego_MF)
# Relationship map between the enriched GO terms.
dev.off()
# Close the device, saving the image.
tiff(file="dif_MF_cnetplot2.tif",res=150,units='in',width=20,height=15)
# Open a tiff graphics device and name the image file.
cnetplot(dif_ego_MF,showCategory=25 ,categorySize="pvalue", foldChange=genelist,vertex.label.cex=1)
# Network plot linking enriched GO terms to their genes.
dev.off()
# Close the device, saving the image.
## categorySize can be scaled by 'pvalue' or 'geneNum'
# NOTE(review): this re-installs topGO on every run; install once instead.
biocLite("topGO")
library(topGO)
tiff(file="dif_CC_topGO.tif",res=400,units='in',width=10,height=10)
# Open a tiff graphics device.
plotGOgraph(dif_ego_CC,firstSigNodes =5)
# topGO directed acyclic graph (DAG) of the top significant nodes.
dev.off()
# Close the device, saving the image.
#=====dif, BP==
# Biological Process plots for the full differentially expressed gene set.
tiff(file="dif_BP_barplot.tif",res=100,units='in',width=10,height=10)
# Open a tiff graphics device and name the image file.
barplot(dif_ego_BP, showCategory=10)
# GO enrichment bar plot.
dev.off()
# Close the device, saving the image.
tiff(file="dif_BP_dotplot.tif",res=100,units='in',width=10,height=10)
# Open a tiff graphics device and name the image file.
dotplot(dif_ego_BP,showCategory=10)
# GO enrichment dot plot.
dev.off()
# Close the device, saving the image.
#https://guangchuangyu.github.io/2015/06/dotplot-for-enrichment-result/
# NOTE(review): 'do' is not defined anywhere in this script; the next line is a
# pasted example from the blog post above and will error if executed.
dotplot(do, x="count", showCategory=20, colorBy="qvalue")
#We can set the x-axis to use gene count and dot color by one of ‘pvalue’, ‘p.adjust’ or ‘qvalue’.
#x- "GeneRatio"(default) or "Count"
#color- "p/adjust" or "pvalue" or "qvalue"
tiff(file="dif_BP_enrichMap.tif",res=100,units='in',width=20,height=20)
# Open a tiff graphics device and name the image file.
enrichMap(dif_ego_BP)
# Relationship map between the enriched GO terms.
dev.off()
# Close the device, saving the image.
tiff(file="dif_BP_cnetplot2.tif",res=150,units='in',width=20,height=15)
# Open a tiff graphics device and name the image file.
cnetplot(dif_ego_BP,showCategory =15,categorySize="pvalue", foldChange=genelist,vertex.label.cex=1)
# Network plot linking enriched GO terms to their genes.
dev.off()
# Close the device, saving the image.
## categorySize can be scaled by 'pvalue' or 'geneNum'
# NOTE(review): this re-installs topGO on every run; install once instead.
biocLite("topGO")
library(topGO)
tiff(file="dif_BP_topGO.tif",res=400,units='in',width=10,height=10)
# Open a tiff graphics device.
plotGOgraph(dif_ego_BP,firstSigNodes =5)
# topGO directed acyclic graph (DAG) of the top significant nodes.
dev.off()
# Close the device, saving the image.
|
b48994d495169d0455b146fa5e2dbd47ecccc23d
|
f1154af6cca60d224dd1c5701a587e4b5b0fbef6
|
/simple example.R
|
148bee789346cd68e6394fb8ae7ab4faf88fa28e
|
[] |
no_license
|
robertlynch66/Populism-study
|
da39f7d3c8ea18fdc3480213acd50f773c9af88d
|
deb8f542fd58c001816d64e3eb3ae5b2893042d7
|
refs/heads/master
| 2020-03-16T22:02:16.574909
| 2019-06-05T14:34:58
| 2019-06-05T14:34:58
| 133,024,115
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,944
|
r
|
simple example.R
|
# Simple example: Bayesian binomial regression of county-level Trump vs.
# Clinton 2016 vote counts on change covariates, fit with rstanarm.
#
# Input : sample.rds -- a county-level data frame (edit the path below before
#         running; the '...' is a placeholder).
# Output: a fitted stan_glmer model in 'model', and its printed summary.

# Install the dependencies only when they are missing, instead of
# unconditionally re-installing on every run.
if (!requireNamespace("dplyr", quietly = TRUE)) install.packages("dplyr")
if (!requireNamespace("rstanarm", quietly = TRUE)) install.packages("rstanarm")
library(dplyr)
library(rstanarm)

# Read in the data.
# PUT in the CORRECT path in place of '...' below to read in the sample data
# frame 'sample', which should give you three of the smallest counties
# (Loving, Texas; Clark, Idaho; and Greenlee, Arizona).
p <- readRDS(".../sample.rds")

# The binomial outcome requires integer success/failure counts.
p$trump_votes <- as.integer(p$trump_16)
p$clinton_votes <- as.integer(p$clinton_16)

# Assemble the model frame with short, model-friendly column names.
data_list <- data.frame(
  trump_votes = p$trump_votes,
  clinton_votes = p$clinton_votes,
  sk_change = p$sk2014_over_2005,
  white_16_to_10 = p$white_16_to_10,
  pop_change = p$pop_change_16_to_10,
  median_hh_income_change = p$median_hh_income_16_to_10,
  perc_bachelors_change = p$bachelors_16_to_10,
  male_unemplmt_change = p$male_unemplmt_16_to_10,
  female_unemplmt_change = p$female_unemplmt_16_to_10,
  for_born_change = p$for_born_16_to_10,
  alcohol_change = p$alcohol_16_to_8,
  drugs_change = p$drugs_16_to_8,
  suicides_change = p$suicides_16_to_8,
  state_id = p$state)

# Trump vs. Clinton model: binomial outcome cbind(successes, failures) with a
# random intercept per state. max_treedepth is raised to 20 to avoid treedepth
# saturation warnings on this model.
model <- stan_glmer(formula = cbind(trump_votes, clinton_votes) ~
                      sk_change +
                      pop_change +
                      white_16_to_10 +
                      median_hh_income_change +
                      perc_bachelors_change +
                      male_unemplmt_change +
                      female_unemplmt_change +
                      for_born_change +
                      alcohol_change +
                      drugs_change +
                      suicides_change +
                      (1 | state_id),
                    family = binomial, data = data_list, chains = 1,
                    iter = 2000, warmup = 500,
                    control = list(max_treedepth = 20))

# Get parameter estimates.
summary(model)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.