blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
โ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
โ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
โ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ee59901c97ceb1267aad2db3642d2fe92abd2c75
|
cdbc1057868bef1b44b28e9a30d0fcbf86d98bbf
|
/find_aliasgeneposition_biomart.R
|
e3475091b3113555bd78c3055dcfdfcad8214d53
|
[] |
no_license
|
xwang234/ovarian
|
4f57ecfe16dc1f0a06afa72b1d5be5ed29ad3198
|
b23bdd340230b3d0145bd3e4b432397c4c1512ba
|
refs/heads/master
| 2021-07-08T17:14:48.542354
| 2017-10-05T21:09:22
| 2017-10-05T21:09:22
| 105,941,140
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,968
|
r
|
find_aliasgeneposition_biomart.R
|
#!/usr/bin/env Rscript
# Find genomic positions (chr/start/end) for alias gene symbols via biomaRt,
# parallelized over MPI workers with Rmpi.
# njob: number of parallel jobs handed to mpi.parSapply below.
njob=100
library("biomaRt")
#mart=useMart("ENSEMBL_MART_ENSEMBL", host="may2009.archive.ensembl.org/biomart/martservice/", dataset="hsapiens_gene_ensembl") #NCBI36, use listDatasets(mart)
# GRCh37/NCBI37 archive mart; kept as a global because workers receive it via broadcast
mart=useMart("ENSEMBL_MART_ENSEMBL", host="feb2014.archive.ensembl.org/biomart/martservice/", dataset="hsapiens_gene_ensembl") #GRCh37,NCBI37
#use biomart
# alias gene lists are pipe-separated: "aaa|bbb|xxx"
# Look up the genomic position of a gene given as one symbol or several
# pipe-separated alias symbols ("aaa|bbb|ccc").  Each alias is queried in
# turn against biomaRt (global `mart`) until one maps to an accepted
# chromosome; the first such mapping wins.
#
# Args:
#   gene: character scalar, one HGNC symbol or aliases joined with "|".
# Returns:
#   length-3 vector c(chromosome, start, end), or c(NA, NA, NA) when no
#   alias maps to an accepted chromosome.
findgeneposition=function(gene)
{
  res1=rep(NA,3)
  # accepted chromosome names; "23"/"24" kept as numeric aliases of X/Y
  allchrs=c(as.character(1:24),"X","Y")
  filter="hgnc_symbol"
  attributes=c("chromosome_name","start_position","end_position")
  mygenes=unlist(strsplit(gene,"|",fixed=TRUE))
  for (i in seq_along(mygenes))
  {
    res <- getBM(attributes=attributes, filters=filter, values=mygenes[i], mart=mart)
    if (nrow(res)>0)
    {
      # single scan for rows on accepted chromosomes (was: sum(...) then which(...))
      idx=which(as.character(res$chromosome_name) %in% allchrs)
      if (length(idx)>0)
      {
        res1=c(res[idx[1],1],res[idx[1],2],res[idx[1],3])
        break
      }
    }
  }
  return(res1)
}
# Query positions for a vector of (alias) gene names in parallel over MPI
# slaves, in chunks of 1000 genes.  The cumulative result table is rewritten
# to `outputfile` after every chunk so partial progress survives a crash.
# Returns a data.frame with columns gene, chr, start, end.
mpi_findgeneposition=function(genes,outputfile="/fh/fast/dai_j/CancerGenomics/Ovarian_Cancer/result/genepositions/aliasgeneposition_biomart.txt")
{
# ship the data, the worker function, the mart handle and the library to
# every slave before dispatching work
mpi.bcast.Robj2slave(genes)
mpi.bcast.Robj2slave(findgeneposition)
mpi.bcast.Robj2slave(mart)
mpi.bcast.cmd(library(biomaRt))
mpi.remote.exec(system('uname -n',intern=T))
res1=data.frame(matrix(NA,nrow=0,ncol=4))
colnames(res1)=c("gene","chr","start","end")
nrun <- ceiling(length(genes)/1000)
print("start2")
for (j in 1:nrun){
cat(j,"..")
# index range of the j-th chunk; the final chunk absorbs the remainder
if (j < nrun) cseq <- ((j-1)*1000+1):(j*1000) else cseq <- ((j-1)*1000+1):length(genes)
z=genes[cseq]
res=mpi.parSapply(X=z,FUN=findgeneposition,job.num=njob)
# results come back flattened: each gene contributes 3 consecutive values
idx=seq(1,length(res),3)
res2=data.frame(gene=genes[cseq],chr=res[idx],start=res[idx+1],end=res[idx+2])
res1=rbind(res1,res2)
# checkpoint: rewrite the whole accumulated table after every chunk
write.table(res1,file=outputfile,col.names=T,row.names=F,sep="\t",quote=F)
}
return(res1)
}
# --- MPI bootstrap and driver ---
require(Rmpi)
mpi.spawn.Rslaves(needlog = FALSE)
# .Last runs on R exit: make sure slaves are closed and MPI is finalized
.Last <- function()
{ if (is.loaded("mpi_initialize")){
if (mpi.comm.size(1) > 0){
print("Please use mpi.close.Rslaves() to close slaves.")
mpi.close.Rslaves()
}
print("Please use mpi.quit() to quit R")
.Call("mpi_finalize")
}
}
# read the symbol/alias table and build "symbol|alias1|alias2" query strings
aliastable=read.table(file="/fh/fast/dai_j/CancerGenomics/Tools/database/other/genes_multiplesymbols.txt",sep="\t",fill=T,quote="",header=T,stringsAsFactors=F )
multiplegenes=paste(aliastable$symbol,aliastable$alias_symbol,sep="|")
# strip the trailing "|" left by genes without aliases, then stray quotes
multiplegenes=gsub("\\|$","",multiplegenes,perl=T)
multiplegenes=gsub("\"","",multiplegenes,perl=T)
multiplegenes=gsub("\'","",multiplegenes,perl=T)
aliasgenepos=mpi_findgeneposition(genes=multiplegenes,outputfile="/fh/fast/dai_j/CancerGenomics/Ovarian_Cancer/result/genepositions/aliasgeneposition_biomart.txt")
mpi.close.Rslaves()
mpi.quit()
|
f1ee234dd3f3829495e62eb0370e908918387109
|
4d21fd017062a2d3242017e13fe8f5a30f78bcc6
|
/Code/tutte_funzioni_Amalgamare_rete.R
|
80064ab3b4213c50614fa6dbe7cb15050b13282b
|
[] |
no_license
|
FedericoMelograna/CTBN_PhaseDistribution
|
05cce6ea1bafe8103a5dadcbe5134f460ce00b52
|
949af0fa112966e0abb6f0f8ca9bf4597571199f
|
refs/heads/master
| 2022-05-22T19:09:37.748517
| 2022-05-10T14:34:46
| 2022-05-10T14:34:46
| 222,083,031
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 24,275
|
r
|
tutte_funzioni_Amalgamare_rete.R
|
# funzioni amalgamazione e indietro ---------------------------------------
library(stringr)
# creazionefullRapprdatoDirect --------------------------------------------
# Label the rows/columns of the direct-representation intensity matrix with
# joint-state names "j_i" (j = X state in 1..N_int fast, i = H state in
# 1..k_int slow), matching the original loop order.
#
# Args:
#   k_int:    number of states of H.
#   N_int:    number of states of X.
#   matriced: (k_int*N_int) x (k_int*N_int) matrix to be labelled.
# Returns: `matriced` with row and column names set.
directnames=function(k_int,N_int,matriced){
  # vectorized replacement of the original grow-with-c() nested loop
  statenames=paste0(rep(seq_len(N_int),times=k_int),"_",
                    rep(seq_len(k_int),each=N_int))
  rownames(matriced)=statenames
  colnames(matriced)=statenames
  return(matriced)
}
##importantissima anche per dopo!!
# RAPPRESENTAZIONE FULL: DA DIRECT A FULL
#funzione che crea singola Qx|h1
##funzione che crea Qx|h1 in rappresentazione FULL
# Build the k_int x k_int full-representation block Qx|h_{j_int}: every state
# i != j_int jumps instantaneously into state j_int, encoded as rate +Inf in
# column j_int with -Inf on the corresponding diagonal entry; all other
# entries (including the whole row j_int) are 0.
singolax=function(k_int,j_int){
  mm=matrix(0,nrow=k_int,ncol=k_int)
  others=setdiff(seq_len(k_int),j_int)
  # vectorized index assignment instead of the original double loop + ifelse
  mm[cbind(others,others)]=-Inf                       # leave state i...
  mm[cbind(others,rep(j_int,length(others)))]=Inf     # ...straight into j_int
  return(mm)
}
# Return a k_int x k_int matrix of zeros (an "empty" Qx|h_i block).
vuota=function(k_int){
  return(matrix(0,nrow=k_int,ncol=k_int))
}
# Build all N*k full-representation blocks Qx|H as a k x k x (N*k) array.
# Slot `slot` holds singolax(...) when (slot+N-1)/N is an integer level j,
# and an all-zero block otherwise.
Funz_tuttex=function(k,N){
  out=array(NA,c(k,k,N*k))
  for (slot in seq_len(N*k)){
    level=(slot+N-1)/N
    if (level==floor(level)){
      out[,,slot]=singolax(k_int=k,j_int=level)
    } else {
      out[,,slot]=vuota(k_int=k)
    }
  }
  return(out)
}
# Build the (k_int*N_int) x (k_int*N_int) full-representation block
# QH|x_{j_int} from the named direct matrix `matriced` (names as set by
# directnames()).  Rows l inside the band N_int*(j_int-1) < l <= N_int*j_int
# are copied from the corresponding "i_j" entries of `matriced`; all other
# rows are zero.
singolah=function(k_int,N_int,j_int,matriced){
mm=matrix(ncol=k_int*N_int,nrow=k_int*N_int)
for (l in 1:(k_int*N_int)){
for (m in 1:(k_int*N_int)){
# i: row index inside the j_int band; u/r: H-state and X-state of column m
i=l-N_int*(j_int-1)
u=ceiling(m/N_int)
r=m-N_int*(u-1)
mm[l,m]=ifelse(l>N_int*(j_int-1) && l<=N_int*j_int, matriced[paste0(i,"_",j_int),paste0(r,"_",u)],0)
#mm[i,m]=ifelse(i==m && i!=j_int,-Inf,ifelse(j_int==m & i!=j_int, Inf, 0))
}
}
return(mm)
} # single H|xi block
# Stack all k full-representation blocks QH|x_j into an (N*k) x (N*k) x k array.
Funz_tutteh_rappresentazioneFULL=function(k,N,matrice){
  blocks=array(NA,c(N*k,N*k,k))
  for (xstate in seq_len(k)){
    blocks[,,xstate]=singolah(k_int=k,N_int=N,j_int=xstate,matriced=matrice)
  }
  return(blocks)
}
#gli cambio nome!! senno entra in conflitto
# va,vb,v, ordinamento ----------------------------------------------------
#transizioni non allowate per va(pari) e vb (dispari)
# Number of disallowed transitions inside group A (odd states):
# ceiling(k/2) * (ceiling(k/2) - 1), i.e. k/2*(k/2-1) rounded up.
funz_va<-function(k_int){
  half=ceiling(k_int/2)
  return(half*(half-1))
}
# Number of disallowed transitions inside group B (even states):
# floor(k/2) * (floor(k/2) - 1), i.e. k/2*(k/2-1) rounded down.
funz_vb<-function(k_int){
  half=floor(k_int/2)
  return(half*(half-1))
}
# Total number v of disallowed transitions: group A plus group B.
funz_v<-function(k_int){
  return(funz_va(k_int)+funz_vb(k_int))
}
#funzioni di ordinamento, per gli eta (servono alle N)
# Joint ordering of the augmented chain eta_X_H as an (N*K) x 2 character
# matrix: column 1 cycles "K1".."KK" slowly (each label repeated N times),
# column 2 cycles "N1".."NN" quickly.  Vectorized replacement of the
# original build-3D-array-then-flatten construction; the returned matrix is
# identical (character, no dimnames).
#
# Args:
#   N: number of states of X.
#   K: number of states of H.
ordinamentoX_H<-function(N,K){
  m_f=matrix(c(rep(paste0("K",seq_len(K)),each=N),
               rep(paste0("N",seq_len(N)),times=K)),ncol=2)
  return(m_f)
}
#ordinamenti parziali invece di X e H
# Labels for the X states: "K1".."KK".
ordinamentoX<-function(K) paste0("K",seq_len(K))
# Labels for the H states: "N1".."NN".
ordinamentoH<-function(N) paste0("N",seq_len(N))
#funzioni che computano le matrici NX|H e NH|X che servono per amalgamare
# Indicator matrix N_{X|h_i}: entry [row, col] is 1 exactly when joint state
# `row` of the ordering `ord` carries H-label ordH[i] and X-label ordX[col];
# 0 everywhere else.
singolaNX_hi<-function(K,N,i,ord,ordH,ordX){
  ind=matrix(0,nrow=K*N,ncol=K)
  for (row in seq_len(K*N)){
    if (ord[row,2]==ordH[i]){
      for (col in seq_len(K)){
        if (ord[row,1]==ordX[col]) ind[row,col]=1
      }
    }
  }
  return(ind)
}
# Indicator matrix N_{H|x_i}: entry [row, col] is 1 exactly when joint state
# `row` of the ordering `ord` carries X-label ordX[i] and H-label ordH[col];
# 0 everywhere else.
singolaNH_xi<-function(K,N,i,ord,ordH,ordX){
  ind=matrix(0,nrow=K*N,ncol=N)
  for (row in seq_len(K*N)){
    if (ord[row,1]==ordX[i]){
      for (col in seq_len(N)){
        if (ord[row,2]==ordH[col]) ind[row,col]=1
      }
    }
  }
  return(ind)
}
# Assemble all N indicator matrices N_{X|h_i} into a (K*N) x K x N array
# (slice i is the indicator matrix for hidden state i).
completaNX_H<-function(K,N){
  ord=ordinamentoX_H(N,K)
  labH=ordinamentoH(N)
  labX=ordinamentoX(K)
  out=array(NA,dim=c(K*N,K,N))
  for (h in seq_len(N)){
    out[,,h]=singolaNX_hi(K,N,h,ord,labH,labX)
  }
  return(out)
}
# Assemble all K indicator matrices N_{H|x_i} into a (K*N) x N x K array
# (slice i is the indicator matrix for X state i).
completaNH_X<-function(K,N){
  ord=ordinamentoX_H(N,K)
  labH=ordinamentoH(N)
  labX=ordinamentoX(K)
  out=array(NA,dim=c(K*N,N,K))
  for (x in seq_len(K)){
    out[,,x]=singolaNH_xi(K,N,x,ord,labH,labX)
  }
  return(out)
}
# bipartitica -------------------------------------------------------------
#funzioni di supporto e ordinameto per QX_l
# Disallowed transitions for the odd ("A") and even ("B") state groups, as
# "from_to" label strings.  For odd k the even group has one fewer state, so
# pairs referencing the missing even state produce "NA" labels and are
# dropped from the even list.
#
# Args: k = number of states of X.
# Returns: character vector c(even-group pairs, odd-group pairs).
funzsupporto<-function(k) {
  A<-seq(1,k,2)
  B<-seq(2,k,2)
  nl=vector(); nn=vector(); cont=1
  for (i in seq_along(A)){
    for (j in seq_along(A)){
      if (i!=j) {
        nl[cont]<-paste0(A[i],"_",A[j])
        nn[cont]<-paste0(B[i],"_",B[j])
        cont=cont+1
      }
    }
  }
  # base grepl replaces the stringr str_detect dependency (fixed-string "NA")
  nn<-nn[!grepl("NA",nn,fixed=TRUE)]
  return(c(nn,nl))
}
# Order the disallowed transitions produced by funzsupporto() and assign each
# a rank `ris`: upward pairs (prim < sec) get odd ranks 1,3,5,...; downward
# pairs get even ranks 2,4,6,....  Returns a data.frame with columns
# prim, sec, ris, sorted by ris.
# NOTE(review): substr(ff,1,1)/substr(ff,3,3) assumes single-digit state
# labels, i.e. k <= 9 — confirm before using a larger k.
funzioneordinamento<-function(k){
ff=funzsupporto(k)
z=data.frame(prim=as.numeric(substr(ff,1,1)),sec=as.numeric(substr(ff,3,3)))
z$terz=ifelse(z$prim>z$sec,1,0)
z1=z[z$terz==0,];(z1<-z1[order(z1$prim,z1$sec),]);z1$ris=2*(1:nrow(z1)-1)+1
z2=z[z$terz!=0,]; z2<-z2[order(z2$sec,z2$prim),];z2$ris=2*(1:nrow(z2))#;z2
zfin=rbind(z1,z2);zfin=zfin[order(zfin$ris),c(1,2,4)]
return(zfin)
}
#funzioni per computare QX|H nei vari casi
# Bipartite matrix QX|h1 (first hidden level).  Inputs: K = #states of X,
# N = #states of H, mm = direct-form matrix.  Even rows i receive the rate
# mm["N_i","1_j"] toward odd columns j; the NA diagonal placeholder is then
# filled so that each row sums to zero.  Returns a K x K matrix.
QX_h1<-function(K,N,mm){
matrice=matrix(NA,ncol=K,nrow=K)
for (i in 1:K){
for (j in 1:K){
matrice[i,j]=ifelse(i==j & i%%2==0,NA,ifelse(i!=j & i%%2==0 & j%%2!=0, mm[paste0(N,"_",i),paste0(1,"_",j)],0 ))
}
# replace the NA diagonal entry with minus the sum of the row's other rates
temp=matrice[i,]
temp[is.na(temp)]<--sum(temp,na.rm=T)
matrice[i,]<-temp
}
return(matrice)
} ## bipartite matrix given h1
# Bipartite matrix QX|hN (last hidden level): mirror of QX_h1 with the odd
# and even roles swapped — odd rows i receive rate mm["N_i","1_j"] toward
# even columns j; the NA diagonal placeholder is then filled so rows sum to
# zero.  Returns a K x K matrix.
QX_hN<-function(K,N,mm){
matrice=matrix(NA,ncol=K,nrow=K)
for (i in 1:K){
for (j in 1:K){
matrice[i,j]=ifelse(i==j & i%%2!=0,NA,ifelse(i!=j & i%%2!=0 & j%%2==0, mm[paste0(N,"_",i),paste0(1,"_",j)],0 ))
}
# fill the NA diagonal entry with minus the sum of the row's other rates
temp=matrice[i,]
temp[is.na(temp)]<--sum(temp,na.rm=T)
matrice[i,]<-temp
}
return(matrice)
} # bipartite matrix given hN
# Zero K x K block used for hidden levels l strictly between 1 and N.
# N and mm are accepted only for interface compatibility and never used
# (the author noted they could be dropped).
QX_h0<-function(K,N=1,mm=matrix(c(-1,1,0,0,1,-2,1,0,0,0,-0.5,0.5,0.6,0,0.4,-1)
,ncol=4,byrow=T)){
  empty_block=matrix(0,nrow=K,ncol=K)
  return(empty_block)
}
# QX|h_l for the auxiliary levels N < l <= N+v: the disallowed transition
# ranked (l-N) by funzioneordinamento() is realized as an instantaneous jump
# a -> b (rate +Inf, with -Inf on the diagonal of row a); all other entries
# are 0.  Returns a K x K matrix.
QX_l<-function(K,N,l){
ff=funzioneordinamento(k=K)
# endpoints of the transition with rank l-N
a=ff[ff$ris==(l-N),1]
b=ff[ff$ris==(l-N),2]
matrice=matrix(NA,ncol=K,nrow=K)
for (i in 1:K){
for (j in 1:K){
matrice[i,j]=ifelse(i==j & i==a,-Inf,ifelse(i==a & j==b, Inf,0))
}
}
return(matrice)
} # QX|l for N < l <= N+v
# Assemble every QX|h block into a k x k x (N+v) array from the direct-form
# matrix: level 1 -> QX_h1, levels 2..N-1 -> zero blocks (QX_h0),
# level N -> QX_hN, auxiliary levels N+1..N+v -> QX_l.
QX_Htotale<-function(k,N,matrice){
  v=funz_v(k)
  stack=array(NA,c(k,k,N+v))
  for (level in seq_len(N+v)){
    if (level==1){
      block=QX_h1(K=k,N=N,mm=matrice)
    } else if (level<N){
      block=QX_h0(K=k)
    } else if (level==N){
      block=QX_hN(K=k,N=N,mm=matrice)
    } else {
      block=QX_l(K=k,N=N,l=level)
    }
    stack[,,level]=block
  }
  return(stack)
}
#crea tutte le matrici QX|H
#funzione di supporto per QH|X
# Support helper for "point 4" of the QH|X construction: scan the auxiliary
# levels l in (N+1)..(N+v) and collect those whose QX|h_l block carries an
# Inf rate into column j.  Returns the vector of such levels, or FALSE when
# none exist (so callers can always apply %in% safely).
# NOTE(review): the local counter is named `c`, shadowing base c() inside
# this scope; it works here but is fragile.
funz_quartopuntoqhlhm_x<-function(K,N,j){
v=funz_v(K)
c<-0
s<-vector()
for (l in (N+1):(N+v)){
a=QX_l(K=K,N=N,l=l) # rebuild QX|h_l for each auxiliary level l > N
if (Inf %in% a[,j]){
# column j of this block has an Inf: record the level l
c<-c+1
s[c]<-l
}
}
# scalar test, so exactly one of the two return() calls fires
ifelse(length(s)>0,return(s),return(FALSE))
} # see esempioprimarete3_3
#computo tutte le matrici QH|X
# QH|x_j for ODD j (first version — superseded by the redefinition later in
# this file, which forces va internally).  Inputs: N,K = #states of H and X;
# v / va = total and group-A disallowed-transition counts; j = X state
# index; mm = direct-representation matrix.  Returns an (N+v) x (N+v)
# matrix.  The +-Inf placement follows the ad-hoc helper s (the author notes
# the original closed-form rule was wrong).
QH_Xdispari<-function(N,K,v,va,j,mm){
s<-funz_quartopuntoqhlhm_x(K=K,N=N,j=j)
matrice=matrix(NA,ncol=N+v,nrow=N+v)
ff=funzioneordinamento(k=K)
for (l in 1:(N+v)){
for (m in 1:(N+v)){
if (l==m & m<N){
matrice[l,m]=mm[paste0(l,"_",j),paste0(l,"_",j)]
} else if(l==m & m==N){
tempo=0
for (u in 1:K){
# accumulate the rates toward even states u
tempo<-tempo+ifelse(u%%2==0,mm[paste0(N,"_",j),paste0(1,"_",u)],0)
}
matrice[l,m]=mm[paste0(N,"_",j),paste0(N,"_",j)]+tempo
} else if(l!=m & l<=N & m<=N){
matrice[l,m]=mm[paste0(l,"_",j),paste0(m,"_",j)]
} else if(l==N & m>N & m<=N+va ){
# inner if needed: the lookup is undefined (out of bounds) in some cases
if( ff[ff$ris==abs((m-N)),1]==j){
a=ff[ff$ris==(m-N),1]
b=ff[ff$ris==abs((m-N)),2]
matrice[l,m]=mm[paste0(N,"_",j),paste0(1,"_",b)]
} else matrice[l,m]=0
}
else if(l>N & l<=N+va & l %in% s & m==l){
# -Inf on the diagonal of the auxiliary levels selected by s
matrice[l,m]=-Inf
}
else if(l>N & l<=N+va & l %in% s & m==1){
matrice[l,m]=Inf
}
else {
matrice[l,m]=0
}
}
}
return(matrice)
} # overwritten version below!
# QH|x_j for EVEN j (first version — superseded by the redefinition later in
# this file).  Same interface as QH_Xdispari; note the reversed level
# indexing (N-l+1) used for even X states and that the auxiliary-level logic
# acts on the range N+va..N+v.  Returns an (N+v) x (N+v) matrix.
QH_Xpari<-function(N,K,v,va,j,mm){
matrice=matrix(NA,ncol=N+v,nrow=N+v)
s<-funz_quartopuntoqhlhm_x(K=K,N=N,j=j)
ff=funzioneordinamento(k=K)
for (l in 1:(N+v)){
for (m in 1:(N+v)){
if (l==m & m<=N & l>1){
matrice[l,m]=mm[paste0(N-l+1,"_",j),paste0(N-l+1,"_",j)]
} else if(l==m & m==1){
tempo=0
for (u in 1:K){
# accumulate rates toward odd states u (author: "1_u" instead of "N_u")
tempo<-tempo+ifelse(u%%2==1,mm[paste0(N,"_",j),paste0(1,"_",u)],0)
}
matrice[l,m]=mm[paste0(N,"_",j),paste0(N,"_",j)]+tempo
} else if(l!=m & l<=N & m<=N){ #3
matrice[l,m]=mm[paste0(N-l+1,"_",j),paste0(N-m+1,"_",j)]
} else if(l==1 & m>N+va & m<=N+v ){#4
# inner if avoids an out-of-bounds lookup; requires the pair start a==j
if( ff[ff$ris==abs((m-N)),1]==j){
a=ff[ff$ris==(m-N),1]
b=ff[ff$ris==(m-N),2]
matrice[l,m]=mm[paste0(N,"_",j),paste0(1,"_",b)]
} else matrice[l,m]=0
}
else if(l>N+va & l<=N+v & l %in% s & m==l){
# +-Inf placement driven by the ad-hoc helper s (points 5 and 6)
matrice[l,m]=-Inf
}
else if(l>N+va & l<=N+v & l %in% s & m==N){
matrice[l,m]=Inf
}
else {
matrice[l,m]=0
}
}
}
return(matrice)
} # overwritten version below!
# Build all QH|x_j blocks as an (N+v) x (N+v) x k array from the direct-form
# matrix, dispatching to the even/odd variant according to the parity of j.
Funz_tutteh=function(k,N,matrice){
  va<-funz_va(k_int=k)
  v=funz_v(k_int=k)
  stack=array(NA,c(N+v,N+v,k))
  for (xstate in seq_len(k)){
    if (xstate %% 2 == 0){
      stack[,,xstate]=QH_Xpari(K=k,N=N,va=va,v=v,j=xstate,mm=matrice)
    } else {
      stack[,,xstate]=QH_Xdispari(K=k,N=N,va=va,v=v,j=xstate,mm=matrice)
    }
  }
  return(stack)
}
#funzione che mi crea tutte le matrici QH|X
#matrice amalgamata H|X
# Amalgamate all QH|X blocks into one k*(N+v) x k*(N+v) matrix via the
# indicator matrices N_{H|x_i}: sum_i N_i %*% Q_i %*% t(N_i).
# NOTE(review): +-Inf rates are replaced by the finite surrogates
# 10000 / -1000 before the products — asymmetric magnitudes; confirm that
# this numeric approximation is intended.
amalgamataH_X<-function(k,N,matrice){
matrice=directnames(matriced=matrice,k_int=k,N_int = N)
aus=Funz_tutteh(k=k,N=N,matrice=matrice)
aus[aus==Inf]<-10000; aus[aus==-Inf]<--1000
v=funz_v(k_int=k)
tot=N+v
NN=completaNH_X(K=k,N=tot)
Matr_amalgH_X<-matrix(0,nrow=k*(N+v),ncol=k*(N+v))
for(i in 1:k){
Matr_amalgH_X<-Matr_amalgH_X+NN[,,i]%*%aus[,,i]%*%t(NN[,,i])
}
return(Matr_amalgH_X)
}
#matrice amalgamata X|H
# Amalgamate all QX|H blocks into one k*(N+v) x k*(N+v) matrix via the
# indicator matrices N_{X|h_i}: sum_i N_i %*% Q_i %*% t(N_i).
# +-Inf rates are first replaced by the finite surrogates 10000 / -1000
# (same approximation as amalgamataH_X).
amalgamataX_H<-function(k,N,matrice){
# matrice=directnames(matriced=matrice,k_int=k,N_int = N)
aus2=QX_Htotale(k=k,N=N,matrice=matrice)
aus2[aus2==Inf]<-10000
aus2[aus2==-Inf]<--1000
v=funz_v(k_int=k)
tot=N+v
NN=completaNX_H(K=k,N=tot)
Matr_amalgX_H<-matrix(0,nrow=k*(N+v),ncol=k*(N+v))
for(i in 1:(N+v)){
Matr_amalgX_H<-Matr_amalgX_H+NN[,,i]%*%aus2[,,i]%*%t(NN[,,i])
}
return(Matr_amalgX_H)
}
# NuovaversionediQH_Xpariedispari QH|X -----------------------------------------
#strutturazione alternativa di QH|X e QX|H, corretta!!
#e consistente per ogni dimensione
#creano matrice QH|X_pari e se x รจ dispari
# QH|x_j for EVEN j — corrected version that OVERRIDES the earlier QH_Xpari
# (author: consistent across dimensions).
# NOTE(review): the parameter va is immediately overwritten with va=1, so
# the caller-supplied value is ignored — confirm this is intended.
QH_Xpari<-function(N,K,v,va,j,mm){
va=1
matrice=matrix(NA,ncol=N+v,nrow=N+v)
s<-funz_quartopuntoqhlhm_x(K=K,N=N,j=j)
ff=funzioneordinamento(k=K)
for (l in 1:(N+v)){
for (m in 1:(N+v)){
if (l==m & m<=N & l>1){
matrice[l,m]=mm[paste0(N-l+1,"_",j),paste0(N-l+1,"_",j)]
} else if(l==m & m==1){
tempo=0
for (u in 1:K){
# accumulate rates toward odd states u (author: "1_u" instead of "N_u")
tempo<-tempo+ifelse(u%%2==1,mm[paste0(N,"_",j),paste0(1,"_",u)],0)
}
matrice[l,m]=mm[paste0(N,"_",j),paste0(N,"_",j)]+tempo
} else if(l!=m & l<=N & m<=N){ #3
matrice[l,m]=mm[paste0(N-l+1,"_",j),paste0(N-m+1,"_",j)]
} else if(l==1 & m>N+va & m<=N+v ){#4
# inner if avoids an out-of-bounds lookup; requires the pair start a==j
if( ff[ff$ris==abs((m-N)),1]==j){
a=ff[ff$ris==(m-N),1]
b=ff[ff$ris==(m-N),2]
matrice[l,m]=mm[paste0(N,"_",j),paste0(1,"_",b)]
} else matrice[l,m]=0
}
else if(l>N+va & l<=N+v & l %in% s & m==l){
# +-Inf placement driven by the ad-hoc helper s (points 5 and 6)
matrice[l,m]=-Inf
}
else if(l>N+va & l<=N+v & l %in% s & m==N){
matrice[l,m]=Inf
}
else {
matrice[l,m]=0
}
}
}
return(matrice)
}
# QH|x_j for ODD j — corrected version that OVERRIDES the earlier
# QH_Xdispari (author: consistent across dimensions).
# NOTE(review): the parameter va is immediately overwritten with va=v, so
# the caller-supplied value is ignored — confirm this is intended.
QH_Xdispari<-function(N,K,v,va,j,mm){
va=v
s<-funz_quartopuntoqhlhm_x(K=K,N=N,j=j)
matrice=matrix(NA,ncol=N+v,nrow=N+v)
ff=funzioneordinamento(k=K)
for (l in 1:(N+v)){
for (m in 1:(N+v)){
if (l==m & m<N){
matrice[l,m]=mm[paste0(l,"_",j),paste0(l,"_",j)]
} else if(l==m & m==N){
tempo=0
for (u in 1:K){
# accumulate the rates toward even states u
tempo<-tempo+ifelse(u%%2==0,mm[paste0(N,"_",j),paste0(1,"_",u)],0)
}
matrice[l,m]=mm[paste0(N,"_",j),paste0(N,"_",j)]+tempo
} else if(l!=m & l<=N & m<=N){
matrice[l,m]=mm[paste0(l,"_",j),paste0(m,"_",j)]
} else if(l==N & m>N & m<=N+va ){
# inner if needed: the lookup is undefined (out of bounds) in some cases
if( ff[ff$ris==abs((m-N)),1]==j){
a=ff[ff$ris==(m-N),1]
b=ff[ff$ris==abs((m-N)),2]
matrice[l,m]=mm[paste0(N,"_",j),paste0(1,"_",b)]
} else matrice[l,m]=0
}
else if(l>N & l<=N+va & l %in% s & m==l){
# -Inf on the diagonal of the auxiliary levels selected by s
matrice[l,m]=-Inf
}
else if(l>N & l<=N+va & l %in% s & m==1){
matrice[l,m]=Inf
}
else {
matrice[l,m]=0
}
}
}
return(matrice)
}
# fileamalgamato indietro -------------------------------------------------
#creano rispettivamente le CIM QH|x1,...xk e QX|h1,...hn
# Recover the per-x CIM blocks QH|x_l from the amalgamated matrix: block l
# is the l-th (N+v) x (N+v) diagonal sub-matrix of `matriceam`, with each
# diagonal entry recomputed as minus the sum of the off-diagonal entries of
# its row (so every row sums to zero).  Returns an (N+v) x (N+v) x k array.
# Changes vs original: debug print() calls removed; scalar accumulation
# replaced by block extraction + rowSums.
funzioneAM_QH_X<-function(k,N,matriceam){
  v=funz_v(k)
  d=N+v
  matr_full_H=array(NA,c(d,d,k))
  for (l in seq_len(k)){
    rows=(l-1)*d+seq_len(d)
    block=matriceam[rows,rows,drop=FALSE]
    # zero the diagonal, then rebuild it from the off-diagonal row sums
    diag(block)=0
    diag(block)=-rowSums(block)
    matr_full_H[,,l]=block
  }
  return(matr_full_H)
}
# Recover the per-h CIM blocks QX|h_l from the amalgamated matrix: entry
# [i,j] of block l sits at amalgamated position ((i-1)*(N+v)+l,
# (j-1)*(N+v)+l); diagonal entries are recomputed as minus the sum of the
# off-diagonal entries of their row.  Returns a k x k x (N+v) array.
# Changes vs original: debug print() calls removed; scalar accumulation
# replaced by strided-index extraction + rowSums.
funzioneAM_QX_H<-function(k,N,matriceam){
  v=funz_v(k)
  d=N+v
  matr_full_X=array(NA,c(k,k,d))
  for (l in seq_len(d)){
    idx=(seq_len(k)-1)*d+l
    block=matriceam[idx,idx,drop=FALSE]
    # zero the diagonal, then rebuild it from the off-diagonal row sums
    diag(block)=0
    diag(block)=-rowSums(block)
    matr_full_X[,,l]=block
  }
  return(matr_full_X)
}
|
f23642d6e68e25d8b0be7c12cccfd89b2a044835
|
69db104f2b9234bfdc99192794f2f369395cf7be
|
/R/proCrustes.R
|
47c176941627fec48095a3f50f441608e5a1cf9e
|
[] |
no_license
|
cran/mvdalab
|
d9ebb55f5ab5cd6bb9ebce9bade38f9fd1cf79a9
|
21e9983ecc4b15df922058de38bf3d7748cae2cb
|
refs/heads/master
| 2022-11-05T10:04:27.426346
| 2022-10-05T22:00:14
| 2022-10-05T22:00:14
| 55,814,072
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,503
|
r
|
proCrustes.R
|
# Procrustes analysis: rotate (and optionally scale) configuration Y to best
# match X, returning the rotation matrix, scale, translation, projected Y
# and an ANOVA-style fit decomposition, as an object of class "proC".
#
# Args:
#   X, Y:        numeric matrices / data frames (rows = observations).
#   scaling:     if TRUE, estimate the optimal scale factor c.; else c. = 1.
#   standardize: see NOTE(review) inside — currently has no effect.
#   scale.unit:  forwarded to scale() as its `scale` argument.
#   ...:         unused.
proCrustes <- function(X, Y, scaling = TRUE, standardize = FALSE, scale.unit = F, ...) {
Col.Diff <- (ncol(X) - ncol(Y))
# pad Y with zero columns when X is wider so the matrices conform
# NOTE(review): the opposite case (ncol(Y) > ncol(X)) is not handled
if (ncol(X) > ncol(Y)) {
Y <- data.frame(Y, Added = matrix(0, nrow = nrow(X), ncol = Col.Diff))
} else {
X <- X
Y <- Y
}
# centre (and optionally unit-scale) both configurations
X. <- scale(X, scale = scale.unit)
Y. <- scale(Y, scale = scale.unit)
Xmeans <- attr(X., "scaled:center")
Ymeans <- attr(Y., "scaled:center")
if (!(standardize)) {
X. <- X.
Y. <- Y.
} else {
X. <- X.
Y. <- Y.
# NOTE(review): the next two results are computed but never assigned, so
# standardize = TRUE has no effect — likely meant X. <- X./sqrt(...) etc.
X./sqrt(sum(diag(crossprod(X.))))
Y./sqrt(sum(diag(crossprod(Y.))))
}
# optimal rotation from the SVD of t(X.) %*% Y.
SVD <- svd(t(X.) %*% Y.)
Q <- SVD$v %*% t(SVD$u) #Rotation Matrix
if (!(scaling)) {
c. <- 1
} else {
# optimal scale: sum of singular values over the total variance of Y.
c. <- sum(diag(SVD$d)) / sum(diag(Y. %*% t(Y.)))
}
# minimized Procrustes statistic and its root-mean-square version
M2_min <- sum(diag(X. %*% t(X.))) + (c.^2 * sum(diag(Y. %*% t(Y.)))) - (2 * c. * sum(diag(SVD$d)))
PRMSE <- sqrt(M2_min / nrow(X.))
Yproj <- c. * Y. %*% Q
Translation <- Xmeans - c. * Ymeans %*% Q
difference <- X. - t(Q %*% t(Y.))
residuals. <- sqrt(apply(difference^2, 1, sum))
# ANOVA-style decomposition: model, error and total sums of squares
MSS <- c.^2 * sum(diag(Y. %*% t(Y.)))
ESS <- M2_min
TSS <- MSS + ESS
Results <- list(Rotation.Matrix = Q, Residuals = difference, M2_min = M2_min, Xmeans = Xmeans,
Ymeans = Ymeans, PRMSE = PRMSE, Yproj = Yproj,
scale = c., Translation = Translation, residuals. = residuals.,
Anova.MSS = MSS, Anova.ESS = ESS, Anova.TSS = TSS)
class(Results) <- "proC"
Results
}
|
43462d0ef2d42df70832f3c2ba2df2a4a3319b10
|
b088413a7481706cd82cd57adfc0692b3e50d68c
|
/tests/testthat/test-morgancpp.R
|
a7d5e7ed1b0b1552562403b9109e2c5f9ed95099
|
[
"MIT"
] |
permissive
|
ArtemSokolov/morgancpp
|
9dc540cc3a5705b5708d89721da5b6d09f1e985e
|
9f481385fefa3cf8fae224be429aeb0a03b85d0d
|
refs/heads/master
| 2020-09-29T03:42:50.613481
| 2019-12-09T18:35:16
| 2019-12-09T18:35:16
| 226,941,895
| 0
| 0
|
MIT
| 2019-12-09T18:41:15
| 2019-12-09T18:41:14
| null |
UTF-8
|
R
| false
| false
| 2,575
|
r
|
test-morgancpp.R
|
# testthat suite for the MorganFPS fingerprint collection and the
# stand-alone tanimoto() similarity function (both provided by the package's
# C++ code).
context( "MorganFPS functionality" )
# read the first n hex-encoded fingerprints from the bundled example file
load_example1 <- function(n)
scan( "../../inst/examples/example1.txt.gz", n=n,
what=character(), quiet=TRUE )
test_that("Self-similarity is always 1", {
## Test the first 1000 strings from the example
v <- load_example1(1000)
res <- sapply( v, function(hx) tanimoto(hx,hx) )
expect_equal( unname(res), rep(1,1000) )
})
test_that("Hex strings can be compared directly", {
## Spot-check several pairwise values
v <- load_example1(10)
expect_equal( tanimoto(v[1], v[2]), 0.1627907 )
expect_equal( tanimoto(v[5], v[6]), 0.08450704 )
expect_equal( tanimoto(v[9], v[10]), 0.09677419 )
})
test_that("Hex strings have to be of length 512", {
expect_error( tanimoto("ABC", "123"),
"Input hex string must be of length 512" )
})
test_that("New collections can be instatiated from hex strings", {
v <- load_example1(1000)
m <- MorganFPS$new(v)
# 1000 fingerprints x 256 bytes each
expect_identical( m$size(), 256000 )
})
test_that("Collections can be queried for pairwise similarities", {
v <- load_example1(10)
m <- MorganFPS$new(v)
## Compare to stand-alone function
expect_equal( m$tanimoto(1,2), tanimoto(v[1], v[2]) )
expect_equal( m$tanimoto(5,6), tanimoto(v[5], v[6]) )
expect_equal( m$tanimoto(9,10), tanimoto(v[9], v[10]) )
## Index has to be in-range
expect_error( m$tanimoto(-1,1) )
})
test_that("Collections can be queried for full similarity profiles", {
v <- load_example1(100)
m <- MorganFPS$new(v)
# full profile must agree with element-wise queries, both by index and
# by external hex string
v0 <- sapply( 1:100, function(i) m$tanimoto(1,i) )
v1 <- m$tanimoto_all(1)
v2 <- m$tanimoto_ext(v[1])
expect_length( v1, 100 )
expect_length( v2, 100 )
expect_identical( v0, v1 )
expect_identical( v0, v2 )
})
test_that("Collection indexing is 1-based", {
v <- load_example1(1000)
m <- MorganFPS$new(v)
## Pair-wise function
expect_error( m$tanimoto(0,1), "Index out of range" )
expect_error( m$tanimoto(1,0), "Index out of range" )
expect_error( m$tanimoto(-1,1), "Index out of range" )
expect_error( m$tanimoto(1,-1), "Index out of range" )
expect_error( m$tanimoto(1001,1), "Index out of range" )
expect_error( m$tanimoto(1,1001), "Index out of range" )
expect_identical( m$tanimoto(1000,1000), 1 )
## Full-profile function
expect_error( m$tanimoto_all(-1), "Index out of range" )
expect_error( m$tanimoto_all(0), "Index out of range" )
expect_error( m$tanimoto_all(1001), "Index out of range" )
expect_length( m$tanimoto_all(1000), 1000 )
})
|
3142c6d9977b9e24b9a5eaa6308bd5674bd25bcd
|
b564df9556614ef2c8256d578236ca776b85c0a7
|
/README.Rd
|
c4a30520a2925d4d4e489d80afbcc6a800c12fcd
|
[] |
no_license
|
wesleyburr/FinancialAnalyticsIntl
|
9b825470c248bd4f25a35f12bace21548bd20319
|
751e5deda3762c3f4867d950e2e6de3dbcc461e3
|
refs/heads/main
| 2023-04-19T00:51:54.120242
| 2021-05-07T13:11:32
| 2021-05-07T13:11:32
| 365,234,949
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 291
|
rd
|
README.Rd
|
# Trent International - Financial Analytics Mini-Lecture
Short (20-minute) talk given to potential international students interested in the Financial Analytics / Financial Science major at Trent University. Topic is reproducibility (specifically: computational reproducibility) in science.
|
8b52132d1c00784bb7cac2cf5148d3c32b341cdf
|
6dbc7d2df79a031c0d7877cd7d43652a6585a1b5
|
/plot.R
|
3188f589939077258684d4a8a278bc1cfbe9c57f
|
[] |
no_license
|
RamrajSekar/Rfundamentals
|
bacaa236c19d75afaa00bddad291c40598fb0724
|
b966ab9a1b11f38e175a19148f8d61074161d942
|
refs/heads/master
| 2021-01-21T10:25:27.096869
| 2017-05-19T07:36:58
| 2017-05-19T07:36:58
| 91,687,611
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,084
|
r
|
plot.R
|
# R base plotting fundamentals: scatter plots, time-series plots, plot
# types, multi-panel layouts and a two-axis overlay example.
x=5:7
y = 8:10
plot(x,y)
# data is a time series; plot() on the lynx series draws a line plot
plot(lynx)
# title, colour, title colour, title magnification
plot(lynx, main="Lynx Trappings", col="red",col.main=52, cex.main=1.5)
# axis label names
plot(lynx, ylab="Lynx Trappings", xlab="")
# label orientation (las=2: labels perpendicular to the axis)
plot(lynx, ylab="Lynx Trappings", xlab="", las=2)
# changing the session parameters: 2x2 plot matrix with red axis annotation
par(mfrow=c(2,2), col.axis="red")
plot(1:8, las=0, xlab="xlab", ylab="ylab", main="LAS = 0")
plot(1:8, las=1, xlab="xlab", ylab="ylab", main="LAS = 1")
plot(1:8, las=2, xlab="xlab", ylab="ylab", main="LAS = 2")
plot(1:8, las=3, xlab="xlab", ylab="ylab", main="LAS = 3")
?plot
# "type" selects which kind of plot we want
plot(lynx) # plot for time series data
plot(lynx, type="p", main="Type p") # points (default)
plot(lynx, type="l", main="Type l") # lines (default for time series)
plot(lynx, type="b", main="Type b") # points connected by lines
plot(lynx, type="c", main="Type c") # lines-only part of "b" (fixed: was type="b")
plot(lynx, type="h", main="Type h") # high density
plot(lynx, type="s", main="Type s") # steps
plot(lynx, type="n", main="Type n") # no plot
# Example: advanced line plot with R Base
par(mar=c(4,3,3,3), col.axis="darkgreen") # change of plot margins
plot(cars$speed, type="s", col="red", xlab="Cars ID", ylab="Speed",main = "Car Speed")
text(8, 14, "Speed in mph", cex=0.85, col="red") # explanatory text for plot 1
par(new=TRUE) # allows 2-in-1 plot
plot(cars$dist, type="s", bty="n", ann=FALSE, axes=FALSE, col="darkblue")
axis(side=4, col = "darkblue") # y axis for plot 2
text(37, 18, "Stopping distance in ft", cex=0.85, col="darkblue") # explanations for plot 2
title(main="Speed and Stopping\n Distances of Cars") # main title
# graphical parameters
?par
par()
x = 1:41
y = rivers
plot(x,y, col = "green", pch = 20,
main = "Lengths of\nMajor N. American Rivers",
col.main ="red", xlab = "",
ylab = "length in miles")
plot(x,y, xlab="index",ylab="length in miles", main = "Length of n.american rivers",
col.main="orange",pch = 20)
|
6579a3fd294189ef4124b568727feb6a5dddd3da
|
2ebd7c57f02e7be7285213701e6d2d33cd164f44
|
/code/get_me_pheno_associations.R
|
d2c9da50ad0e9313fedfb89c333580fc7812fd66
|
[] |
no_license
|
SimonCouv/integrative-omics-public
|
780e6bc679827ed6eae81df6f83d21a3eff97619
|
2ff3ba45fce2f6b02bd2fae9163fe1598fa3a49c
|
refs/heads/master
| 2020-04-30T12:58:41.688568
| 2019-03-26T12:33:31
| 2019-03-26T12:33:31
| 176,841,222
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,188
|
r
|
get_me_pheno_associations.R
|
# Compute associations between module eigengenes (MEs) and clinical
# phenotypes, one row of results per ME.
#
# Args:
#   eigengenes: data.frame of eigengenes; samples in rows (rownames are the
#     sample IDs), one column per ME. "#" in column names is replaced by "_"
#     below so the names survive reshaping.
#   y: phenotype data.frame for the same samples; the code below reads the
#     columns Bruneckcode, CVD0010, CVD0010W, AGE, SEX, HRCVD, statin and
#     diabetes -- assumed present, confirm against the caller's data.
#
# Returns a list of matrices keyed by ME:
#   pval_m    - BH-adjusted association p-values (adjusted per phenotype)
#   est_m     - effect estimates (wilcoxon estimates sign-flipped so that
#               positive means group 1 is higher; see comment below)
#   p_star_m  - significance stars derived from pval_m via get_p_stars()
#   coxph_m   - tidied Cox PH coefficients for time-to-CVD (CVD0010W)
#   lrm_gof_m - goodness-of-fit p-values for the lrm (logistic) models
#
# NOTE(review): relies on lrm (rms), coxph/Surv (survival), tidy (broom),
# tidyverse verbs and get_p_stars() being available in the calling
# environment -- they are not imported here.
get_me_pheno_associations <- function(eigengenes, y){
  # phenotype association models
  # Each helper fits one model to a per-ME data.frame `df` that has a
  # `value` column (the eigengene scores) plus the phenotype columns.
  me_wilcox_cvd <- function(df) wilcox.test(formula=value~CVD0010, data=df, conf.int=TRUE)
  me_wilcox_sex <- function(df) wilcox.test(formula=value~SEX, data=df, conf.int=TRUE)
  me_wilcox_hrcvd <- function(df) wilcox.test(formula=value~HRCVD, data=df, conf.int=TRUE)
  me_spear_age <- function(df) cor.test(formula = ~value+AGE, data=df, method="spearman")
  me_coxph_cvdw <- function(df) coxph(formula=Surv(CVD0010W,CVD0010)~value, data=df)
  me_wilcox_diab <- function(df) wilcox.test(formula=value~diabetes, data=df, conf.int=TRUE)
  me_wilcox_statin <- function(df) wilcox.test(formula=value~statin, na.action = "na.omit", data=df, conf.int=TRUE)
  # Logistic models (rms::lrm); each returns the Wald p-value and the
  # coefficient for the eigengene term plus a goodness-of-fit p-value.
  me_lrm_cvd_sa_adj <- function(df){
    fit <- lrm(formula=CVD0010~value+SEX+AGE, data = df, x=TRUE, y=TRUE)
    list(p.value = anova(fit)['value', 'P'],
         coef = fit$coefficients['value'],
         gof = resid(fit, "gof")['P'])
  }
  me_lrm_cvd_sas_adj <- function(df){
    fit <- lrm(formula=CVD0010~value+SEX+AGE+statin, data = df, x=TRUE, y=TRUE)
    list(p.value = anova(fit)['value', 'P'],
         coef = fit$coefficients['value'],
         gof = resid(fit, "gof")['P'])
  }
  me_lrm_cvd <- function(df){
    fit <- lrm(formula=CVD0010~value, data = df, x=TRUE, y=TRUE)
    list(p.value = anova(fit)['value', 'P'],
         coef = fit$coefficients['value'],
         gof = resid(fit, "gof")['P'])
  }
  me_lrm_diab_ss_adj <- function(df){
    fit <- lrm(formula=diabetes~value+SEX+statin, data = df, x=TRUE, y=TRUE)
    list(p.value = anova(fit)['value', 'P'],
         coef = fit$coefficients['value'],
         gof = resid(fit, "gof")['P'])
  }
  me_lrm_diab <- function(df){
    fit <- lrm(formula=diabetes~value, data = df, x=TRUE, y=TRUE)
    list(p.value = anova(fit)['value', 'P'],
         coef = fit$coefficients['value'],
         gof = resid(fit, "gof")['P'])
  }
  # # note on interpretation: wilcox.test estimate is positive when group 0 is higher than group 1
  # # i.e. the estimate ~ group0 - group1
  # dfa <- data.frame(x=c(rnorm(100,4,1), rnorm(150,2,1)), group=rep(c(1,0), c(100,150)))
  # dfa <- data.frame(x=c(rnorm(100,4,1), rnorm(150,2,1)), group=rep(c('1','0'), c(100,150)))
  # dfa <- data.frame(x=c(rnorm(100,4,1), rnorm(150,2,1)), group=factor(rep(c('1','0'), c(100,150))))
  # dfa <- data.frame(x=c(rnorm(100,4,1), rnorm(150,2,1)), group=factor(rep(c(1,0), c(100,150))))
  # dfa <- data.frame(x= c(rnorm(150,2,1), rnorm(100,4,1)), group=factor(rep(c(0,1), c(150,100))))
  # dfa <- data.frame(x= c(rnorm(150,2,1), rnorm(100,4,1)), group=rep(c(0,1), c(150,100)))
  # wilcox.test(formula=x~group, data = dfa, conf.int=TRUE)$estimate
  # xs <- rnorm(500,0,10)
  # dfb <- data.frame(x=xs, y=xs+rnorm(500,10,1))
  # cor.test(formula=~x+y, data=dfb, method="spearman")
  # browser()
  # statistical tests
  # Reshape to long format (one row per sample x ME), nest one data.frame
  # per ME, then fit every model to every ME. Warnings (e.g. ties in
  # wilcox.test) are suppressed wholesale for the whole pipeline.
  suppressWarnings(
    assoc_res <- eigengenes%>%
      rownames_to_column("sample") %>%
      setNames(sub(names(.),pattern = "#", replacement = "_")) %>%
      bind_cols(y,.) %>%
      # bind_cols(cvd[,-1]) %>%
      gather(ME, value, -one_of("Bruneckcode", "CVD0010", "CVD0010W",
                                "AGE", "SEX", "HRCVD", "statin", "diabetes", "sample")) %>%
      # spread(sample, value) %>%
      dplyr:: group_by(ME) %>%
      nest() %>%
      dplyr::mutate(
        wilcox_cvd_res=map(data, me_wilcox_cvd),
        wilcox_sex_res=map(data, me_wilcox_sex),
        wilcox_hrcvd_res=map(data, me_wilcox_hrcvd),
        wilcox_diab_res=map(data, me_wilcox_diab),
        wilcox_statin_res=map(data, me_wilcox_statin),
        spear_age_res=map(data, me_spear_age),
        coxph_cvdw_model=map(data, me_coxph_cvdw),
        lrm_cvd_sa_adj_res = map(data, me_lrm_cvd_sa_adj),
        lrm_cvd_sas_adj_res = map(data, me_lrm_cvd_sas_adj),
        lrm_cvd_res = map(data, me_lrm_cvd),
        lrm_diab_ss_adj_res = map(data, me_lrm_diab_ss_adj),
        lrm_diab_res = map(data, me_lrm_diab))
  )
  # browser()
  # Extract raw p-values into an ME x phenotype matrix. Some associations
  # (hrcvd, diab) are kept fitted above but excluded here (commented out).
  pval_m <- assoc_res %>%
    dplyr::mutate(
      spear_age_p=map_dbl(spear_age_res, "p.value"),
      wilcox_sex_p=map_dbl(wilcox_sex_res, "p.value"),
      wilcox_statin_p=map_dbl(wilcox_statin_res, "p.value"),
      # wilcox_hrcvd_p=map_dbl(wilcox_hrcvd_res, "p.value"),
      # wilcox_diab_p=map_dbl(wilcox_diab_res, "p.value"),
      lrm_cvd_sas_adj_p = map_dbl(lrm_cvd_sas_adj_res, "p.value"),
      lrm_cvd_sa_adj_p = map_dbl(lrm_cvd_sa_adj_res, "p.value"),
      lrm_cvd_p = map_dbl(lrm_cvd_res, "p.value"),
      # lrm_diab_ss_adj_p = map_dbl(lrm_diab_ss_adj_res, "p.value"),
      # lrm_diab_p = map_dbl(lrm_diab_res, "p.value"),
      wilcox_cvd_p=map_dbl(wilcox_cvd_res, "p.value")) %>%
    dplyr::select(ME, spear_age_p:wilcox_cvd_p) %>%
    column_to_rownames("ME") %>%
    as.matrix()
  # BH-adjust p-values per phenotype
  pval_m <- apply(pval_m, 2, p.adjust, method="BH")
  # #overconservative: BH-adjust over all phenotype associations simultaneously
  # pval_m <- matrix(p.adjust(pval_m, method="BH"),
  #                  nrow = nrow(pval_m),
  #                  ncol= ncol(pval_m),
  #                  dimnames = list(rownames(pval_m),
  #                                  colnames(pval_m)))
  #for wilcox estimate: invert direction so positive when group 1 is higher
  est_m <- assoc_res %>%
    dplyr::mutate(
      spear_age_est=map_dbl(spear_age_res, "estimate"),
      wilcox_sex_est=map_dbl(wilcox_sex_res, "estimate")*-1,
      wilcox_statin_est=map_dbl(wilcox_statin_res, "estimate")*-1,
      # wilcox_hrcvd_est=map_dbl(wilcox_hrcvd_res, "estimate")*-1,
      # wilcox_diab_est=map_dbl(wilcox_diab_res, "estimate")*-1,
      lrm_cvd_sas_adj_coef = map_dbl(lrm_cvd_sas_adj_res, "coef"),
      lrm_cvd_sa_adj_coef = map_dbl(lrm_cvd_sa_adj_res, "coef"),
      lrm_cvd_coef = map_dbl(lrm_cvd_res, "coef"),
      # lrm_diab_ss_adj_coef = map_dbl(lrm_diab_ss_adj_res, "coef"),
      # lrm_diab_coef = map_dbl(lrm_diab_res, "coef")),
      wilcox_cvd_est=map_dbl(wilcox_cvd_res, "estimate")*-1) %>%
    dplyr::select(ME, spear_age_est:wilcox_cvd_est) %>%
    column_to_rownames("ME") %>%
    as.matrix()
  # browser()
  # Significance stars from the (already BH-adjusted) p-value matrix.
  p_star_m <- get_p_stars(pval_m)
  # Tidied Cox PH output (coefficient, std error, CI) for the survival model.
  coxph_m <- assoc_res %>%
    dplyr::mutate(cvdw_tidy=map(coxph_cvdw_model,tidy, exponentiate=FALSE)) %>%
    unnest(cvdw_tidy) %>%
    dplyr::select(ME, estimate:conf.high)%>%
    column_to_rownames("ME") %>%
    as.matrix()
  # evidence against H0: good fit
  # https://stats.stackexchange.com/questions/169438/evaluating-logistic-regression-and-interpretation-of-hosmer-lemeshow-goodness-of
  lrm_gof_m <- assoc_res %>%
    dplyr::mutate(
      lrm_cvd_sas_adj_gof = map_dbl(lrm_cvd_sas_adj_res, "gof"),
      lrm_cvd_sa_adj_gof = map_dbl(lrm_cvd_sa_adj_res, "gof"),
      lrm_cvd_gof = map_dbl(lrm_cvd_res, "gof"),
      lrm_diab_ss_adj_gof = map_dbl(lrm_diab_ss_adj_res, "gof"),
      lrm_diab_gof = map_dbl(lrm_diab_res, "gof")) %>%
    dplyr::select(ME, lrm_cvd_sas_adj_gof:lrm_diab_gof) %>%
    column_to_rownames("ME") %>%
    as.matrix()
  return(list(pval_m=pval_m, est_m=est_m, p_star_m=p_star_m, coxph_m=coxph_m, lrm_gof_m=lrm_gof_m))
}
|
28ea8f1f6247521140106d31fe688151038dba0c
|
61d29d3ef402b7d47e527d054372e1d50e6a2e12
|
/R/KNN.R
|
fa516588b50f13c37cb5a0240f86d06e8aa3ba87
|
[] |
no_license
|
DavisLaboratory/msImpute
|
2cab3e32be84656b9120db00fb32083547665e63
|
538873e2d8f512bfdfba4f764457d194d961f26a
|
refs/heads/master
| 2023-08-10T03:45:05.054105
| 2023-07-31T08:25:53
| 2023-07-31T08:25:53
| 239,129,382
| 9
| 0
| null | 2022-10-13T11:02:07
| 2020-02-08T12:32:33
|
R
|
UTF-8
|
R
| false
| false
| 1,157
|
r
|
KNN.R
|
#' k-nearest neighbour (KNN)
#'
#' The fraction of k-nearest neighbours in the original data that are preserved as k-nearest neighbours in imputed data.
#' KNN quantifies preservation of the local, or microscopic structure.
#' Requires complete datasets - for developers/use in benchmark studies only.
#'
#' @param xorigin numeric matrix. The original log-intensity data. Can not contain missing values.
#' @param ximputed numeric matrix. The imputed log-intensity data. Can not contain missing values.
#' @param k number of nearest neighbours. default to k=3.
#'
#' @return numeric The proportion of preserved k-nearest neighbours in imputed data.
#' @examples
#' data(pxd007959)
#' y <- pxd007959$y
#' y <- y[complete.cases(y),]
#' # for demonstration we use same y for xorigin and ximputed
#' KNN(y, y)
#'
#'
#' @export
KNN <- function(xorigin, ximputed, k=3){
  # Samples are columns, but FNN::get.knn finds neighbours of rows,
  # hence the transposes. nn.index is an n_samples x k matrix of
  # neighbour indices.
  KNC_org <- FNN::get.knn(t(xorigin), k = k)$nn.index
  KNC_amp <- FNN::get.knn(t(ximputed), k = k)$nn.index
  # Per sample: fraction of its imputed-data neighbours that are also
  # neighbours in the original data. vapply preallocates the result,
  # replacing the previous pattern of growing a vector with c() in a loop.
  pmeans <- vapply(
    seq_len(ncol(xorigin)),
    function(i) mean(KNC_amp[i, ] %in% KNC_org[i, ]),
    numeric(1)
  )
  mean(pmeans)
}
|
241d35acee766590a5f8bdc5d532f1f393feeaac
|
27f67f76a45865519d6f98acc6d650a4df494e1a
|
/netcompLib/man/compute_cellwise_loglik.Rd
|
68985f3fcefb649912851ccc356f4dfc72e3c09c
|
[] |
no_license
|
minghao2016/netcompLib
|
3154409009ad9e49b8d886300dc722f09ea80464
|
aa5c15d374959a27ced7e4b14b25629c8c5f6186
|
refs/heads/master
| 2021-01-20T13:16:57.664085
| 2016-05-11T13:34:00
| 2016-05-11T13:34:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 583
|
rd
|
compute_cellwise_loglik.Rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{compute_cellwise_loglik}
\alias{compute_cellwise_loglik}
\title{Compute the loglikelihood from probabilities and counts}
\usage{
compute_cellwise_loglik(x, n, p)
}
\arguments{
\item{x}{[matrix/array-int] :: observed counts in each cell}
\item{n}{[int] :: Number of observations}
\item{p}{[vector-double] :: Corresponding estimated cell probabilities (is just x / n)}
}
\value{
[vector-double] :: Vectorized version of log-likelihood (per edge group)
}
\description{
Compute the loglikelihood from probabilities and counts
}
|
6a24f9f0534e29dd5dfe9e3498193f443498ac4c
|
b4cc2e543a3822cd9e03a660348b3da6c47a8a14
|
/man/plotShifts.Rd
|
222db550122ead476bbdf186f8b96a61c243455c
|
[] |
no_license
|
hferg/BTprocessR
|
09e078ed5f97c6f63db877794f74c39003ed4fd5
|
ce8ecd4cc422605438c98787ad326b0a077a1eb7
|
refs/heads/master
| 2021-05-11T06:25:43.805695
| 2020-03-04T16:36:11
| 2020-03-04T16:36:11
| 117,988,586
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 7,010
|
rd
|
plotShifts.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotShifts.R
\name{plotShifts}
\alias{plotShifts}
\title{plotShifts}
\usage{
plotShifts(PP, plot.options = list(), ...)
}
\arguments{
\item{PP}{The output of the rjpp function.}
\item{plot.options}{A list of control options. See details.}
\item{...}{Additional arguments passed to plotPhylo}
}
\description{
Plots the locations of the origins of scalars from the postprocessor output
of bayestraits.
}
\details{
The default behaviour of plotShifts depends on the transformations
present in the rjpp output. If variable rates, then 3 trees will be plotted:
the first has branches coloured according the log of the mean rate, the
second shows all node scalars present more than once in the posterior,
coloured according to the mean log rate and the third shows the same for
branch scalars. If delta, kappa or lambda are present then a single tree is
plotted showing all nodes that receive a scalar, coloured according to mean
magnitude. If multiple transformations are present then the user will be
prompted to select one.
The plot.options list provides a high degree of control over what
is plotted, allowing the default behaviour to be customised. The options, and
values that they can take, are as follows.
\itemize{
\item{threshold:}{ [0-1] The threshold of presence in the posterior over which
a node and/or branch scalar is plotted. Also the threshold referenced by
coloured.edges and scaled.edges.}
\item{transformation:}{ [rate, delta, lambda, kappa] The transformation to
plot.}
\item{edge.colour:}{ [none, mean, median, mode, sd, scale_pc] The metric to
colour edges by. If none branches default to the na.colour option. Mean,
median, mode and sd correspond to the appropriate branch lengths from the
posterior of trees and scale_pc colours edges according to the percentage of
time they are scaled in the posterior.}
\item{edge.transparency:}{ [none, scale_pc, sd] The measure to make edges
proportionally transparent by. None results in uniform solid branches,
scale_pc gives edges that are scaled less frequently in the posterior higher
transparency, and sd gives branches that have higher SD of estimated branch
lengths more solid colours.}
\item{coloured.edges:}{ [all, threshold] The edges to colour. If "all" then
all edges are coloured according to edge.colour, otherwise if "threshold"
then only edges that are scaled over the specified threshold are coloured.
Uncoloured edges default to na.colour}
\item{edge.palette:}{ [viridis, magma, inferno, plasma, viridis,
c("<colour1>", "<colour2>")] The colour palette for edges. If not using a
named palette then a vector of at least two colours must be specified - the
first will be the low end of the palette and the last the top end. Any other
colours in the vector will be included in the gradient.}
\item{edge.scale:}{ [none, mean, median, mode]}
\item{scaled.edges:}{ [all, threshold]}
\item{node.colour:}{ []}
\item{node.scale:}{ []}
\item{node.transparency:}{ []}
\item{node.palette:}{ [viridis, magma, inferno, plasma, viridis,
c("<colour1>", "<colour2>")] The colour palette for node symbols. If not
using a named palette then a vector of at least two colours must be
specified - the first will be the low end of the palette and the last the top
end. Any other colours in the vector will be included in the gradient.}
\item{node.fill:}{ []}
\item{node.border:}{ []}
\item{node.shape:}{ ["circle"] The shape for the node labels - "circle",
"square", "diamond", "uptriangle", "downtriangle".}
\item{node.cex:}{ [0-??] The scaling factor for node symbols. This is the
scaling factor that the symbols start at before any subsequent scaling (i.e.
if a node symbol receives no scaling, this is what it's scaling factor will
be.)}
\item{branch.colour:}{ []}
\item{branch.transparency:}{ []}
\item{branch.palette:}{ [viridis, magma, inferno, plasma, viridis,
c("<colour1>", "<colour2>")] The colour palette for branch symbols. If not
using a named palette then a vector of at least two colours must be
specified - the first will be the low end of the palette and the last the top
end. Any other colours in the vector will be included in the gradient.}
\item{branch.fill:}{ []}
\item{branch.border:}{ []}
\item{branch.scale:}{ []}
\item{branch.shape:}{ ["circle"] The shape for the branch labels - "circle",
"square", "diamond", "uptriangle", "downtriangle".}
\item{branch.cex:}{ [0-??] The scaling factor for branch symbols. This is the
scaling factor that the symbols start at before any subsequent scaling (i.e.
if a branch symbol receives no scaling, this is what it's scaling factor will
be.}
\item{na.colour:}{ []}
\item{layout:}{ [c("e", "n", "b")] This controls the layout of the plots. The
option takes the form of a vector of letters - "e", "n" and/or "b". Each
element of the vector is a new panel in the plot, and the composition of
letters in the element determines whether coloured edges - "e" - node labels -
"n" - and/or branch labels - "b" - are plotted. e.g. c("e", "n", "b") gives a
three panel plot - one panel with coloured edges, one with node labels and
one with branch labels. c("en", "b") produces two plots - one with coloured
edges and node labels and one with branch labels. c("enb") produces a single
plot with edges, node labels and branch labels.}
\item{show.legend:}{ [TRUE, FALSE] Whether or not to show legends. Legends
can be drawn seperately using the plotLegends function and then added to
plots using some other graphics software. This is useful if the legend butts
up against the lower branches of scaled phylogenies, if the type = "fan"
option is used (the automatic legend placement puts it in a weird place) or
if a more complex legend is needed (e.g. a histogram).}
\item{legend.pos:}{ [auto, c(xl, yb, xr, yt)] The legend position on the
plot. If "auto" then the legend position will be in the bottom right at
"best guess" coordinates. Otherwise a vector of coordinates for bottom left
and top right corner of the legend.}
\item{legend:}{ []}
\item{save.plot:}{ [TRUE, FALSE] If TRUE then the plot will be saved to the
working directory as a pdf and NOT plotted to the screen.}
\item{filename:}{ [<some_filename>] The filename to save the plot to. If not
specified then a filename will be generated based on the time and date.
There's no need to specify file extension.}
\item{plot.size:}{ [c(<width>, <height>)] The width and height of the saved
plot. If plot.format = "png" then the unit is in pixels.}
\item{legend.only}{ [TRUE, FALSE] When TRUE only the legend(s) corresponding
to all the other options are plotted. When this option is called two legends
are plotted per plot - the one that normally appears on the plot, and one
accompanied by a histogram showing the same data used to generate the
colours. This option is useful for when the legend is to be combined with the
plot in separate graphical software (e.g. GIMP, Inkscape). This option is
compatible with save.plot.}
}
}
|
805b6f4dfb715294c1a21b7f9e9d7465b80b4ce5
|
55f8768526a5aba107fc8ef55749a3c5b80cfe2e
|
/man/qrmix.Rd
|
7d116ba8647502e4e5e4831ce55f548095f1c130
|
[] |
no_license
|
cran/qrmix
|
7bcbf5dff46b0ecb0b323adbf62104eafda38955
|
6f6b990cef132275fb7aae7f43a061416699a65d
|
refs/heads/master
| 2021-01-20T08:53:49.978588
| 2017-05-03T20:49:24
| 2017-05-03T20:49:24
| 90,196,443
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,956
|
rd
|
qrmix.Rd
|
\name{qrmix}
\alias{qrmix}
\title{Quantile Regression Classification
}
\description{\code{qrmix} estimates the components of a finite mixture model by using quantile regression to select a group of quantiles that satisfy an optimality criterion chosen by the user.
}
\usage{
qrmix(formula, data, k, Ntau=50, alpha=0.03, lossFn="Squared", fitMethod="lm",
xy=TRUE, ...)
}
\arguments{
\item{formula}{an object of class \code{"formula"}.
}
\item{data}{an optional data frame that contains the variables in \code{formula}.
}
\item{k}{number of clusters.
}
\item{Ntau}{an optional value that indicates the number of quantiles that will be considered for quantile regression comparison. \code{Ntau} should be greater or equal than \eqn{2k}{2k}.
}
\item{alpha}{an optional value that will determine the minimum separation between the k quantiles that represent each of the k clusters. \code{alpha} should be smaller than \eqn{\frac{1}{2k}}{1/(2k)}.
}
\item{lossFn}{the loss function to be used to select the best combination of k quantiles. The available functions are \code{"Squared"}, \code{"Absolute"}, \code{"Bisquare"}, and \code{"Huber"}.
}
\item{fitMethod}{the method to be used for the final fitting. Use \code{"lm"} for OLS (default), \code{"rlm"} for robust regression, and \code{"rq"} to use fit from quantile regression.
}
\item{xy}{logical. If \code{TRUE} (the default), the data will be saved in the qrmix object.
}
\item{\dots}{additional arguments to be passed to the function determined in \code{fitMethod}.
}
}
\details{The optimality criterion is determined by the \code{lossFn} parameter. If, for example, the default value is used (\code{lossFn = "Squared"}), the \code{k} quantiles selected will minimize the sum of squared residuals. Use \code{"Bisquare"} or \code{"Huber"} to make the method less sensitive to outliers.
}
\value{
\code{qrmix} returns an object of class "qrmix"
\item{coefficients}{a matrix with k columns that represent the coefficients for each cluster.}
\item{clusters}{cluster assignment for each observation.}
\item{quantiles}{the set of k quantiles that minimize the mean loss.}
\item{residuals}{the residuals, response minus fitted values.}
\item{fitted.values}{the fitted values.}
\item{call}{the matched call.}
\item{xy}{the data used if xy is set to \code{TRUE}.}
}
\references{Emir, B., Willke, R. J., Yu, C. R., Zou, K. H., Resa, M. A., and Cabrera, J. (2017), "A Comparison and Integration of Quantile Regression and Finite Mixture Modeling" (submitted).
}
\examples{
data(blood.pressure)
#qrmix model using default function values:
mod1 = qrmix(bmi ~ ., data = blood.pressure, k = 3)
summary(mod1)
#qrmix model using Bisquare loss function and refitted with robust regression:
mod2 = qrmix(bmi ~ age + systolic + diastolic + gender, data = blood.pressure, k = 3,
Ntau = 25, alpha = 0.1, lossFn = "Bisquare", fitMethod = "rlm")
summary(mod2)
}
|
379a1bce8307a1fea71c5725a22f17bdef92ab3d
|
2921c9c994f3c67c3ba2edac682fe91f1d1797cf
|
/attendance/server.R
|
e2ab7d1186fb71e62ebf34b4c139f3ad9146659b
|
[] |
no_license
|
leighseverson/attendance_app
|
5c66b47f70035eec528c40b7e171661b123d33bc
|
8ab4b49011ca47b175ed394465903886ac36a7de
|
refs/heads/master
| 2023-09-01T14:05:47.673356
| 2023-08-27T17:21:10
| 2023-08-27T17:21:10
| 68,144,545
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,021
|
r
|
server.R
|
library(shiny)
library(shinyjs)
library(dplyr)
library(tibble)
library(zoo)
library(DT)
# Roster of enrolled students (alphabetical). Used to build the attendance
# checklist and the downloadable attendance CSV.
student_list <- c(
  "Anna", "Cahill", "Danni", "Eileen", "Elizabeth", "Jane", "Juan",
  "Jyoti", "Khalid", "Maggie", "Marika", "Melea", "Nancy", "Olivia",
  "Rachel", "Wei", "Zane"
)
shinyServer(function(input, output) {

  # Reactive: long-format record of today's selections. One row per selected
  # student (input$student_list is a vector; date/timestamp are recycled).
  form_data <- reactive({
    data.frame(name = input$student_list,
               date = input$date,
               timestamp = Sys.Date())
  })

  # Reactive: one row per enrolled student with a 0/1 attendance flag,
  # suitable for download as a CSV.
  make_attendance_file <- reactive({
    data.frame(student = student_list) %>%
      mutate(attended = as.numeric(student %in% input$student_list))
  })

  # Reactive: randomly partition today's attendees into groups of ~2.
  make_groups <- reactive({
    form_data() %>%
      select(name) %>%
      mutate(n = n(),
             # Figure out how many groups there should be based on
             # the number of people in class today. Guarantee at least one
             # group so seq_len() below never gets 0: the previous
             # `1:n_groups` form silently produced c(1, 0) when only a
             # single student was selected.
             n_groups = pmax(n %/% 2, 1),
             # Create the right number of assignments to each group
             Group = rep(seq_len(n_groups[1]), length.out = n[1]),
             # Shuffle the group assignments randomly
             Group = sample(Group)
      ) %>%
      group_by(Group) %>%
      summarize(Students = paste(name, collapse = ", "))
  })

  # Download handler: attendance CSV named after the class date.
  output$downloadData <- downloadHandler(
    filename = function() { paste("attendance_", input$date, ".csv", sep = "") },
    content = function(file) {
      write.csv(make_attendance_file(), file, row.names = FALSE)
    }
  )

  # Render the group-assignment table when the button is pressed.
  observeEvent(input$makegroups, {
    output$tbl <- renderTable({
      make_groups()
    },
    caption = "Group assignments",
    caption.placement = "top",
    include.rownames = FALSE,
    include.colnames = FALSE)
  })
})
|
8ab8f6094586719d0ea95ef4846d6c760a496a72
|
841a858385500c1465b6673a2e78ba261bf687e3
|
/tests/testthat/test-CART.R
|
e4091f3fa27e13bdf9b459d7dc8c9678633f66c5
|
[] |
no_license
|
jefshe/flipTrees
|
ae3398be905652b861e2e22bd52d1c66c415e348
|
f382bc97cd620ea3d4a3c93d806262582c096252
|
refs/heads/master
| 2020-03-16T14:21:52.097183
| 2018-05-09T06:48:32
| 2018-05-09T06:54:20
| 132,714,460
| 0
| 0
| null | 2018-05-09T06:53:16
| 2018-05-09T06:53:15
| null |
UTF-8
|
R
| false
| false
| 6,528
|
r
|
test-CART.R
|
context("CART")

# Shared fixtures for all CART tests below.
# spam.sample: random 500-row subsample of the DAAG spam data (note: drawn
# with sample() without a fixed seed, so it differs between runs).
data("spam7", package = "DAAG")
spam.sample <- spam7[sample(seq(1,4601), 500, replace=FALSE), ]
data(cola, package = "flipExampleData")
colas <- cola
data(bank, package = "flipExampleData")
# Factor version of the bank Overall rating, for classification-tree tests.
bank$fOverall <- factor(bank$Overall)
levels(bank$fOverall) <- c(levels(bank$fOverall), "8") # add an empty factor level
test_that("saving variables",
{
    # Regression tree: predict() should work, Probabilities() should error.
    z <- CART(Overall ~ Fees + Interest + Phone + Branch + Online + ATM, data = bank,
              subset = bank$ID > 100)
    expect_error(predict(z), NA)
    expect_error(flipData::Probabilities(z))
    # Classification tree (factor outcome): both should work.
    z <- suppressWarnings(CART(fOverall ~ Fees + Interest + Phone + Branch + Online + ATM,
                               data = bank, subset = bank$ID > 100))
    expect_error(predict(z), NA)
    expect_error(flipData::Probabilities(z), NA)
})
# Pin exact numeric outputs of prediction/probabilities for fixed inputs.
z <- CART(Overall ~ Fees + Interest + Phone + Branch + Online + ATM, data = bank, subset = bank$ID > 100)
test_that("rpart prediction",
{
    expect_equal(unname(predict(z)[1]), 4.258064516129032)
})
z <- suppressWarnings(CART(fOverall ~ Fees + Interest + Phone + Branch + Online + ATM,
                           data = bank, subset = bank$ID > 100))
test_that("rpart Probabilities",
{
    expect_equal(unname(flipData::Probabilities(z)[1, 4]), 0.2444444444444445)
})
z <- suppressWarnings(CART(fOverall ~ Fees + Interest + Phone + Branch + Online + ATM,
                           data = bank, subset = bank$ID > 100))
# Reading in the libraries so that their outputs do not pollute the test results.
library(mice)
library(hot.deck)
test_that("Error if missing data",
{
    type = "Sankey"
    # Changing data
    # Complete data: no error expected even with missing = "Error if missing data".
    expect_error((CART(yesno ~ crl.tot + dollar + bang + money + n000 + make,
                       data = spam.sample, missing = "Error if missing data")),NA)
    # Introduce NAs, then every combination below should throw.
    colas$Q32[unclass(colas$Q32) == 1] <- NA
    expect_that((CART(Q32 ~ Q2, data = colas, subset = TRUE, missing = "Error if missing data")),
                (throws_error()))
    expect_that((CART(Overall ~ Fees + Interest + Phone + Branch + Online + ATM,
                      data = bank, subset = TRUE, weights = NULL, output = type, missing = "Error if missing data")), (throws_error()))
    # filter
    expect_that((CART(Overall ~ Fees + Interest + Phone + Branch + Online + ATM,
                      data = bank, subset = bank$ID > 100, weights = NULL, output = type, missing = "Error if missing data")), (throws_error()))
    # weight
    expect_that((CART(Overall ~ Fees + Interest + Phone + Branch + Online + ATM,
                      data = bank, subset = TRUE, weights = bank$ID, output = type, missing = "Error if missing data")), (throws_error()))
    # weight and filter
    expect_that((CART(Overall ~ Fees + Interest + Phone + Branch + Online + ATM,
                      data = bank, subset = bank$ID > 100, weights = bank$ID, missing = "Error if missing")), (throws_error()))
    # DS-1525, subset creates empty level of outcome
    expect_error(suppressWarnings(CART(Q32 ~ Q2 + Q3, data = colas, subset = colas$Q32 != "Don't know")), NA)
})
# Smoke-test every combination of missing-data policy and output type:
# each call should run without error (except imputation on the spam data,
# which is expected to error).
for (missing in c("Exclude cases with missing data",
                  "Use partial data",
                  "Imputation (replace missing values with estimates)"))
    for (type in c("Sankey", "Tree", "Text", "Prediction-Accuracy Table", "Cross Validation"))
        test_that(paste(missing, type),
        {
            imputation <- missing == "Imputation (replace missing values with estimates)"
            expect_error((suppressWarnings(CART(yesno ~ crl.tot + dollar + bang + money + n000 + make,
                                                data = spam.sample, subset = TRUE, weights = NULL,
                                                output = type, missing = missing))),
                         if (imputation) NULL else NA)
            colas$Q32[unclass(colas$Q32) == 1] <- NA
            colas.small <- colas[, colnames(colas) %in% c("Q32", "Q3", "Q2", "Q4_A", "Q4_B", "Q4_C", "Q11", "Q12")]
            colas.small$Q3[1] <- NA
            expect_error((suppressWarnings(CART(Q32 ~ Q3, data = colas.small, subset = TRUE,
                                                weights = NULL, output = type, missing = missing))), NA)
            expect_error((suppressWarnings(CART(Overall ~ Fees + Interest + Phone + Branch + Online + ATM,
                                                data = bank, subset = TRUE, weights = NULL, output = type, missing = missing))), NA)
            # filter
            expect_error((suppressWarnings(CART(Overall ~ Fees + Interest + Phone + Branch + Online + ATM,
                                                data = bank, subset = bank$ID > 100, weights = NULL, output = type,
                                                missing = missing))), NA)
            # weight
            expect_error((suppressWarnings(CART(Overall ~ Fees + Interest + Phone + Branch + Online + ATM,
                                                data = bank, subset = TRUE, weights = bank$ID, output = type,
                                                missing = missing))), NA)
            # weight and filter
            expect_error((suppressWarnings(CART(Overall ~ Fees + Interest + Phone + Branch + Online + ATM,
                                                data = bank, subset = bank$ID > 100, weights = bank$ID,
                                                output = type, missing = missing))), NA)
        })
# Smoke-test every pruning/early-stopping combination.
# Fixed: the test description previously reused the stale `missing`/`type`
# variables left over from the preceding loop, so every test in this grid
# carried the same misleading name instead of identifying its own case.
for (pruning in c("None", "Minimum error", "Smallest tree"))
    for (stopping in c(TRUE, FALSE))
        test_that(paste("prune:", pruning, "early stopping:", stopping),
        {
            expect_error((suppressWarnings(CART(Overall ~ Fees + Interest + Phone + Branch + Online + ATM, data = bank,
                                                subset = bank$ID > 100, weights = bank$ID,
                                                output = "Sankey", missing = "Exclude cases with missing data",
                                                prune = pruning, early.stopping = stopping))), NA)
        })
test_that("CART: dot in formula", {
    # `.` should expand to all remaining columns, matching the explicit list.
    cart <- CART(yesno ~ ., data = spam7)
    cart2 <- CART(yesno ~ crl.tot + dollar + bang + money + n000 + make, data = spam7)
    expect_equal(cart, cart2)
})
test_that("CART: many levels", {
    # A factor predictor with ~100 random two-letter levels should not error.
    many.levels <- replicate(100, paste(sample(LETTERS, 2), collapse = ""))
    spam7$new <- as.factor(sample(many.levels, nrow(spam7), replace = TRUE))
    expect_error(CART(yesno ~ ., data = spam7, early.stopping = FALSE), NA)
})
|
c1e192f03b6ff0f043f40fdcefca2beaf9210d3e
|
38088096f84050aece0bc1d73b33b7d845286bba
|
/man/wrapChiSqTestImpl.Rd
|
141cc6092c12993ba3ee5528cd424805869e50d9
|
[] |
no_license
|
Sandy4321/sigr
|
2ea94d62e82cd89eaeed3bf570546a07d03b27cf
|
2129bede56acf7673710893eaf400d4a6dc891ec
|
refs/heads/master
| 2021-01-14T02:35:47.657148
| 2017-02-10T01:21:54
| 2017-02-10T01:21:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 759
|
rd
|
wrapChiSqTestImpl.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ChiSqTest.R
\name{wrapChiSqTestImpl}
\alias{wrapChiSqTestImpl}
\title{Format quality of a logistic regression roughly in "APA Style"
( American Psychological Association ).}
\usage{
wrapChiSqTestImpl(df.null, df.residual, null.deviance, deviance)
}
\arguments{
\item{df.null}{null degrees of freedom.}
\item{df.residual}{residual degrees of freedom.}
\item{null.deviance}{null deviance}
\item{deviance}{residual deviance}
}
\value{
wrapped statistic
}
\description{
Format quality of a logistic regression roughly in "APA Style"
( American Psychological Association ).
}
\examples{
wrapChiSqTestImpl(df.null=7,df.residual=6,
null.deviance=11.09035,deviance=10.83726)
}
|
2878092ba633fde8f48c215dd44f85bca0b87639
|
ce3bc493274116150497e73aa7539fef1c07442a
|
/man/replacefactor.Rd
|
0d82318a943406f6cc4b6295690924ff70fa2cc9
|
[] |
no_license
|
laresbernardo/lares
|
6c67ff84a60efd53be98d05784a697357bd66626
|
8883d6ef3c3f41d092599ffbdd4c9c352a9becef
|
refs/heads/main
| 2023-08-10T06:26:45.114342
| 2023-07-27T23:47:30
| 2023-07-27T23:48:57
| 141,465,288
| 235
| 61
| null | 2023-07-27T15:58:31
| 2018-07-18T17:04:39
|
R
|
UTF-8
|
R
| false
| true
| 1,491
|
rd
|
replacefactor.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wrangling.R
\name{replacefactor}
\alias{replacefactor}
\title{Replace Factor Values}
\usage{
replacefactor(x, original, change)
}
\arguments{
\item{x}{Factor (or Character) Vector}
\item{original}{String or Vector. Original text you wish to replace}
\item{change}{String or Vector. Values you wish to replace the originals with}
}
\value{
Factor vector with transformed levels.
}
\description{
This function lets the user replace levels on a factor vector.
}
\examples{
library(dplyr)
data(dft)
# Replace a single value
dft <- mutate(dft, Pclass = replacefactor(Pclass, original = "1", change = "First"))
levels(dft$Pclass)
# Replace multiple values
dft <- mutate(dft, Pclass = replacefactor(Pclass, c("2", "3"), c("Second", "Third")))
levels(dft$Pclass)
}
\seealso{
Other Data Wrangling:
\code{\link{balance_data}()},
\code{\link{categ_reducer}()},
\code{\link{cleanText}()},
\code{\link{date_cuts}()},
\code{\link{date_feats}()},
\code{\link{file_name}()},
\code{\link{formatHTML}()},
\code{\link{holidays}()},
\code{\link{impute}()},
\code{\link{left}()},
\code{\link{normalize}()},
\code{\link{num_abbr}()},
\code{\link{ohe_commas}()},
\code{\link{ohse}()},
\code{\link{quants}()},
\code{\link{removenacols}()},
\code{\link{replaceall}()},
\code{\link{textFeats}()},
\code{\link{textTokenizer}()},
\code{\link{vector2text}()},
\code{\link{year_month}()},
\code{\link{zerovar}()}
}
\concept{Data Wrangling}
|
f5f7747c9a8aac9e73fcb554b300ebdbd2b7c064
|
032bfd7f855a5bf615bbcaea68efdd3c081d3cdb
|
/plot2.R
|
2cb887460b6e14a2f32a9cd910533c929ec39c0f
|
[] |
no_license
|
czhang81/ExData_Plotting1
|
9be0949881c372309e2e35a022f4f1f33682a8bc
|
c63de05336709378cc3c2f5a526803d5627fb756
|
refs/heads/master
| 2021-01-12T18:59:00.016534
| 2014-12-08T17:30:16
| 2014-12-08T17:30:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 503
|
r
|
plot2.R
|
mydata<-read.table("~/R2/household_power_consumption.txt",header=T,sep=";",na.strings="?")
mydata$Date<-as.Date(mydata$Date, format="%d/%m/%Y")
datatrue<-subset(mydata,Date>="2007-02-01"& Date <= "2007-02-02")
rm(mydata)
datetime <- paste(as.Date(datatrue$Date), datatrue$Time)
datatrue$Datetime <- as.POSIXct(datetime)
Sys.setlocale("LC_ALL", "en_US")
plot(datatrue$Datetime,datatrue$Global_active_power,type="l", xlab="" ,ylab="Global Active Power(kilowatts)")
dev.copy(png,file="plot2.png")
dev.off()
|
8c091864e13691c28a24ed7626795871bae94acd
|
c7840acfb3c7bba9ac956bb0872de40052dc18b5
|
/PCA.r
|
4513c67fee3656e6cfb976d37197923df72c72af
|
[] |
no_license
|
HaihuaWang-hub/Data-Graphics
|
4b24b1c0a650bc0e6b41e000075ff89e39e2d9bb
|
bf4e3fbfa2980eb64c4b02d7af289d61aa1096cd
|
refs/heads/master
| 2023-03-02T15:29:33.162245
| 2021-01-20T01:17:54
| 2021-01-20T01:17:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 803
|
r
|
PCA.r
|
library(maptools)
library(ggplot2)
library(ggrepel)
# Import files
setwd("~/R/Analysis/1_Test")
METADATA <- read.csv(file="metadata.csv",header=T)
# Make dataset
pilots.pca <- prcomp(na.omit(METADATA),scale=TRUE, center = TRUE) #standardized
biplot(pilots.pca) # Check the result
loading <- sweep(pilots.pca$rotation,MARGIN=2,pilots.pca$sdev,FUN="*")
loading <- data.frame(loading)
# ggplot
ggplot(loading) +
geom_segment(aes(xend=PC1, yend=PC2), x=0, y=0, arrow=arrow(length = unit(0.5,"cm"))) +
geom_text_repel(aes(x=PC1, y=PC2, label=rownames(loading)), size=8, color='black') +
xlim(-1,1) +
ylim(-1,1) +
theme_classic()+
theme(text=element_text(size=14,color="black"),
axis.text=element_text(size=12,color="black"))+
coord_fixed()
# Save
ggsave(file = "PCA.png")
|
0f9477ca9afa73f64c0c9ea720fa45339c640134
|
b6785fb75ce1a9ba7e37a91eee1f1c55af8b17b7
|
/man/readin.Rd
|
1cbc6b00ecc83c6f94c0fc55dc56d6d9adc24780
|
[
"MIT"
] |
permissive
|
kbario/NMRalter8r
|
19d642fc3d852d74478d9bf0f007066d1162ecbc
|
8c90da677d317d7e8df1ac459d38099dd89b3f75
|
refs/heads/main
| 2023-04-16T16:11:09.117793
| 2021-09-09T07:17:00
| 2021-09-09T07:17:00
| 404,548,734
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,502
|
rd
|
readin.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readin.R
\name{readin}
\alias{readin}
\title{Import 1D NMR spectra}
\usage{
readin(path)
}
\arguments{
\item{path}{Given as a string, the path to the overarching files containing the NMR spectrum.}
}
\value{
The function exports the following three objects into the currently active R environment (no variable assignments needed):
\enumerate{
\item \strong{x}: The NMR spectrum in an array of values matched to p
\item \strong{p}: The column-matched ppm array of the x variable
\item \strong{m}: The spectrometer metadata as extracted from the \emph{acqus} file, row-matched to x
}
}
\description{
Import 1D NMR spectra
}
\details{
This function imports TopSpin processed NMR spectra as well as spectrometer and processing parameters found in files \emph{acqus} and \emph{procs}. Experiments can be filtered according to data acquisition variables using the \code{exp_type} argument: For example, to read standard 1D NMR experiments use \code{exp_type=list(exp='noesygppr1d')}. More than one argument can be provided as list element. \strong{Objects in the R environment with the same variable names will be overwritten.}
}
\section{}{
NA
}
\examples{
readin(path = system.file('extdata/15', package = 'NMRalter8r'))
}
\seealso{
Other {preproc}:
\code{\link{bl_}()},
\code{\link{cali}()},
\code{\link{flip_}()},
\code{\link{pproc}()}
}
\author{
Torben Kimhofer \email{torben.kimhofer@murdoch.edu.au}
}
\concept{{preproc}}
|
c06b29231a6ceae0c296f58eb0025a6ebac02a6c
|
71d504e4af359cd6b2aa5fef69d456389e23f4db
|
/Premier League Analysis/R/modelling_goals.R
|
6ec09fb5f06313d56d803cbd6dfddcdd1ed9a9bc
|
[] |
no_license
|
mfaisalshahid/Premier-League-Analysis
|
121e630502dd7f9ef639b4bccabd75f270fbfdbf
|
4d7917cb77294d70bc808e227a4592fe802ce8ab
|
refs/heads/master
| 2022-12-11T16:13:02.024192
| 2020-08-31T02:20:38
| 2020-08-31T02:20:38
| 291,592,882
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,789
|
r
|
modelling_goals.R
|
############################################################################################
########### GOALS_FOR VS XG ################################################################
########### NO TRANSFORMATIONS #############################################################
df <- select(season_all, pts, goals_for, xG)
linear_model <- lm(pts ~ ., data=df)
summary(linear_model)
########### NORMALIZED FEATURES #############################################################
df <- select(season_all, pts, goals_for, xG)
df <- normalize(df, exclude="pts")
linear_model <- lm(pts ~ ., data=df)
summary(linear_model)
########### NORMALIZED ALL ##################################################################
df <- select(season_all, pts, goals_for, xG)
df <- normalize(df)
linear_model <- lm(pts ~ ., data=df)
summary(linear_model)
########### STANDARDIZED FEATURES ###########################################################
df <- select(season_all, pts, goals_for, xG)
df <- standardize(df, exclude="pts")
linear_model <- lm(pts ~ ., data=df)
summary(linear_model)
########### STANDARDIZED ALL ################################################################
df <- select(season_all, pts, goals_for, xG)
df <- standardize(df)
linear_model <- lm(pts ~ ., data=df)
summary(linear_model)
########### STANDARDIZED->NORMALIZE FEATURES ################################################
df <- select(season_all, pts, goals_for, xG)
df <- normalize(standardize(df, exclude="pts"), exclude="pts")
linear_model <- lm(pts ~ ., data=df)
summary(linear_model)
########### STANDARDIZED->NORMALIZE ALL #####################################################
df <- select(season_all, pts, goals_for, xG)
df <- normalize(standardize(df))
linear_model <- lm(pts ~ ., data=df)
summary(linear_model)
########### NORMALIZE->STANDARDIZED FEATURES ################################################
df <- select(season_all, pts, goals_for, xG)
df <- normalize(standardize(df, exclude="pts"), exclude="pts")
linear_model <- lm(pts ~ ., data=df)
summary(linear_model)
########### NORMALIZE->STANDARDIZED ALL #####################################################
df <- select(season_all, pts, goals_for, xG)
df <- normalize(standardize(df))
linear_model <- lm(pts ~ ., data=df)
summary(linear_model)
########### goals only ALL #####################################################
df <- select(season_all, pts, goals_for)
linear_model <- lm(pts ~ ., data=df)
summary(linear_model)
########### xg only ALL #####################################################
df <- select(season_all, pts, xG)
linear_model <- lm(pts ~ ., data=df)
summary(linear_model)
########### goals_for for wins #####################################################
df <- select(season_all, win, goals_for)
linear_model <- lm(win ~ ., data=df)
summary(linear_model)
#############################################################################################
########### xG for wins #####################################################
df <- select(season_all, win, xG)
linear_model <- lm(win ~ ., data=df)
summary(linear_model)
########### goal_diff for pts #####################################################
df <- select(season_all, pts, goal_diff)
linear_model <- lm(pts ~ ., data=df)
summary(linear_model)
########### xgd for pts #####################################################
df <- select(season_all, pts, xGD)
linear_model <- lm(pts ~ ., data=df)
summary(linear_model)
########### goal_diff for wins #####################################################
df <- select(season_all, win, goal_diff)
linear_model <- lm(win ~ ., data=df)
summary(linear_model)
########### xgd for wins #####################################################
df <- select(season_all, win, xGD)
linear_model <- lm(win ~ ., data=df)
summary(linear_model)
|
8f8f8b451f04e195355cd4ddaa2a6909dd3abda5
|
6157f5e76faaae00866a71e4911dc0dc0d5c8f04
|
/inst/doc/metadata-and-data-units.R
|
287b4c80949c2d45e7e96f8d80c08befcb1facb5
|
[] |
no_license
|
cran/sapfluxnetr
|
78ba9f3a550723d0db7188afec669ba6cb3105ee
|
6eff89be6271507f642449f5b2c40beae831eda5
|
refs/heads/master
| 2023-02-04T22:13:44.103316
| 2023-01-25T14:30:02
| 2023-01-25T14:30:02
| 184,409,486
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,490
|
r
|
metadata-and-data-units.R
|
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----describe_md_variable-----------------------------------------------------
library(sapfluxnetr)
describe_md_variable('si_elev')
describe_md_variable('st_age')
## ----md_vars_table, echo=FALSE, results='asis'--------------------------------
suppressMessages(library(dplyr))
library(magrittr)
site_md_table <- sapfluxnetr:::.metadata_architecture() %>%
magrittr::extract2(., 'site_md') %>%
purrr::map_dfr(magrittr::extract, c('description', 'type', 'units')) %>%
dplyr::mutate(
variable = sapfluxnetr:::.metadata_architecture() %>%
magrittr::extract2(., 'site_md') %>%
names()
) %>%
select(variable, everything())
stand_md_table <- sapfluxnetr:::.metadata_architecture() %>%
magrittr::extract2(., 'stand_md') %>%
purrr::map_dfr(magrittr::extract, c('description', 'type', 'units')) %>%
dplyr::mutate(
variable = sapfluxnetr:::.metadata_architecture() %>%
magrittr::extract2(., 'stand_md') %>%
names()
) %>%
select(variable, everything())
species_md_table <- sapfluxnetr:::.metadata_architecture() %>%
magrittr::extract2(., 'species_md') %>%
purrr::map_dfr(magrittr::extract, c('description', 'type', 'units')) %>%
dplyr::mutate(
variable = sapfluxnetr:::.metadata_architecture() %>%
magrittr::extract2(., 'species_md') %>%
names()
) %>%
select(variable, everything())
plant_md_table <- sapfluxnetr:::.metadata_architecture() %>%
magrittr::extract2(., 'plant_md') %>%
purrr::map_dfr(magrittr::extract, c('description', 'type', 'units')) %>%
dplyr::mutate(
variable = sapfluxnetr:::.metadata_architecture() %>%
magrittr::extract2(., 'plant_md') %>%
names()
) %>%
select(variable, everything())
env_md_table <- sapfluxnetr:::.metadata_architecture() %>%
magrittr::extract2(., 'env_md') %>%
purrr::map_dfr(magrittr::extract, c('description', 'type', 'units')) %>%
dplyr::mutate(
variable = sapfluxnetr:::.metadata_architecture() %>%
magrittr::extract2(., 'env_md') %>%
names()
) %>%
select(variable, everything())
bind_rows(
site_md_table, stand_md_table, species_md_table, plant_md_table, env_md_table
) %>%
xtable::xtable(align = c('lcccc')) %>%
print(type = 'html')
## ----environmetal_vars_table, echo=FALSE, results='asis'----------------------
tibble::tibble(
Variable = c(
'env_ta', 'env_rh', 'env_vpd', 'env_sw_in', 'env_ppfd', 'env_netrad',
'env_ws', 'env_precip', 'env_swc_shallow', 'env_swc_deep'
),
Description = c(
'Air temperature', 'Air relative humidity',
'Vapour pressure deficit', 'Shortwave incoming radiation',
'Incoming photosynthetic photon flux density',
'Net radiation', 'Wind speed', 'Precipitation',
'Shallow soil water content',
'Deep soil water content'
),
Units = c(
'ยบC', '%', 'kPa', 'W m-2', 'micromols m-2 s-1', 'W m-2', 'm s-1', 'mm timestep-1',
'cm3 cm-3', 'cm3 cm-3'
)
) %>%
xtable::xtable(align = c('lccc')) %>%
print(type = 'html')
## ----TIMESTAMP_var------------------------------------------------------------
library(dplyr)
library(lubridate)
# timezone provided by contributor
get_env_md(ARG_TRE) %>% pull(env_time_zone)
# timezone in the TIMESTAMP
get_timestamp(ARG_TRE) %>% tz()
## ----solar_TIMESTAMP----------------------------------------------------------
get_solar_timestamp(ARG_TRE) %>% tz()
|
efe6fd2a890f407c61938d01b09abc87d9a4ce56
|
df0e9f804c7708481b021f20b3a9d372fc752254
|
/R/print.bal.tab.R
|
efb3038a164c3a5edf8960d468c265eb3e60c32f
|
[] |
no_license
|
Zchristian955/cobalt
|
3a132bca1d6a7fe3286e9d0f7154e072766a2f79
|
92596ad30186a06f263db8b32c005989c720f345
|
refs/heads/master
| 2023-03-14T20:50:03.661026
| 2021-03-30T08:50:18
| 2021-03-30T08:50:18
| 436,739,071
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 58,339
|
r
|
print.bal.tab.R
|
print.bal.tab <- function(x, imbalanced.only, un, disp.bal.tab, disp.call,
stats, disp.thresholds, disp,
which.subclass, subclass.summary,
which.imp, imp.summary, imp.fun,
which.treat, multi.summary,
which.time, msm.summary,
which.cluster, cluster.summary, cluster.fun,
digits = max(3, getOption("digits") - 3), ...) {
#Replace .all and .none with NULL and NA respectively
.call <- match.call(expand.dots = TRUE)
if (any(sapply(seq_along(.call), function(x) identical(as.character(.call[[x]]), ".all") || identical(as.character(.call[[x]]), ".none")))) {
.call[sapply(seq_along(.call), function(x) identical(as.character(.call[[x]]), ".all"))] <- expression(NULL)
.call[sapply(seq_along(.call), function(x) identical(as.character(.call[[x]]), ".none"))] <- expression(NA)
return(eval.parent(.call))
}
tryCatch(args <- c(as.list(environment()), list(...))[-1], error = function(e) stop(conditionMessage(e), call. = FALSE))
args[vapply(args, rlang::is_missing, logical(1L))] <- NULL
unpack_p.ops <- function(b) {
out <- do.call("print_process", c(list(b), args), quote = TRUE)
if (is_(b, c("bal.tab.bin", "bal.tab.cont"))) return(out)
else {
b_ <- b[[which(endsWith(names(b), ".Balance"))]][[1]]
if (is_(b_, "bal.tab")) out <- c(out, unpack_p.ops(b_))
else return(out)
}
}
p.ops <- unpack_p.ops(x)
#Prevent exponential notation printing
op <- options(scipen=getOption("scipen"))
options(scipen = 999)
on.exit(options(op))
bal.tab_print(x, p.ops)
}
bal.tab_print <- function(x, p.ops) {
UseMethod("bal.tab_print")
}
bal.tab_print.bal.tab <- function(x, p.ops) {
call <- if (p.ops$disp.call) x$call else NULL
balance <- x$Balance
thresholds <- setdiff(names(p.ops$thresholds), p.ops$drop.thresholds)
baltal <- setNames(x[paste.("Balanced", thresholds)], thresholds)
maximbal <- setNames(x[paste.("Max.Imbalance", thresholds)], thresholds)
nn <- x$Observations
if (is_not_null(call)) {
cat(underline("Call") %+% "\n " %+% paste(deparse(call), collapse = "\n") %+% "\n\n")
}
if (p.ops$disp.bal.tab) {
if (p.ops$imbalanced.only) {
keep.row <- rowSums(apply(balance[grepl(".Threshold", names(balance), fixed = TRUE)], 2, function(x) !is.na(x) & startsWith(x, "Not Balanced"))) > 0
}
else keep.row <- rep(TRUE, nrow(balance))
keep.col <- setNames(as.logical(c(TRUE,
rep(unlist(lapply(p.ops$compute[p.ops$compute %nin% all_STATS()], function(s) {
p.ops$un && s %in% p.ops$disp
})), switch(p.ops$type, bin = 2, cont = 1)),
unlist(lapply(p.ops$compute[p.ops$compute %in% all_STATS()[!get_from_STATS("adj_only")]], function(s) {
c(p.ops$un && s %in% p.ops$disp,
if (p.ops$un && !p.ops$disp.adj && is_not_null(p.ops$thresholds[[s]])) s %in% thresholds)
})),
rep(c(rep(unlist(lapply(p.ops$compute[p.ops$compute %nin% all_STATS()], function(s) {
p.ops$disp.adj && s %in% p.ops$disp
})), switch(p.ops$type, bin = 2, cont = 1)),
unlist(lapply(p.ops$compute[p.ops$compute %in% all_STATS()], function(s) {
c(p.ops$disp.adj && s %in% p.ops$disp,
if (p.ops$disp.adj && is_not_null(p.ops$thresholds[[s]])) s %in% thresholds)
}))
),
p.ops$nweights + !p.ops$disp.adj))),
names(balance))
cat(underline("Balance Measures") %+% "\n")
if (all(!keep.row)) cat(italic("All covariates are balanced.") %+% "\n")
else print.data.frame_(round_df_char(balance[keep.row, keep.col, drop = FALSE], p.ops$digits, na_vals = "."))
cat("\n")
}
for (s in p.ops$compute) {
if (is_not_null(baltal[[s]])) {
cat(underline(paste("Balance tally for", STATS[[s]]$balance_tally_for)) %+% "\n")
print.data.frame_(baltal[[s]])
cat("\n")
}
if (is_not_null(maximbal[[s]])) {
cat(underline(paste("Variable with the greatest", STATS[[s]]$variable_with_the_greatest)) %+% "\n")
print.data.frame_(round_df_char(maximbal[[s]], p.ops$digits, na_vals = "."), row.names = FALSE)
cat("\n")
}
}
if (is_not_null(nn)) {
drop.nn <- rowSums(nn) == 0
ss.type <- attr(nn, "ss.type")[!drop.nn]
nn <- nn[!drop.nn, , drop = FALSE]
if (all(c("Matched (ESS)", "Matched (Unweighted)") %in% rownames(nn)) &&
all(check_if_zero(nn["Matched (ESS)",] - nn["Matched (Unweighted)",]))) {
nn <- nn[rownames(nn)!="Matched (Unweighted)", , drop = FALSE]
rownames(nn)[rownames(nn) == "Matched (ESS)"] <- "Matched"
}
cat(underline(attr(nn, "tag")) %+% "\n")
print.warning <- FALSE
if (length(ss.type) > 1 && nunique.gt(ss.type[-1], 1)) {
ess <- ifelse(ss.type == "ess", "*", "")
nn <- setNames(cbind(nn, ess), c(names(nn), ""))
print.warning <- TRUE
}
print.data.frame_(round_df_char(nn, digits = min(2, p.ops$digits), pad = " "))
if (print.warning) cat(italic("* indicates effective sample size"))
}
invisible(x)
}
bal.tab_print.bal.tab.cluster <- function(x, p.ops) {
call <- if (p.ops$disp.call) x$call else NULL
c.balance <- x$Cluster.Balance
c.balance.summary <- x$Balance.Across.Clusters
thresholds <- setdiff(names(p.ops$thresholds), p.ops$drop.thresholds)
baltal <- setNames(x[paste.("Balanced", thresholds)], thresholds)
maximbal <- setNames(x[paste.("Max.Imbalance", thresholds)], thresholds)
nn <- x$Observations
#Printing
if (is_not_null(call)) {
cat(underline("Call") %+% "\n " %+% paste(deparse(call), collapse = "\n") %+% "\n\n")
}
if (is_not_null(p.ops$which.cluster)) {
cat(underline("Balance by cluster") %+% "\n")
for (i in p.ops$which.cluster) {
cat("\n - - - " %+% italic("Cluster: " %+% names(c.balance)[i]) %+% " - - - \n")
bal.tab_print(c.balance[[i]], p.ops)
}
cat(paste0(paste(rep(" -", round(nchar(paste0("\n - - - Cluster: ", names(c.balance)[i], " - - - "))/2)), collapse = ""), " \n"))
cat("\n")
}
if (isTRUE(as.logical(p.ops$cluster.summary)) && is_not_null(c.balance.summary)) {
s.keep.col <- setNames(as.logical(c(TRUE,
unlist(lapply(p.ops$compute[p.ops$compute %in% all_STATS()[!get_from_STATS("adj_only")]], function(s) {
c(unlist(lapply(p.ops$computed.cluster.funs, function(af) {
p.ops$un && s %in% p.ops$disp && af %in% p.ops$cluster.fun
})),
if (p.ops$un && !p.ops$disp.adj && length(p.ops$cluster.fun) == 1 && is_not_null(p.ops$thresholds[[s]])) s %in% thresholds)
})),
rep(
unlist(lapply(p.ops$compute[p.ops$compute %in% all_STATS()], function(s) {
c(unlist(lapply(p.ops$computed.cluster.funs, function(af) {
p.ops$disp.adj && s %in% p.ops$disp && af %in% p.ops$cluster.fun
})),
if (p.ops$disp.adj && length(p.ops$cluster.fun) == 1 && is_not_null(p.ops$thresholds[[s]])) s %in% thresholds)
})),
p.ops$nweights + !p.ops$disp.adj)
)), names(c.balance.summary))
if (p.ops$disp.bal.tab) {
cat(underline("Balance summary across all clusters") %+% "\n")
print.data.frame_(round_df_char(c.balance.summary[, s.keep.col, drop = FALSE], p.ops$digits, na_vals = "."))
cat("\n")
}
for (s in p.ops$compute) {
if (is_not_null(baltal[[s]])) {
cat(underline(paste("Balance tally for", STATS[[s]]$balance_tally_for)) %+% "\n")
print.data.frame_(baltal[[s]])
cat("\n")
}
if (is_not_null(maximbal[[s]])) {
cat(underline(paste("Variable with the greatest", STATS[[s]]$variable_with_the_greatest)) %+% "\n")
print.data.frame_(round_df_char(maximbal[[s]], p.ops$digits, na_vals = "."), row.names = FALSE)
cat("\n")
}
}
if (is_not_null(nn)) {
drop.nn <- rowSums(nn) == 0
ss.type <- attr(nn, "ss.type")[!drop.nn]
nn <- nn[!drop.nn, , drop = FALSE]
if (all(c("Matched (ESS)", "Matched (Unweighted)") %in% rownames(nn)) &&
all(check_if_zero(nn["Matched (ESS)",] - nn["Matched (Unweighted)",]))) {
nn <- nn[rownames(nn)!="Matched (Unweighted)", , drop = FALSE]
rownames(nn)[rownames(nn) == "Matched (ESS)"] <- "Matched"
}
cat(underline(attr(nn, "tag")) %+% "\n")
print.warning <- FALSE
if (length(ss.type) > 1 && nunique.gt(ss.type[-1], 1)) {
ess <- ifelse(ss.type == "ess", "*", "")
nn <- setNames(cbind(nn, ess), c(names(nn), ""))
print.warning <- TRUE
}
print.data.frame_(round_df_char(nn, digits = min(2, p.ops$digits), pad = " "))
if (print.warning) cat(italic("* indicates effective sample size"))
}
}
invisible(x)
}
bal.tab_print.bal.tab.imp <- function(x, p.ops) {
call <- if (p.ops$disp.call) x$call else NULL
i.balance <- x[["Imputation.Balance"]]
i.balance.summary <- x[["Balance.Across.Imputations"]]
thresholds <- setdiff(names(p.ops$thresholds), p.ops$drop.thresholds)
baltal <- setNames(x[paste.("Balanced", thresholds)], thresholds)
maximbal <- setNames(x[paste.("Max.Imbalance", thresholds)], thresholds)
nn <- x$Observations
#Printing output
if (is_not_null(call)) {
cat(underline("Call") %+% "\n " %+% paste(deparse(call), collapse = "\n") %+% "\n\n")
}
if (is_not_null(p.ops$which.imp)) {
cat(underline("Balance by imputation") %+% "\n")
for (i in p.ops$which.imp) {
cat("\n - - - " %+% italic("Imputation " %+% names(i.balance)[i]) %+% " - - - \n")
bal.tab_print(i.balance[[i]], p.ops)
}
cat(paste0(paste(rep(" -", round(nchar(paste0("\n - - - Imputation: ", names(i.balance)[i], " - - - "))/2)), collapse = ""), " \n"))
cat("\n")
}
if (isTRUE(as.logical(p.ops$imp.summary)) && is_not_null(i.balance.summary)) {
s.keep.col <- as.logical(c(TRUE,
unlist(lapply(p.ops$compute[p.ops$compute %in% all_STATS()[!get_from_STATS("adj_only")]], function(s) {
c(unlist(lapply(p.ops$computed.imp.funs, function(af) {
p.ops$un && s %in% p.ops$disp && af %in% p.ops$imp.fun
})),
if (p.ops$un && !p.ops$disp.adj && length(p.ops$imp.fun) == 1 && is_not_null(p.ops$thresholds[[s]])) s %in% thresholds)
})),
rep(
unlist(lapply(p.ops$compute[p.ops$compute %in% all_STATS()], function(s) {
c(unlist(lapply(p.ops$computed.imp.funs, function(af) {
p.ops$disp.adj && s %in% p.ops$disp && af %in% p.ops$imp.fun
})),
if (p.ops$disp.adj && length(p.ops$imp.fun) == 1 && is_not_null(p.ops$thresholds[[s]])) s %in% thresholds)
})),
p.ops$nweights + !p.ops$disp.adj)
))
if (p.ops$disp.bal.tab) {
cat(underline("Balance summary across all imputations") %+% "\n")
print.data.frame_(round_df_char(i.balance.summary[, s.keep.col, drop = FALSE], p.ops$digits, na_vals = "."))
cat("\n")
}
for (s in p.ops$compute) {
if (is_not_null(baltal[[s]])) {
cat(underline(paste("Balance tally for", STATS[[s]]$balance_tally_for)) %+% "\n")
print.data.frame_(baltal[[s]])
cat("\n")
}
if (is_not_null(maximbal[[s]])) {
cat(underline(paste("Variable with the greatest", STATS[[s]]$variable_with_the_greatest)) %+% "\n")
print.data.frame_(round_df_char(maximbal[[s]], p.ops$digits, na_vals = "."), row.names = FALSE)
cat("\n")
}
}
if (is_not_null(nn)) {
drop.nn <- rowSums(nn) == 0
ss.type <- attr(nn, "ss.type")[!drop.nn]
nn <- nn[!drop.nn, , drop = FALSE]
if (all(c("Matched (ESS)", "Matched (Unweighted)") %in% rownames(nn)) &&
all(check_if_zero(nn["Matched (ESS)",] - nn["Matched (Unweighted)",]))) {
nn <- nn[rownames(nn)!="Matched (Unweighted)", , drop = FALSE]
rownames(nn)[rownames(nn) == "Matched (ESS)"] <- "Matched"
}
cat(underline(attr(nn, "tag")) %+% "\n")
print.warning <- FALSE
if (length(ss.type) > 1 && nunique.gt(ss.type[-1], 1)) {
ess <- ifelse(ss.type == "ess", "*", "")
nn <- setNames(cbind(nn, ess), c(names(nn), ""))
print.warning <- TRUE
}
print.data.frame_(round_df_char(nn, digits = min(2, p.ops$digits), pad = " "))
if (print.warning) cat(italic("* indicates effective sample size"))
}
}
invisible(x)
}
bal.tab_print.bal.tab.multi <- function(x, p.ops) {
call <- if (p.ops$disp.call) x$call else NULL
m.balance <- x[["Pair.Balance"]]
m.balance.summary <- x[["Balance.Across.Pairs"]]
thresholds <- setdiff(names(p.ops$thresholds), p.ops$drop.thresholds)
baltal <- setNames(x[paste.("Balanced", thresholds)], thresholds)
maximbal <- setNames(x[paste.("Max.Imbalance", thresholds)], thresholds)
nn <- x$Observations
#Printing output
if (is_not_null(call)) {
cat(underline("Call") %+% "\n " %+% paste(deparse(call), collapse = "\n") %+% "\n\n")
}
if (is_not_null(p.ops$disp.treat.pairs)) {
headings <- setNames(character(length(p.ops$disp.treat.pairs)), p.ops$disp.treat.pairs)
if (p.ops$pairwise) cat(underline("Balance by treatment pair") %+% "\n")
else cat(underline("Balance by treatment group") %+% "\n")
for (i in p.ops$disp.treat.pairs) {
headings[i] <- "\n - - - " %+% italic(attr(m.balance[[i]], "print.options")$treat_names[1] %+% " (0) vs. " %+%
attr(m.balance[[i]], "print.options")$treat_names[2] %+% " (1)") %+% " - - - \n"
cat(headings[i])
bal.tab_print(m.balance[[i]], p.ops)
}
cat(paste0(paste(rep(" -", round(max(nchar(headings))/2)), collapse = ""), " \n"))
cat("\n")
}
if (isTRUE(as.logical(p.ops$multi.summary)) && is_not_null(m.balance.summary)) {
if (p.ops$imbalanced.only) {
keep.row <- rowSums(apply(m.balance.summary[grepl(".Threshold", names(m.balance.summary), fixed = TRUE)], 2, function(x) !is.na(x) & startsWith(x, "Not Balanced"))) > 0
}
else keep.row <- rep(TRUE, nrow(m.balance.summary))
computed.agg.funs <- "max"
s.keep.col <- as.logical(c(TRUE,
unlist(lapply(p.ops$compute[p.ops$compute %in% all_STATS("bin")[!get_from_STATS("adj_only")]], function(s) {
c(unlist(lapply(computed.agg.funs, function(af) {
p.ops$un && s %in% p.ops$disp && af %in% "max"
})),
if (p.ops$un && !p.ops$disp.adj && is_not_null(p.ops$thresholds[[s]])) s %in% thresholds)
})),
rep(
unlist(lapply(p.ops$compute[p.ops$compute %in% all_STATS("bin")], function(s) {
c(unlist(lapply(computed.agg.funs, function(af) {
p.ops$disp.adj && s %in% p.ops$disp && af %in% "max"
})),
if (p.ops$disp.adj && is_not_null(p.ops$thresholds[[s]])) s %in% thresholds)
})),
p.ops$nweights + !p.ops$disp.adj)
))
names(s.keep.col) <- names(m.balance.summary)
if (p.ops$disp.bal.tab) {
cat(underline("Balance summary across all treatment pairs") %+% "\n")
if (all(!keep.row)) cat(italic("All covariates are balanced.") %+% "\n")
else print.data.frame_(round_df_char(m.balance.summary[keep.row, s.keep.col, drop = FALSE], p.ops$digits, na_vals = "."))
cat("\n")
}
for (s in p.ops$compute) {
if (is_not_null(baltal[[s]])) {
cat(underline(paste("Balance tally for", STATS[[s]]$balance_tally_for)) %+% "\n")
print.data.frame_(baltal[[s]])
cat("\n")
}
if (is_not_null(maximbal[[s]])) {
cat(underline(paste("Variable with the greatest", STATS[[s]]$variable_with_the_greatest)) %+% "\n")
print.data.frame_(round_df_char(maximbal[[s]], p.ops$digits, na_vals = "."), row.names = FALSE)
cat("\n")
}
}
if (is_not_null(nn)) {
tag <- attr(nn, "tag")
drop.nn <- rowSums(nn) == 0
ss.type <- attr(nn, "ss.type")[!drop.nn]
nn <- nn[!drop.nn, , drop = FALSE]
if (all(c("Matched (ESS)", "Matched (Unweighted)") %in% rownames(nn)) &&
all(check_if_zero(nn["Matched (ESS)",] - nn["Matched (Unweighted)",]))) {
nn <- nn[rownames(nn)!="Matched (Unweighted)", , drop = FALSE]
rownames(nn)[rownames(nn) == "Matched (ESS)"] <- "Matched"
}
cat(underline(tag) %+% "\n")
print.warning <- FALSE
if (length(ss.type) > 1 && nunique.gt(ss.type[-1], 1)) {
ess <- ifelse(ss.type == "ess", "*", "")
nn <- setNames(cbind(nn, ess), c(names(nn), ""))
print.warning <- TRUE
}
print.data.frame_(round_df_char(nn, digits = min(2, p.ops$digits), pad = " "))
if (print.warning) cat(italic("* indicates effective sample size"))
}
}
invisible(x)
}
bal.tab_print.bal.tab.msm <- function(x, p.ops){
call <- if (p.ops$disp.call) x$call else NULL
msm.balance <- x[["Time.Balance"]]
msm.balance.summary <- x[["Balance.Across.Times"]]
thresholds <- setdiff(names(p.ops$thresholds), p.ops$drop.thresholds)
baltal <- setNames(x[paste.("Balanced", thresholds)], thresholds)
maximbal <- setNames(x[paste.("Max.Imbalance", thresholds)], thresholds)
nn <- x$Observations
#Printing output
if (is_not_null(call)) {
cat(underline("Call") %+% "\n " %+% paste(deparse(call), collapse = "\n") %+% "\n\n")
}
if (is_not_null(p.ops$which.time)) {
cat(underline("Balance by Time Point") %+% "\n")
for (i in p.ops$which.time) {
cat("\n - - - " %+% italic("Time: " %+% as.character(i)) %+% " - - - \n")
bal.tab_print(msm.balance[[i]], p.ops)
}
cat(paste0(paste(rep(" -", round(nchar(paste0("\n - - - Time: ", i, " - - - "))/2)), collapse = ""), " \n"))
cat("\n")
}
if (isTRUE(as.logical(p.ops$msm.summary)) && is_not_null(msm.balance.summary)) {
if (p.ops$imbalanced.only) {
keep.row <- rowSums(apply(msm.balance.summary[grepl(".Threshold", names(msm.balance.summary), fixed = TRUE)], 2, function(x) !is.na(x) & startsWith(x, "Not Balanced"))) > 0
}
else keep.row <- rep(TRUE, nrow(msm.balance.summary))
computed.agg.funs <- "max"
s.keep.col <- as.logical(c(TRUE,
TRUE,
unlist(lapply(p.ops$compute[p.ops$compute %in% all_STATS()[!get_from_STATS("adj_only")]], function(s) {
c(unlist(lapply(computed.agg.funs, function(af) {
p.ops$un && s %in% p.ops$disp && af %in% "max"
})),
if (p.ops$un && !p.ops$disp.adj && is_not_null(p.ops$thresholds[[s]])) s %in% thresholds)
})),
rep(
unlist(lapply(p.ops$compute[p.ops$compute %in% all_STATS()], function(s) {
c(unlist(lapply(computed.agg.funs, function(af) {
p.ops$disp.adj && s %in% p.ops$disp && af %in% "max"
})),
if (p.ops$disp.adj && is_not_null(p.ops$thresholds[[s]])) s %in% thresholds)
})),
p.ops$nweights + !p.ops$disp.adj)
))
if (p.ops$disp.bal.tab) {
cat(underline("Balance summary across all time points") %+% "\n")
if (all(!keep.row)) cat(italic("All covariates are balanced.") %+% "\n")
else print.data.frame_(round_df_char(msm.balance.summary[keep.row, s.keep.col, drop = FALSE], p.ops$digits, na_vals = "."))
cat("\n")
}
for (s in p.ops$compute) {
if (is_not_null(baltal[[s]])) {
cat(underline(paste("Balance tally for", STATS[[s]]$balance_tally_for)) %+% "\n")
print.data.frame_(baltal[[s]])
cat("\n")
}
if (is_not_null(maximbal[[s]])) {
cat(underline(paste("Variable with the greatest", STATS[[s]]$variable_with_the_greatest)) %+% "\n")
print.data.frame_(round_df_char(maximbal[[s]], p.ops$digits, na_vals = "."), row.names = FALSE)
cat("\n")
}
}
if (is_not_null(nn)) {
print.warning <- FALSE
cat(underline(attr(nn[[1]], "tag")) %+% "\n")
for (ti in seq_along(nn)) {
cat(" - " %+% italic("Time " %+% as.character(ti)) %+% "\n")
drop.nn <- rowSums(nn[[ti]]) == 0
ss.type <- attr(nn[[ti]], "ss.type")[!drop.nn]
nn[[ti]] <- nn[[ti]][!drop.nn, , drop = FALSE]
if (all(c("Matched (ESS)", "Matched (Unweighted)") %in% rownames(nn[[ti]])) &&
all(check_if_zero(nn[[ti]]["Matched (ESS)",] - nn[[ti]]["Matched (Unweighted)",]))) {
nn[[ti]] <- nn[[ti]][rownames(nn[[ti]])!="Matched (Unweighted)", , drop = FALSE]
rownames(nn[[ti]])[rownames(nn[[ti]]) == "Matched (ESS)"] <- "Matched"
}
if (length(ss.type) > 1 && nunique.gt(ss.type[-1], 1)) {
ess <- ifelse(ss.type == "ess", "*", "")
nn[[ti]] <- setNames(cbind(nn[[ti]], ess), c(names(nn[[ti]]), ""))
print.warning <- TRUE
}
print.data.frame_(round_df_char(nn[[ti]], digits = min(2, p.ops$digits), pad = " "))
}
if (print.warning) cat(italic("* indicates effective sample size"))
}
}
invisible(x)
}
# Print workhorse for bal.tab objects computed with subclassification.
# Depending on the resolved print options in 'p.ops', displays: the original
# call, one balance table per requested subclass, the balance table aggregated
# across subclasses, per-threshold tally / worst-variable summaries, and the
# subclass sample-size table. Returns 'x' invisibly.
#
# Fix: when imbalanced.only = TRUE, the per-subclass row filter previously
# searched for ".Threshold" columns in names(s.balance) (the *list* of
# subclass tables) instead of names(s.balance[[i]]) (the columns of subclass
# i's own table), so no threshold column was ever selected and every
# covariate was reported as balanced. It now inspects names(s.balance[[i]]),
# matching how the across-subclass table is filtered below. The anonymous
# functions also no longer shadow the argument 'x'.
bal.tab_print.bal.tab.subclass <- function(x, p.ops) {
  call <- if (p.ops$disp.call) x$call else NULL
  s.balance <- x$Subclass.Balance
  b.a.subclass <- x$Balance.Across.Subclass
  s.nn <- x$Observations
  # Thresholds to display: those requested minus those print_process() marked
  # for dropping.
  thresholds <- setdiff(names(p.ops$thresholds), p.ops$drop.thresholds)
  baltal <- setNames(x[paste.("Balanced", thresholds, "Subclass")], thresholds)
  maximbal <- setNames(x[paste.("Max.Imbalance", thresholds, "Subclass")], thresholds)
  if (is_not_null(call)) {
    cat(underline("Call") %+% "\n " %+% paste(deparse(call), collapse = "\n") %+% "\n\n")
  }
  # Print subclass balance
  if (p.ops$disp.bal.tab) {
    if (is_not_null(p.ops$which.subclass)) {
      # Column mask: keep the variable-type column plus each statistic /
      # threshold column that was requested for display.
      s.keep.col <- setNames(c(TRUE,
                               rep(unlist(lapply(p.ops$compute[p.ops$compute %nin% all_STATS()], function(s) {
                                 s %in% p.ops$disp
                               })), switch(p.ops$type, bin = 2, cont = 1)),
                               unlist(lapply(p.ops$compute[p.ops$compute %in% all_STATS()], function(s) {
                                 c(s %in% p.ops$disp,
                                   if (is_not_null(p.ops$thresholds[[s]])) s %in% thresholds)
                               }))),
                             names(s.balance[[1]]))
      cat(underline("Balance by subclass"))
      for (i in p.ops$which.subclass) {
        if (p.ops$imbalanced.only) {
          # Keep rows flagged "Not Balanced" in at least one ".Threshold"
          # column of this subclass's table.
          s.keep.row <- rowSums(apply(s.balance[[i]][grepl(".Threshold", names(s.balance[[i]]), fixed = TRUE)], 2, function(z) !is.na(z) & startsWith(z, "Not Balanced"))) > 0
        }
        else s.keep.row <- rep(TRUE, nrow(s.balance[[i]]))
        cat("\n - - - " %+% italic("Subclass " %+% as.character(i)) %+% " - - - \n")
        if (all(!s.keep.row)) cat(italic("All covariates are balanced.") %+% "\n")
        else print.data.frame_(round_df_char(s.balance[[i]][s.keep.row, s.keep.col, drop = FALSE], p.ops$digits, na_vals = "."))
      }
      cat("\n")
    }
  }
  # Print balance across subclasses
  if (p.ops$subclass.summary && is_not_null(b.a.subclass)) {
    if (p.ops$disp.bal.tab) {
      if (p.ops$imbalanced.only) {
        a.s.keep.row <- rowSums(apply(b.a.subclass[grepl(".Threshold", names(b.a.subclass), fixed = TRUE)], 2, function(z) !is.na(z) & startsWith(z, "Not Balanced"))) > 0
      }
      else a.s.keep.row <- rep(TRUE, nrow(b.a.subclass))
      # Column mask as above, but additionally honoring the un (unadjusted)
      # and disp.adj (adjusted) display settings.
      a.s.keep.col <- setNames(as.logical(c(TRUE,
                                            rep(unlist(lapply(p.ops$compute[p.ops$compute %nin% all_STATS()], function(s) {
                                              p.ops$un && s %in% p.ops$disp
                                            })), switch(p.ops$type, bin = 2, cont = 1)),
                                            unlist(lapply(p.ops$compute[p.ops$compute %in% all_STATS()[!get_from_STATS("adj_only")]], function(s) {
                                              c(p.ops$un && s %in% p.ops$disp,
                                                if (p.ops$un && !p.ops$disp.adj && is_not_null(p.ops$thresholds[[s]])) s %in% thresholds)
                                            })),
                                            rep(c(rep(unlist(lapply(p.ops$compute[p.ops$compute %nin% all_STATS()], function(s) {
                                              p.ops$disp.adj && s %in% p.ops$disp
                                            })), 2),
                                            unlist(lapply(p.ops$compute[p.ops$compute %in% all_STATS()], function(s) {
                                              c(p.ops$disp.adj && s %in% p.ops$disp,
                                                if (p.ops$disp.adj && is_not_null(p.ops$thresholds[[s]])) s %in% thresholds)
                                            }))
                                            ),
                                            p.ops$disp.adj)
      )),
      names(b.a.subclass))
      cat(underline("Balance measures across subclasses") %+% "\n")
      if (all(!a.s.keep.row)) cat(italic("All covariates are balanced.") %+% "\n")
      else print.data.frame_(round_df_char(b.a.subclass[a.s.keep.row, a.s.keep.col, drop = FALSE], p.ops$digits, na_vals = "."))
      cat("\n")
    }
    # Per-statistic threshold summaries (tally + worst offender).
    for (s in p.ops$compute) {
      if (is_not_null(baltal[[s]])) {
        cat(underline(paste("Balance tally for", STATS[[s]]$balance_tally_for, "across subclasses")) %+% "\n")
        print.data.frame_(baltal[[s]])
        cat("\n")
      }
      if (is_not_null(maximbal[[s]])) {
        cat(underline(paste("Variable with the greatest", STATS[[s]]$variable_with_the_greatest, "across subclasses")) %+% "\n")
        print.data.frame_(round_df_char(maximbal[[s]], p.ops$digits, na_vals = "."), row.names = FALSE)
        cat("\n")
      }
    }
    # Sample sizes by subclass.
    if (is_not_null(s.nn)) {
      cat(underline(attr(s.nn, "tag")) %+% "\n")
      print.data.frame_(round_df_char(s.nn, digits = min(2, p.ops$digits), pad = " "))
    }
  }
  invisible(x)
}
#Process arguments
# Generic: resolve and validate print-time display arguments (passed to
# print()) against the options stored in the bal.tab object, returning a list
# of finalized print options for the class-specific print workhorse to use.
print_process <- function(x, ...) {
UseMethod("print_process")
}
# print_process method for clustered bal.tab objects. Resolves
# 'which.cluster' (.all / .none / indices / names), 'cluster.summary', and
# 'cluster.fun' against the options recorded when bal.tab() was run, and
# returns a list of resolved options.
print_process.bal.tab.cluster <- function(x, which.cluster, cluster.summary, cluster.fun, ...) {
A <- list(...)
c.balance <- x$Cluster.Balance
p.ops <- attr(x, "print.options")
if (!missing(cluster.summary)) {
if (!rlang::is_bool(cluster.summary)) stop("'cluster.summary' must be TRUE or FALSE.")
# With quick = TRUE the summary was never computed, so it cannot be shown now.
if (p.ops$quick && p.ops$cluster.summary == FALSE && cluster.summary == TRUE) {
warning("'cluster.summary' cannot be set to TRUE if quick = TRUE in the original call to bal.tab().", call. = FALSE)
}
else p.ops$cluster.summary <- cluster.summary
}
if (!missing(which.cluster)) {
# The sentinels .none / .all are recognized by the unevaluated expression
# text, so they work even if those symbols are not bound.
if (paste(deparse1(substitute(which.cluster)), collapse = "") == ".none") which.cluster <- NA
else if (paste(deparse1(substitute(which.cluster)), collapse = "") == ".all") which.cluster <- NULL
p.ops$which.cluster <- which.cluster
}
if (!p.ops$quick || is_null(p.ops$cluster.fun)) computed.cluster.funs <- c("min", "mean", "max")
else computed.cluster.funs <- p.ops$cluster.fun
if (!missing(cluster.fun) && is_not_null(cluster.fun)) {
if (!is.character(cluster.fun) || !all(cluster.fun %pin% computed.cluster.funs)) stop(paste0("'cluster.fun' must be ", word_list(computed.cluster.funs, and.or = "or", quotes = 2)), call. = FALSE)
}
else {
# Defaults: with absolute statistics, "min" is dropped.
if (p.ops$abs) cluster.fun <- c("mean", "max")
else cluster.fun <- c("min", "mean", "max")
}
cluster.fun <- match_arg(tolower(cluster.fun), computed.cluster.funs, several.ok = TRUE)
#Checks and Adjustments
# Translate which.cluster into integer indices into c.balance. NULL means
# all clusters; NA means none; invalid input falls back to all (with a
# warning).
if (is_null(p.ops$which.cluster))
which.cluster <- seq_along(c.balance)
else if (anyNA(p.ops$which.cluster)) {
which.cluster <- integer(0)
}
else if (is.numeric(p.ops$which.cluster)) {
which.cluster <- intersect(seq_along(c.balance), p.ops$which.cluster)
if (is_null(which.cluster)) {
warning("No indices in 'which.cluster' are cluster indices. Displaying all clusters instead.", call. = FALSE)
which.cluster <- seq_along(c.balance)
}
}
else if (is.character(p.ops$which.cluster)) {
which.cluster <- seq_along(c.balance)[names(c.balance) %in% p.ops$which.cluster]
if (is_null(which.cluster)) {
warning("No names in 'which.cluster' are cluster names. Displaying all clusters instead.", call. = FALSE)
which.cluster <- seq_along(c.balance)
}
}
else {
warning("The argument to 'which.cluster' must be .all, .none, or a vector of cluster indices or cluster names. Displaying all clusters instead.", call. = FALSE)
which.cluster <- seq_along(c.balance)
}
out <- list(cluster.summary = p.ops$cluster.summary,
cluster.fun = cluster.fun,
which.cluster = which.cluster,
computed.cluster.funs = computed.cluster.funs)
out
}
# print_process method for bal.tab objects computed over multiple
# imputations. Mirrors the cluster method: resolves 'which.imp',
# 'imp.summary', and 'imp.fun'. Note that, unlike the cluster method,
# invalid 'which.imp' input results in NO imputations being displayed rather
# than all of them.
print_process.bal.tab.imp <- function(x, which.imp, imp.summary, imp.fun, ...) {
A <- list(...)
i.balance <- x[["Imputation.Balance"]]
p.ops <- attr(x, "print.options")
if (!missing(imp.summary)) {
if (!rlang::is_bool(imp.summary)) stop("'imp.summary' must be TRUE or FALSE.")
# With quick = TRUE the summary was never computed, so it cannot be shown now.
if (p.ops$quick && p.ops$imp.summary == FALSE && imp.summary == TRUE) {
warning("'imp.summary' cannot be set to TRUE if quick = TRUE in the original call to bal.tab().", call. = FALSE)
}
else p.ops$imp.summary <- imp.summary
}
if (!missing(which.imp)) {
# .none / .all sentinels matched by expression text.
if (paste(deparse1(substitute(which.imp)), collapse = "") == ".none") which.imp <- NA
else if (paste(deparse1(substitute(which.imp)), collapse = "") == ".all") which.imp <- NULL
p.ops$which.imp <- which.imp
}
if (!p.ops$quick || is_null(p.ops$imp.fun)) computed.imp.funs <- c("min", "mean", "max")
else computed.imp.funs <- p.ops$imp.fun
if (!missing(imp.fun) && is_not_null(imp.fun)) {
if (!is.character(imp.fun) || !all(imp.fun %pin% computed.imp.funs)) stop(paste0("'imp.fun' must be ", word_list(computed.imp.funs, and.or = "or", quotes = 2)), call. = FALSE)
}
else {
# Defaults: with absolute statistics, "min" is dropped.
if (p.ops$abs) imp.fun <- c("mean", "max")
else imp.fun <- c("min", "mean", "max")
}
imp.fun <- match_arg(tolower(imp.fun), computed.imp.funs, several.ok = TRUE)
#Checks and Adjustments
# Translate which.imp into integer indices into i.balance.
if (is_null(p.ops$which.imp))
which.imp <- seq_along(i.balance)
else if (anyNA(p.ops$which.imp)) {
which.imp <- integer(0)
}
else if (is.numeric(p.ops$which.imp)) {
which.imp <- intersect(seq_along(i.balance), p.ops$which.imp)
if (is_null(which.imp)) {
warning("No numbers in 'which.imp' are imputation numbers. No imputations will be displayed.", call. = FALSE)
which.imp <- integer(0)
}
}
else {
warning("The argument to 'which.imp' must be .all, .none, or a vector of imputation numbers.", call. = FALSE)
which.imp <- integer(0)
}
out <- list(imp.summary = p.ops$imp.summary,
imp.fun = imp.fun,
which.imp = which.imp,
computed.imp.funs = computed.imp.funs)
out
}
# print_process method for multi-category-treatment bal.tab objects.
# Resolves 'which.treat' (.all / .none / names / indices) and 'multi.summary'
# and determines which treatment pairs' balance tables to display.
print_process.bal.tab.multi <- function(x, which.treat, multi.summary, ...) {
A <- list(...)
m.balance <- x[["Pair.Balance"]]
m.balance.summary <- x[["Balance.Across.Pairs"]]
p.ops <- attr(x, "print.options")
if (!missing(multi.summary)) {
if (!rlang::is_bool(multi.summary)) stop("'multi.summary' must be TRUE or FALSE.")
# With quick = TRUE the summary was never computed, so it cannot be shown now.
if (p.ops$quick && p.ops$multi.summary == FALSE && multi.summary == TRUE) {
warning("'multi.summary' cannot be set to TRUE if quick = TRUE in the original call to bal.tab().", call. = FALSE)
}
else p.ops$multi.summary <- multi.summary
}
if (!missing(which.treat)) {
# .none / .all sentinels matched by expression text.
if (paste(deparse1(substitute(which.treat)), collapse = "") == ".none") which.treat <- NA
else if (paste(deparse1(substitute(which.treat)), collapse = "") == ".all") which.treat <- NULL
p.ops$which.treat <- which.treat
}
#Checks and Adjustments
# Normalize which.treat into a character vector of treatment values.
if (is_null(p.ops$which.treat))
which.treat <- p.ops$treat_vals_multi
else if (anyNA(p.ops$which.treat)) {
which.treat <- character(0)
}
else if (!is_(p.ops$which.treat, c("numeric", "character"))) {
warning("The argument to 'which.treat' must be .all, .none, or a vector of treatment names or indices. No treatment pairs will be displayed.", call. = FALSE)
which.treat <- character(0)
}
else {
# With only 2 treatment levels, numeric input is treated as names.
if (length(p.ops$treat_vals_multi) == 2) p.ops$which.treat <- as.character(p.ops$which.treat)
if (is.numeric(p.ops$which.treat)) {
which.treat <- p.ops$treat_vals_multi[seq_along(p.ops$treat_vals_multi) %in% p.ops$which.treat]
if (is_null(which.treat)) {
warning("No numbers in 'which.treat' correspond to treatment values. No treatment pairs will be displayed.", call. = FALSE)
which.treat <- character(0)
}
}
else if (is.character(p.ops$which.treat)) {
which.treat <- p.ops$treat_vals_multi[p.ops$treat_vals_multi %in% p.ops$which.treat]
if (is_null(which.treat)) {
warning("No names in 'which.treat' correspond to treatment values. No treatment pairs will be displayed.", call. = FALSE)
which.treat <- character(0)
}
}
}
# Select the pairs to display: with one requested treatment, any pair
# involving it; with several, only pairs entirely within the requested set.
# When pairwise = FALSE, pairs are each-vs-"All", so "All" is excluded from
# matching.
if (is_null(which.treat)) {
disp.treat.pairs <- character(0)
}
else {
if (p.ops$pairwise) {
if (length(which.treat) == 1) {
disp.treat.pairs <- names(m.balance)[sapply(m.balance, function(z) {
treat_names <- attr(z, "print.options")$treat_names
any(p.ops$treat_vals_multi[treat_names] == which.treat)
})]
}
else {
disp.treat.pairs <- names(m.balance)[sapply(m.balance, function(z) {
treat_names <- attr(z, "print.options")$treat_names
all(p.ops$treat_vals_multi[treat_names] %in% which.treat)
})]
}
}
else {
if (length(which.treat) == 1) {
disp.treat.pairs <- names(m.balance)[sapply(m.balance, function(z) {
treat_names <- attr(z, "print.options")$treat_names
any(p.ops$treat_vals_multi[treat_names[treat_names != "All"]] == which.treat)
})]
}
else {
disp.treat.pairs <- names(m.balance)[sapply(m.balance, function(z) {
treat_names <- attr(z, "print.options")$treat_names
all(p.ops$treat_vals_multi[treat_names[treat_names != "All"]] %in% which.treat)
})]
}
}
}
out <- list(disp.treat.pairs = disp.treat.pairs,
multi.summary = p.ops$multi.summary,
pairwise = p.ops$pairwise)
out
}
# print_process method for longitudinal (MSM) bal.tab objects. Resolves
# 'which.time' (.all / .none / time-point numbers / treatment names) and
# 'msm.summary'. Invalid input results in NO time points being displayed.
print_process.bal.tab.msm <- function(x, which.time, msm.summary, ...) {
A <- list(...)
# Drop missing-argument placeholders and NULLs from ... before use.
A <- clear_null(A[!vapply(A, function(x) identical(x, quote(expr =)), logical(1L))])
msm.balance <- x[["Time.Balance"]]
p.ops <- attr(x, "print.options")
if (!missing(msm.summary)) {
if (!rlang::is_bool(msm.summary)) stop("'msm.summary' must be TRUE or FALSE.")
# With quick = TRUE the summary was never computed, so it cannot be shown now.
if (p.ops$quick && p.ops$msm.summary == FALSE && msm.summary == TRUE) {
warning("'msm.summary' cannot be set to TRUE if quick = TRUE in the original call to bal.tab().", call. = FALSE)
}
else p.ops$msm.summary <- msm.summary
}
if (!missing(which.time)) {
# .none / .all sentinels matched by expression text.
if (paste(deparse1(substitute(which.time)), collapse = "") == ".none") which.time <- NA
else if (paste(deparse1(substitute(which.time)), collapse = "") == ".all") which.time <- NULL
p.ops$which.time <- which.time
}
#Checks and Adjustments
# Translate which.time into integer indices into msm.balance.
if (is_null(p.ops$which.time))
which.time <- seq_along(msm.balance)
else if (anyNA(p.ops$which.time)) {
which.time <- integer(0)
}
else if (is.numeric(p.ops$which.time)) {
which.time <- seq_along(msm.balance)[seq_along(msm.balance) %in% p.ops$which.time]
if (is_null(which.time)) {
warning("No numbers in 'which.time' are treatment time points. No time points will be displayed.", call. = FALSE)
which.time <- integer(0)
}
}
else if (is.character(p.ops$which.time)) {
which.time <- seq_along(msm.balance)[names(msm.balance) %in% p.ops$which.time]
if (is_null(which.time)) {
warning("No names in 'which.time' are treatment names. No time points will be displayed.", call. = FALSE)
which.time <- integer(0)
}
}
else {
warning("The argument to 'which.time' must be .all, .none, or a vector of time point numbers. No time points will be displayed.", call. = FALSE)
which.time <- integer(0)
}
out <- list(msm.summary = p.ops$msm.summary,
which.time = which.time)
out
}
# Default print_process method. Validates and resolves the core display
# arguments (un, disp, stats, per-statistic disp.* flags, thresholds,
# disp.thresholds, disp.bal.tab, imbalanced.only, disp.call, digits) against
# the options recorded at bal.tab() time, and returns a list of finalized
# print options. Statistics/summaries skipped because quick = TRUE cannot be
# re-enabled here; thresholds can only be dropped, not changed.
print_process.bal.tab <- function(x, imbalanced.only, un, disp.bal.tab, disp.call, stats, disp.thresholds, disp, digits = max(3, getOption("digits") - 3), ...) {
A <- list(...)
p.ops <- attr(x, "print.options")
drop.thresholds <- c()
#Adjustments to print options
if (!missing(un) && p.ops$disp.adj) {
if (!rlang::is_bool(un)) stop("'un' must be TRUE or FALSE.", call. = FALSE)
if (p.ops$quick && p.ops$un == FALSE && un == TRUE) {
warning("'un' cannot be set to TRUE if quick = TRUE in the original call to bal.tab().", call. = FALSE)
}
else p.ops$un <- un
}
if (!missing(disp)) {
if (!rlang::is_character(disp)) stop("'disp' must be a character vector.", call. = FALSE)
allowable.disp <- c("means", "sds", all_STATS(p.ops$type))
if (any(disp %nin% allowable.disp)) {
stop(paste(word_list(disp[disp %nin% allowable.disp], and.or = "and", quotes = 2, is.are = TRUE),
"not allowed in 'disp'."), call. = FALSE)
}
if (any(disp %nin% p.ops$compute)) {
warning(paste("'disp' cannot include", word_list(disp[disp %nin% p.ops$compute], and.or = "or", quotes = 2), "if quick = TRUE in the original call to bal.tab()."), call. = FALSE)
}
else p.ops$disp <- disp
}
# Legacy disp.means / disp.sds flags supplied through ... .
if (is_not_null(A[["disp.means"]])) {
if (!rlang::is_bool(A[["disp.means"]])) stop("'disp.means' must be TRUE or FALSE.")
if ("means" %nin% p.ops$compute && A[["disp.means"]] == TRUE) {
warning("'disp.means' cannot be set to TRUE if quick = TRUE in the original call to bal.tab().", call. = FALSE)
}
else p.ops$disp <- unique(c(p.ops$disp, "means"[A[["disp.means"]]]))
}
if (is_not_null(A[["disp.sds"]])) {
if (!rlang::is_bool(A[["disp.sds"]])) stop("'disp.sds' must be TRUE or FALSE.", call. = FALSE)
if ("sds" %nin% p.ops$compute && A[["disp.sds"]] == TRUE) {
warning("'disp.sds' cannot be set to TRUE if quick = TRUE in the original call to bal.tab().", call. = FALSE)
}
else p.ops$disp <- unique(c(p.ops$disp, "sds"[A[["disp.sds"]]]))
}
if (!missing(stats)) {
if (!rlang::is_character(stats)) stop("'stats' must be a string.", call. = FALSE)
stats <- match_arg(stats, all_STATS(p.ops$type), several.ok = TRUE)
stats_in_p.ops <- stats %in% p.ops$compute
if (any(!stats_in_p.ops)) {
# NOTE(review): call. = TRUE here, unlike the other stop() calls in this
# function which use call. = FALSE — possibly unintended; confirm.
stop(paste0("'stats' cannot contain ", word_list(stats[!stats_in_p.ops], and.or = "or", quotes = 2), " when ",
if (sum(!stats_in_p.ops) > 1) "they were " else "it was ",
"not requested in the original call to bal.tab()."), call. = TRUE)
}
else p.ops$disp <- unique(c(p.ops$disp[p.ops$disp %nin% all_STATS()], stats))
}
# Per-statistic display flags supplied through ... (e.g., disp.v.ratio).
for (s in all_STATS(p.ops$type)) {
if (is_not_null(A[[STATS[[s]]$disp_stat]])) {
if (!rlang::is_bool(A[[STATS[[s]]$disp_stat]])) {
stop(paste0("'", STATS[[s]]$disp_stat, "' must be TRUE or FALSE."), call. = FALSE)
}
if (s %nin% p.ops$compute && isTRUE(A[[STATS[[s]]$disp_stat]])) {
warning(paste0("'", STATS[[s]]$disp_stat, "' cannot be set to TRUE if quick = TRUE in the original call to bal.tab()."), call. = FALSE)
}
else p.ops$disp <- unique(c(p.ops$disp, s))
}
}
# Threshold arguments supplied through ... can only be removed (set to NULL)
# at print time, never changed; thresholds for undisplayed stats are dropped.
for (s in p.ops$compute[p.ops$compute %in% all_STATS(p.ops$type)]) {
if (STATS[[s]]$threshold %in% names(A)) {
temp.thresh <- A[[STATS[[s]]$threshold]]
if (is_not_null(temp.thresh) &&
(!is.numeric(temp.thresh) || length(temp.thresh) != 1 ||
is_null(p.ops[["thresholds"]][[s]]) ||
p.ops[["thresholds"]][[s]] != temp.thresh))
stop(paste0("'", STATS[[s]]$threshold, "' must be NULL or left unspecified."))
if (is_null(temp.thresh)) {
drop.thresholds <- c(drop.thresholds, s)
}
}
if (s %nin% p.ops$disp) {
drop.thresholds <- c(drop.thresholds, s)
}
}
if (!missing(disp.thresholds)) {
if (!rlang::is_logical(disp.thresholds) || anyNA(disp.thresholds)) stop("'disp.thresholds' must only contain TRUE or FALSE.", call. = FALSE)
# Unnamed input is matched positionally to the stored thresholds; a single
# value is recycled across all of them.
if (is_null(names(disp.thresholds))) {
if (length(disp.thresholds) <= length(p.ops[["thresholds"]])) {
if (length(disp.thresholds) == 1) disp.thresholds <- rep(disp.thresholds, length(p.ops[["thresholds"]]))
names(disp.thresholds) <- names(p.ops[["thresholds"]])[seq_along(disp.thresholds)]
}
else {
stop("More entries were given to 'disp.thresholds' than there are thresholds in the bal.tab object.", call. = FALSE)
}
}
if (!all(names(disp.thresholds) %pin% names(p.ops[["thresholds"]]))) {
warning(paste0(word_list(names(disp.thresholds)[!names(disp.thresholds) %pin% names(p.ops[["thresholds"]])],
quotes = 2, is.are = TRUE), " not available in thresholds and will be ignored."), call. = FALSE)
disp.thresholds <- disp.thresholds[names(disp.thresholds) %pin% names(p.ops[["thresholds"]])]
}
names(disp.thresholds) <- match_arg(names(disp.thresholds), names(p.ops[["thresholds"]]), several.ok = TRUE)
for (i in names(disp.thresholds)) {
if (!disp.thresholds[i]) {
drop.thresholds <- c(drop.thresholds, i)
}
}
}
if (!missing(disp.bal.tab)) {
if (!rlang::is_bool(disp.bal.tab)) stop("'disp.bal.tab' must be TRUE or FALSE.")
p.ops$disp.bal.tab <- disp.bal.tab
}
if (p.ops$disp.bal.tab) {
if (!missing(imbalanced.only)) {
if (!rlang::is_bool(imbalanced.only)) stop("'imbalanced.only' must be TRUE or FALSE.")
p.ops$imbalanced.only <- imbalanced.only
}
# imbalanced.only is meaningless without a threshold to define "imbalanced".
if (p.ops$imbalanced.only) {
if (is_null(p.ops$thresholds)) {
warning("A threshold must be specified if imbalanced.only = TRUE. Displaying all covariates.", call. = FALSE)
p.ops$imbalanced.only <- FALSE
}
}
}
else p.ops$imbalanced.only <- FALSE
if (!missing(disp.call)) {
if (!rlang::is_bool(disp.call)) stop("'disp.call' must be TRUE or FALSE.", call. = FALSE)
if (disp.call && is_null(x$call)) {
warning("'disp.call' cannot be set to TRUE if the input object does not have a call component.", call. = FALSE)
}
else p.ops$disp.call <- disp.call
}
out <- list(un = p.ops$un,
disp = p.ops$disp,
compute = p.ops$compute,
drop.thresholds = drop.thresholds,
disp.bal.tab = p.ops$disp.bal.tab,
imbalanced.only = p.ops$imbalanced.only,
digits = digits,
disp.adj = p.ops$disp.adj,
thresholds = p.ops$thresholds,
type = p.ops$type,
nweights = p.ops$nweights,
disp.call = p.ops$disp.call)
out
}
# print_process method for subclassification bal.tab objects. Validates and
# resolves the display arguments (un, disp, stats, thresholds,
# disp.thresholds, disp.bal.tab, imbalanced.only, disp.call, which.subclass,
# subclass.summary) against the options recorded at bal.tab() time and
# returns a list of finalized print options.
#
# Fix: the loop over names(disp.thresholds) previously used 'x' as its loop
# variable, clobbering the bal.tab argument 'x'; the later is_null(x$call)
# check would then fail ($ on an atomic vector) whenever 'disp.thresholds'
# was supplied together with 'disp.call'. The loop now uses 'i', matching
# print_process.bal.tab().
print_process.bal.tab.subclass <- function(x, imbalanced.only, un, disp.bal.tab, disp.call, stats, disp.thresholds, disp, digits = max(3, getOption("digits") - 3), which.subclass, subclass.summary, ...) {
  A <- list(...)
  s.balance <- x$Subclass.Balance
  p.ops <- attr(x, "print.options")
  drop.thresholds <- c()
  # Adjustments to print options
  if (!missing(un) && p.ops$disp.adj) {
    if (!rlang::is_bool(un)) stop("'un' must be TRUE or FALSE.", call. = FALSE)
    if (p.ops$quick && p.ops$un == FALSE && un == TRUE) {
      warning("'un' cannot be set to TRUE if quick = TRUE in the original call to bal.tab().", call. = FALSE)
    }
    else p.ops$un <- un
  }
  if (!missing(disp)) {
    if (!rlang::is_character(disp)) stop("'disp' must be a character vector.", call. = FALSE)
    allowable.disp <- c("means", "sds", all_STATS(p.ops$type))
    if (any(disp %nin% allowable.disp)) {
      stop(paste(word_list(disp[disp %nin% allowable.disp], and.or = "and", quotes = 2, is.are = TRUE),
                 "not allowed in 'disp'."), call. = FALSE)
    }
    if (any(disp %nin% p.ops$compute)) {
      warning(paste("'disp' cannot include", word_list(disp[disp %nin% p.ops$compute], and.or = "or", quotes = 2), "if quick = TRUE in the original call to bal.tab()."), call. = FALSE)
    }
    else p.ops$disp <- disp
  }
  # Legacy disp.means / disp.sds flags supplied through ... .
  if (is_not_null(A[["disp.means"]])) {
    if (!rlang::is_bool(A[["disp.means"]])) stop("'disp.means' must be TRUE or FALSE.")
    if ("means" %nin% p.ops$compute && A[["disp.means"]] == TRUE) {
      warning("'disp.means' cannot be set to TRUE if quick = TRUE in the original call to bal.tab().", call. = FALSE)
    }
    else p.ops$disp <- unique(c(p.ops$disp, "means"[A[["disp.means"]]]))
  }
  if (is_not_null(A[["disp.sds"]])) {
    if (!rlang::is_bool(A[["disp.sds"]])) stop("'disp.sds' must be TRUE or FALSE.", call. = FALSE)
    if ("sds" %nin% p.ops$compute && A[["disp.sds"]] == TRUE) {
      warning("'disp.sds' cannot be set to TRUE if quick = TRUE in the original call to bal.tab().", call. = FALSE)
    }
    else p.ops$disp <- unique(c(p.ops$disp, "sds"[A[["disp.sds"]]]))
  }
  if (!missing(stats)) {
    if (!rlang::is_character(stats)) stop("'stats' must be a string.", call. = FALSE)
    stats <- match_arg(stats, all_STATS(p.ops$type), several.ok = TRUE)
    stats_in_p.ops <- stats %in% p.ops$compute
    if (any(!stats_in_p.ops)) {
      stop(paste0("'stats' cannot contain ", word_list(stats[!stats_in_p.ops], and.or = "or", quotes = 2), " when ",
                  if (sum(!stats_in_p.ops) > 1) "they were " else "it was ",
                  "not requested in the original call to bal.tab()."), call. = TRUE)
    }
    else p.ops$disp <- unique(c(p.ops$disp[p.ops$disp %nin% all_STATS()], stats))
  }
  # Per-statistic display flags supplied through ... (e.g., disp.v.ratio).
  for (s in all_STATS(p.ops$type)) {
    if (is_not_null(A[[STATS[[s]]$disp_stat]])) {
      if (!rlang::is_bool(A[[STATS[[s]]$disp_stat]])) {
        stop(paste0("'", STATS[[s]]$disp_stat, "' must be TRUE or FALSE."), call. = FALSE)
      }
      if (s %nin% p.ops$compute && isTRUE(A[[STATS[[s]]$disp_stat]])) {
        warning(paste0("'", STATS[[s]]$disp_stat, "' cannot be set to TRUE if quick = TRUE in the original call to bal.tab()."), call. = FALSE)
      }
      else p.ops$disp <- unique(c(p.ops$disp, s))
    }
  }
  # Threshold arguments supplied through ... can only be removed (set NULL)
  # at print time, never changed; thresholds for undisplayed stats are dropped.
  for (s in p.ops$compute[p.ops$compute %in% all_STATS(p.ops$type)]) {
    if (STATS[[s]]$threshold %in% names(A)) {
      temp.thresh <- A[[STATS[[s]]$threshold]]
      if (is_not_null(temp.thresh) &&
          (!is.numeric(temp.thresh) || length(temp.thresh) != 1 ||
           is_null(p.ops[["thresholds"]][[s]]) ||
           p.ops[["thresholds"]][[s]] != temp.thresh))
        stop(paste0("'", STATS[[s]]$threshold, "' must be NULL or left unspecified."))
      if (is_null(temp.thresh)) {
        drop.thresholds <- c(drop.thresholds, s)
      }
    }
    if (s %nin% p.ops$disp) {
      drop.thresholds <- c(drop.thresholds, s)
    }
  }
  if (!missing(disp.thresholds)) {
    if (!rlang::is_logical(disp.thresholds) || anyNA(disp.thresholds)) stop("'disp.thresholds' must only contain TRUE or FALSE.", call. = FALSE)
    # Unnamed input is matched positionally to the stored thresholds; a
    # single value is recycled across all of them.
    if (is_null(names(disp.thresholds))) {
      if (length(disp.thresholds) <= length(p.ops[["thresholds"]])) {
        if (length(disp.thresholds) == 1) disp.thresholds <- rep(disp.thresholds, length(p.ops[["thresholds"]]))
        names(disp.thresholds) <- names(p.ops[["thresholds"]])[seq_along(disp.thresholds)]
      }
      else {
        stop("More entries were given to 'disp.thresholds' than there are thresholds in the bal.tab object.", call. = FALSE)
      }
    }
    if (!all(names(disp.thresholds) %pin% names(p.ops[["thresholds"]]))) {
      warning(paste0(word_list(names(disp.thresholds)[!names(disp.thresholds) %pin% names(p.ops[["thresholds"]])],
                               quotes = 2, is.are = TRUE), " not available in thresholds and will be ignored."), call. = FALSE)
      disp.thresholds <- disp.thresholds[names(disp.thresholds) %pin% names(p.ops[["thresholds"]])]
    }
    names(disp.thresholds) <- match_arg(names(disp.thresholds), names(p.ops[["thresholds"]]), several.ok = TRUE)
    # Use 'i' (not 'x') so the bal.tab object is not overwritten.
    for (i in names(disp.thresholds)) {
      if (!disp.thresholds[i]) {
        drop.thresholds <- c(drop.thresholds, i)
      }
    }
  }
  if (!missing(disp.bal.tab)) {
    if (!rlang::is_bool(disp.bal.tab)) stop("'disp.bal.tab' must be TRUE or FALSE.")
    p.ops$disp.bal.tab <- disp.bal.tab
  }
  if (p.ops$disp.bal.tab) {
    if (!missing(imbalanced.only)) {
      if (!rlang::is_bool(imbalanced.only)) stop("'imbalanced.only' must be TRUE or FALSE.")
      p.ops$imbalanced.only <- imbalanced.only
    }
    # imbalanced.only is meaningless without a threshold to define "imbalanced".
    if (p.ops$imbalanced.only) {
      if (is_null(p.ops$thresholds)) {
        warning("A threshold must be specified if imbalanced.only = TRUE. Displaying all covariates.", call. = FALSE)
        p.ops$imbalanced.only <- FALSE
      }
    }
  }
  else p.ops$imbalanced.only <- FALSE
  if (!missing(disp.call)) {
    if (!rlang::is_bool(disp.call)) stop("'disp.call' must be TRUE or FALSE.", call. = FALSE)
    if (disp.call && is_null(x$call)) {
      warning("'disp.call' cannot be set to TRUE if the input object does not have a call component.", call. = FALSE)
    }
    else p.ops$disp.call <- disp.call
  }
  if (!missing(subclass.summary)) {
    if (!rlang::is_bool(subclass.summary)) stop("'subclass.summary' must be TRUE or FALSE.")
    if (p.ops$quick && p.ops$subclass.summary == FALSE && subclass.summary == TRUE) {
      warning("'subclass.summary' cannot be set to TRUE if quick = TRUE in the original call to bal.tab().", call. = FALSE)
    }
    else p.ops$subclass.summary <- subclass.summary
  }
  if (!missing(which.subclass)) {
    p.ops$which.subclass <- which.subclass
  }
  else if (is_not_null(A[["disp.subclass"]])) {
    # Legacy alias: disp.subclass = TRUE shows all subclasses, FALSE none.
    p.ops$which.subclass <- if (isTRUE(A[["disp.subclass"]])) NULL else NA
  }
  # Checks and Adjustments: translate which.subclass into integer indices.
  if (is_null(p.ops$which.subclass))
    which.subclass <- seq_along(s.balance)
  else if (anyNA(p.ops$which.subclass)) {
    which.subclass <- integer(0)
  }
  else if (is.numeric(p.ops$which.subclass)) {
    which.subclass <- intersect(seq_along(s.balance), p.ops$which.subclass)
    if (is_null(which.subclass)) {
      warning("No indices in 'which.subclass' are subclass indices. No subclasses will be displayed.", call. = FALSE)
      which.subclass <- NA
    }
  }
  else {
    warning("The argument to 'which.subclass' must be .all, .none, or a vector of subclass indices. No subclasses will be displayed.", call. = FALSE)
    which.subclass <- NA
  }
  out <- list(un = p.ops$un,
              disp = p.ops$disp,
              compute = p.ops$compute,
              drop.thresholds = drop.thresholds,
              disp.bal.tab = p.ops$disp.bal.tab,
              imbalanced.only = p.ops$imbalanced.only,
              digits = digits,
              disp.adj = p.ops$disp.adj,
              thresholds = p.ops$thresholds,
              type = p.ops$type,
              subclass.summary = p.ops$subclass.summary,
              which.subclass = which.subclass,
              disp.call = p.ops$disp.call)
  out
}
|
9e30498815217a8115a9b6140a72843e24ad6745
|
611963c993f211782d67628d20da92118672c176
|
/logic/obselete/randomForestImpl.R
|
9660ad0a8382c12def9265865e478928392630c6
|
[] |
no_license
|
CalvinHuynh/DataAnalysis
|
310a3af7285f61ceb224bde42ffb4229d1c0e86d
|
1b234200628413bcbac93810a201bbf5b6cd2522
|
refs/heads/master
| 2021-09-05T11:10:45.242255
| 2018-01-26T20:19:10
| 2018-01-26T20:19:10
| 112,754,264
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,448
|
r
|
randomForestImpl.R
|
Sys.setenv(lang = "en")
library(tm)
library(SnowballC)
library(caTools)
library(rpart)
library(rpart.plot)
library(randomForest)
source("logic/reader.R")
# Load the labeled IMDB training data plus a second dataset merged with it
# (helpers come from logic/reader.R, sourced above).
trainIMDBData <- readFirstDataset()
combinedTrainData2 <- readSecondDataset(trainIMDBData)
# Normalize the review text encoding to UTF-8 before corpus construction.
trainIMDBData$review <- convertToUtf8Enc(trainIMDBData$review)
# Optional extra cleaning steps, currently disabled:
# trainIMDBData$review <- commonCleaning(trainIMDBData$review)
# trainIMDBData$review <- removeCommonStopWords(trainIMDBData$review)
# combinedTrainData2$review <-
# convertToUtf8Enc(combinedTrainData2$review)
# combinedTrainData2$review <-
# commonCleaning(combinedTrainData2$review)
# combinedTrainData2$review <-
# removeCommonStopWords(combinedTrainData2$review)
# Test with a small sample size
# Exploratory run: build a document-term matrix from a random sample of 2000
# reviews, then fit and compare a CART tree and a random forest on a 70/30
# split. Relies on the global 'trainIMDBData' and on shuffleDataframe() from
# the sourced reader; results are printed/returned interactively rather than
# stored.
attempt1 <- function() {
# NOTE(review): assumes trainIMDBData has at least 2000 rows — confirm.
random2000rows <- shuffleDataframe(trainIMDBData)[1:2000,]
table(random2000rows$sentiment)
# Standard tm preprocessing pipeline: lowercase, strip punctuation/numbers/
# English stop words, collapse whitespace.
corpus <- Corpus(VectorSource(random2000rows$review))
corpus <- corpus %>%
tm_map(content_transformer(tolower)) %>%
tm_map(removePunctuation) %>%
tm_map(removeNumbers) %>%
tm_map(removeWords, stopwords(kind = "en")) %>%
tm_map(stripWhitespace)
corpus[[1]]$content
frequencies <- DocumentTermMatrix(corpus)
# NOTE(review): hard-coded inspection window assumes >= 515 terms — confirm.
inspect(frequencies[1995:2000, 505:515])
str(findFreqTerms(frequencies, lowfreq = 20))
# Keep only terms appearing in at least 0.5% of documents.
sparse <- removeSparseTerms(frequencies, 0.995)
IMDBtest <- as.data.frame(as.matrix(sparse))
colnames(IMDBtest) <- make.names(colnames(IMDBtest))
IMDBtest$sentiment <- random2000rows$sentiment
set.seed(1)
# split <- sample.split(IMDBtest$sentiment, SplitRatio= .70)
# trainSparse <- subset(IMDBtest, split == TRUE)
# testSparse <- subset(IMDBtest, split == FALSE)
# Plain random 70/30 split (not stratified by sentiment).
splitIndex = sample(1:nrow(IMDBtest),
size = round(0.7 * nrow(IMDBtest)),
replace = FALSE)
trainSparse <- IMDBtest[splitIndex,]
testSparse <- IMDBtest[-splitIndex,]
# CART baseline.
IMDBCart <- rpart(sentiment ~ ., data = trainSparse, method = 'class')
prp(IMDBCart)
predictCART <- predict(IMDBCart, newdata = testSparse, type = 'class')
table(testSparse$sentiment, predictCART)
# Accuracy calculation
# (203 + 208) / nrow(testSparse)
table(testSparse$sentiment)
# Accuracy calculation
# 296 / nrow(testSparse)
# Random forest comparison.
IMDBRF <- randomForest(sentiment ~ ., data = trainSparse)
predictRF <- predict(IMDBRF, newdata = testSparse)
table(testSparse$sentiment, predictRF)
# NOTE(review): recall_accuracy() is presumably from RTextTools, which is
# not loaded in this file — confirm it is available at runtime.
recall_accuracy(testSparse$sentiment, predictRF)
}
|
22f16dac51fbc4e89da985da7ed785c8dad9918e
|
a8f30efa30a78176b94a343d5c5bdbc705e84cc4
|
/Project2-R-file.R
|
6955fef3576eb00a0e675e6e1f4539f7aafb06b9
|
[] |
no_license
|
psoland/Predictive-analytics-with-R
|
5480f369f3332491f0dc2afc6260929cdb9aaf8f
|
925a12ffac6db5dbfb8517b02c8bc99d0b047e3c
|
refs/heads/master
| 2020-06-05T16:37:52.901086
| 2019-06-18T20:04:53
| 2019-06-18T20:04:53
| 192,485,500
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,203
|
r
|
Project2-R-file.R
|
library(tidyverse)
library(caret)
library(MASS)
library(parallel)
library(doParallel)
library(pROC)
library(rpart)
library(grid)
#--------------------------------------- Descriptive statistics ---------------------------------------
# Load the marketing data and derive the binary target: High = 1 when the
# Income code is >= 8 (income bracket meaning per the ElemStatLearn docs).
marketing.full <- ElemStatLearn::marketing
marketing.full$High <- ifelse(marketing.full$Income>=8,1,0)
# Column-wise summary of a data frame: returns a matrix whose rows are the
# Mean, Median, Min, Max, and NA count of each column, rounded to 3 digits.
# NAs are removed before computing the statistics so they only show up in
# the "No.NA" row.
stats.summary <- function(df) {
  summary_rows <- rbind(
    Mean   = apply(df, 2, mean,   na.rm = TRUE),
    Median = apply(df, 2, median, na.rm = TRUE),
    Min    = apply(df, 2, min,    na.rm = TRUE),
    Max    = apply(df, 2, max,    na.rm = TRUE),
    No.NA  = apply(df, 2, function(col) sum(is.na(col)))
  )
  round(summary_rows, 3)
}
descriptive <- stats.summary(marketing.full)
#--------------------------------------- Plot variables ---------------------------------------
##### Histogram of all variables #####
# Draw one bar chart per column (all columns coerced to factor) and arrange
# them in a 3-row grid. Plots p1..pt are assembled inside local() and
# collected via mget().
plot.fn <- function(df){
t <- ncol(df)
local({
for(i in 1:t){
df[,names(df)] <- lapply(df[,names(df)] , factor)
# NOTE(review): eval(parse(...)) is used only to inject the column name as
# the x-axis label; aes() with a computed label would avoid this pattern.
assign(paste0("p",i), eval(parse(text = paste0("qplot(df[,",i,"],data = df, xlab = \"",
colnames(df)[i], "\")+theme_minimal()"))))
}
# ls(pattern = "p.") matches any name with 'p' + one more character, which
# here picks up the p1..pt plot objects created above.
mylist <- mget(ls(pattern = "p."))
gridExtra::grid.arrange(grobs = mylist,nrow = 3)
})
}
plot.fn(marketing.full)
##### Plot all variables as a function of High #####
# For each column (except the first, assumed to be High), draw a stacked bar
# chart of counts by High, and arrange the charts in a grid with 'nrow' rows.
dist.fn <- function(df, nrow = 3){
t <- ncol(df)
local({
for(i in 2:(t)){
df[,names(df)] <- lapply(df[,names(df)] , factor)
temp <- data.frame(table(df[,"High"],df[,i]))
names(temp) <- c("High",colnames(df)[i],"Count")
# NOTE(review): eval(parse(...)) injects the column name into the fill
# aesthetic; aes(fill = .data[[colnames(df)[i]]]) would avoid this.
assign(paste0("p",i), eval(parse(text =paste0("ggplot(data = temp, aes(x = High, y = Count, fill = ",
colnames(df)[i], "))+ geom_bar(stat=\"identity\")+
theme_minimal()"))))
}
mylist <- mget(ls(pattern = "p."))
gridExtra::grid.arrange(grobs = mylist,nrow = nrow)
})
}
# NOTE(review): marketing.mean.lived is only created by create.data(), which
# is defined/called further down this file — running top-to-bottom this line
# fails; confirm intended execution order.
marketing.graph <- subset(marketing.mean.lived, select = c(High,Edu,Status,Marital, Occupation))
dist.fn(marketing.graph, nrow = 2)
# NOTE(review): this draws an empty ggplot object — looks like a leftover
# debugging line; confirm whether it should be removed.
gridExtra::grid.arrange(ggplot())
#--------------------------------------- Data cleaning ---------------------------------------
# Build the four datasets used for modeling and assign them (via <<-) into
# the global environment: with/without the Lived column, each in a
# mean-imputed and an NA-dropped variant, plus the list 'mark' bundling all
# four. Side-effect-only function; relies on ElemStatLearn::marketing.
create.data <- function(){
##### Remove Income #####
marketing.full <- ElemStatLearn::marketing
# Binary target: High income bracket (Income code >= 8), as factor
# "zero"/"one" so caret's twoClassSummary can use class probabilities.
marketing.full$High <- ifelse(marketing.full$Income>=8,1,0)
marketing.full$High <- as.factor(marketing.full$High)
levels(marketing.full$High) <- c("zero","one")
marketing <<- subset(marketing.full, select = -c(Income, Lived))
marketing.lived <<- subset(marketing.full, select = -c(Income))
##### Mean imputation #####
# Replace each NA with the truncated (as.integer) column mean. Cell-by-cell
# loop is O(rows x cols); a vectorized per-column replacement would be
# equivalent and much faster.
impute.mean <- function(df){
for(i in 1:ncol(df)){
for(j in 1:nrow(df)){
if(is.na(df[j,i])){
df[j,i] <- as.integer(mean(df[,i], na.rm = T))
}
}
}
df
}
marketing.mean <<- impute.mean(marketing)
marketing.mean.lived <<- impute.mean(marketing.lived)
#sum(is.na(marketing.mean))
##### Remove NAs #####
# Complete-case variants (overwrite the globals assigned above).
marketing <<- na.omit(marketing)
marketing.lived <<- na.omit(marketing.lived)
mark <<- list(marketing.mean.lived=marketing.mean.lived,marketing = marketing,
marketing.mean = marketing.mean, marketing.lived=marketing.lived)
}
#--------------------------------------- Models ---------------------------------------
# Reset the workspace, keeping only create.data and this keep-list, then
# rebuild the datasets. NOTE(review): rm(list = setdiff(ls(), ...)) wipes the
# session — fine for a standalone script, hostile inside any other workflow.
envir.clean <- c("envir.clean","create.data")
rm(list=setdiff(ls(), envir.clean))
create.data()
# Shared modeling settings: fixed seed, 75/25 split, 10-fold CV optimizing
# ROC via class probabilities.
seed <- 14
percent <- 0.75
fit_control <- trainControl(method = "cv", number = 10, classProbs = TRUE,
summaryFunction=twoClassSummary)
#df <- marketing.mean.lived
# NOTE(review): rm() warns if 'nogo' does not exist yet (it is first created
# inside the loop below) — confirm this is intentional.
rm(list="nogo")
# Register a parallel backend for caret using all available cores.
cl <- makeCluster(detectCores())
doParallel::registerDoParallel(cl)
for(i in 1:length(mark)){
nogo <- "nogo"
df <- mark[[i]]
############# LDA #############
# Fit an LDA classifier on a shuffled 'percent' train split of df with
# caret's 10-fold CV (fit_control), predicting High. Assigns mod.lda,
# test.lda, and pred.lda into the global environment via <<-.
lda.fn <- function(df){
# 'nogo' is defined just before this function is called in the loop, so
# this branch is skipped there and the loop's shared cluster is reused;
# when called standalone (no 'nogo'), a local cluster is spun up instead.
if(!exists("nogo")){
cl <- makeCluster(detectCores())
doParallel::registerDoParallel(cl)
}
set.seed(seed)
n <- nrow(df)
shuffled <- df[sample(n),]
train.lda <- shuffled[1:round(percent * n),]
test.lda <<- shuffled[(round(percent * n) + 1):n,]
rm(list="shuffled")
mod.lda <<- train(High ~ ., data=train.lda, method="lda",
trControl = fit_control,
metric = "ROC")
pred.lda <<- predict(mod.lda, newdata = test.lda, type = "prob")
# Classify at the 0.5 probability cutoff on the "zero" class.
test.lda$pred <<- ifelse(pred.lda$zero>0.5, "zero","one")
if(!exists("nogo")){
stopCluster(cl)
registerDoSEQ()
}
}
# Fit LDA on the current dataset, then score it on the held-out quarter.
lda.fn(df)
#table(test.lda$pred, test.lda$High)
#caret::confusionMatrix(test.lda$High,test.lda$pred)
# Test-set accuracy as a percentage, two decimals.
accuracy.lda = round(mean(test.lda$pred == test.lda$High)*100,2)
#print(lda.mod)
# Plot LDA
# ROC curve built from the predicted probability of the positive ("one") class.
ROC.lda <- roc(as.numeric(test.lda$High),as.numeric(pred.lda$one))
#plot(ROC.lda, col = "red")
#auc(ROC.lda)
############# GBM #############
# Fit a gradient-boosted tree model (caret method "gbm") on a 75/25 split of
# `df`, tuning over a depth/trees/shrinkage grid with 10-fold CV (globals:
# seed, percent, fit_control, nogo).  Writes test.gbm, mod.gbm and pred.gbm
# to the global environment via <<-.
gbm.fn <- function(df){
# Only manage a private parallel backend when the `nogo` sentinel is absent.
if(!exists("nogo")){
cl <- makeCluster(detectCores())
doParallel::registerDoParallel(cl)}
set.seed(seed)
n <- nrow(df)
shuffled <- df[sample(n),]
train.gbm <- shuffled[1:round(percent * n),]
test.gbm <<- shuffled[(round(percent * n) + 1):n,]
rm(list="shuffled")
# 5 depths x 50 tree counts x 2 shrinkages = 500 candidate models.
gbmGrid <- expand.grid(interaction.depth = c(1,3,5,7,9),
                       n.trees = (1:50)*20,
                       shrinkage = c(0.1,0.01),
                       n.minobsinnode = 10)
mod.gbm <<- train(High ~ ., data=train.gbm, method="gbm",
                  trControl = fit_control,metric = "ROC", tuneGrid = gbmGrid, verbose = FALSE)
# Predict with the hyper-parameters of the CV-best (highest ROC) candidate.
pred.gbm <<- predict(mod.gbm, newdata = test.gbm,
                     n.trees = mod.gbm$results$n.trees[which.max(mod.gbm$results$ROC)],
                     interaction.depth = mod.gbm$results$interaction.depth[which.max(mod.gbm$results$ROC)],
                     shrinkage = mod.gbm$results$shrinkage[which.max(mod.gbm$results$ROC)],
                     type = "prob")
test.gbm$pred <<- ifelse(pred.gbm$zero>0.5, "zero","one")
if(!exists("nogo")){
stopCluster(cl)
registerDoSEQ()
}
}
# Fit GBM on the current dataset and evaluate on the held-out quarter.
gbm.fn(df)
#table(test.gbm$pred, test.gbm$High)
#caret::confusionMatrix(test.gbm$High,test.gbm$pred)
accuracy.gbm = round(mean(test.gbm$pred == test.gbm$High)*100,2)
# Plot GBM
# Tuning profile (ROC vs grid parameters); note the value is not stored.
ggplot(mod.gbm)+theme_minimal()
ROC.gbm <- roc(as.numeric(test.gbm$High),as.numeric(pred.gbm$one))
#plot(ROC.gbm, col = "red")
auc(ROC.gbm)
#rm(list=setdiff(ls(), envir.clean))
############# Logreg #############
# Fit a binomial logistic regression (caret method "glm") on a 75/25 split of
# `df`; mirrors lda.fn.  Reads globals seed, percent, fit_control, nogo and
# writes test.log, mod.log, pred.log to the global environment via <<-.
log.fn <- function(df){
  # Manage a private parallel backend only when the `nogo` sentinel is absent.
  if(!exists("nogo")){
    cl <- makeCluster(detectCores())
    doParallel::registerDoParallel(cl)
  }
  set.seed(seed)
  n_obs <- nrow(df)
  permuted <- df[sample(n_obs), ]
  n_train <- round(percent * n_obs)
  train.log <- permuted[seq_len(n_train), ]
  test.log <<- permuted[(n_train + 1):n_obs, ]
  rm(list = "permuted")
  mod.log <<- train(High ~ ., data = train.log, method = "glm",
                    family = binomial, trControl = fit_control, metric = "ROC")
  pred.log <<- predict(mod.log, newdata = test.log, type = "prob")
  test.log$pred <<- ifelse(pred.log$zero > 0.5, "zero", "one")
  if(!exists("nogo")){
    stopCluster(cl)
    registerDoSEQ()
  }
}
# Fit logistic regression on the current dataset and score the hold-out.
log.fn(df)
#table(test.log$pred, test.log$High)
#caret::confusionMatrix(test.log$High,test.log$pred)
accuracy.log = round(mean(test.log$pred == test.log$High)*100,2)
ROC.log <- roc(as.numeric(test.log$High),as.numeric(pred.log$one))
#plot(ROC.log, col = "red")
#auc(ROC.log)
############# Classification with pruning #############
# Fit a pruned classification tree (caret method "rpart", 10 cp values) on a
# 75/25 split of `df`; mirrors lda.fn.  Reads globals seed, percent,
# fit_control, nogo; writes test.tree, mod.tree, pred.tree via <<-.
tree.fn <- function(df){
  # Manage a private parallel backend only when the `nogo` sentinel is absent.
  if(!exists("nogo")){
    cl <- makeCluster(detectCores())
    doParallel::registerDoParallel(cl)
  }
  set.seed(seed)
  n_obs <- nrow(df)
  permuted <- df[sample(n_obs), ]
  n_train <- round(percent * n_obs)
  train.tree <- permuted[seq_len(n_train), ]
  test.tree <<- permuted[(n_train + 1):n_obs, ]
  rm(list = "permuted")
  mod.tree <<- train(High ~ ., data = train.tree, method = "rpart",
                     trControl = fit_control, metric = "ROC", tuneLength = 10)
  pred.tree <<- predict(mod.tree, newdata = test.tree, type = "prob")
  test.tree$pred <<- ifelse(pred.tree$zero > 0.5, "zero", "one")
  if(!exists("nogo")){
    stopCluster(cl)
    registerDoSEQ()
  }
}
# BUG FIX: this previously called tree.fn(marketing.mean.lived), so the tree
# model was fit on the same dataset in every loop iteration, unlike the other
# four models which are fit on the current variant `df`.
tree.fn(df)
#table(test.tree$pred, test.tree$High)
#caret::confusionMatrix(test.tree$High,test.tree$pred)
# Test-set accuracy as a percentage, two decimals.
accuracy.tree = round(mean(test.tree$pred == test.tree$High)*100,2)
# Plot tree
ROC.tree <- roc(as.numeric(test.tree$High),as.numeric(pred.tree$one))
#plot(ROC.tree, col = "red")
#auc(ROC.tree)
############# Random forest #############
# Fit a random forest (caret method "rf", 10 mtry values) on a 75/25 split of
# `df`; mirrors lda.fn.  Reads globals seed, percent, fit_control, nogo;
# writes test.rf, mod.rf, pred.rf to the global environment via <<-.
rf.fn <- function(df){
  # Manage a private parallel backend only when the `nogo` sentinel is absent.
  if(!exists("nogo")){
    cl <- makeCluster(detectCores())
    doParallel::registerDoParallel(cl)
  }
  set.seed(seed)
  n_obs <- nrow(df)
  permuted <- df[sample(n_obs), ]
  n_train <- round(percent * n_obs)
  train.rf <- permuted[seq_len(n_train), ]
  test.rf <<- permuted[(n_train + 1):n_obs, ]
  rm(list = "permuted")
  mod.rf <<- train(High ~ ., data = train.rf, method = "rf",
                   trControl = fit_control, metric = "ROC", tuneLength = 10)
  pred.rf <<- predict(mod.rf, newdata = test.rf, type = "prob")
  test.rf$pred <<- ifelse(pred.rf$zero > 0.5, "zero", "one")
  if(!exists("nogo")){
    stopCluster(cl)
    registerDoSEQ()
  }
}
# Fit the random forest on the current dataset and score the hold-out.
rf.fn(df)
#table(test.rf$pred, test.rf$High)
#caret::confusionMatrix(test.rf$High,test.rf$pred)
accuracy.rf = round(mean(test.rf$pred == test.rf$High)*100,2)
ROC.rf <- roc(as.numeric(test.rf$High),as.numeric(pred.rf$one))
#plot(ROC.rf, col = "red")
#auc(ROC.rf)
############# Metrics #############
# Accuracy / AUC table for the five models fit on this dataset variant.
summary.all <- data.frame(rbind(cbind(accuracy.lda,auc(ROC.lda)),
                                cbind(accuracy.gbm,auc(ROC.gbm)),
                                cbind(accuracy.log,auc(ROC.log)),
                                cbind(accuracy.tree,auc(ROC.tree)),
                                cbind(accuracy.rf,auc(ROC.rf))))
rownames(summary.all) <- c("LDA","GBM","Logreg","Tree","Random Forest")
colnames(summary.all) <- c("Accuracy","AUC")
summary.all$AUC <- round(summary.all$AUC*100,2)
# Save one summary table per dataset, e.g. `summary.marketing.mean`.
assign(paste0("summary.",names(mark)[i]) ,summary.all)
# Keep the full model/prediction objects only for the first dataset variant.
if(i==1){
list.all <<- list(lda = list("pred" = pred.lda,"test" = test.lda,"mod" = mod.lda, "roc" = ROC.lda),
                  gbm = list("pred" = pred.gbm,"test" = test.gbm,"mod" = mod.gbm, "roc" = ROC.gbm),
                  log = list("pred" = pred.log,"test" = test.log,"mod" = mod.log, "roc" = ROC.log),
                  tree = list("pred" = pred.tree,"test" = test.tree,"mod" =mod.tree, "roc" = ROC.tree),
                  rf = list("pred" = pred.rf,"test" = test.rf,"mod" = mod.rf, "roc" = ROC.rf))
}
# Progress indicator.
print(paste0(i/length(mark)*100,"%"))
}
stopCluster(cl)
registerDoSEQ()
|
b491ad8da4e50d09aa3371a758808e404494a4bd
|
6db72f96fe027cf7596b1a8e715b87233466d81b
|
/tests/testthat/test-uniformG_selection.R
|
cb2346ef54b3dec8d7ff436a6add160f7bb95a0f
|
[] |
no_license
|
cran/biosurvey
|
42d8036c319cbf925d53428778b002350e2b32c7
|
5c428c9b7e4f57b62f015576ca5ac98a4e6eb169
|
refs/heads/master
| 2023-08-11T08:01:51.192879
| 2021-09-15T20:10:07
| 2021-09-15T20:10:07
| 406,425,246
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,795
|
r
|
test-uniformG_selection.R
|
context("Uniform selection of sites in G")

# uniformG_selection() should return a well-formed "master_selection" object
# when asked for 20 uniformly spaced sites in geographic space.
test_that("Correct G master_selection", {
  selection <- uniformG_selection(m_matrix_pre, expected_points = 20,
                                  max_n_samplings = 1, replicates = 1)
  expected_names <- c("data_matrix", "preselected_sites", "region", "mask",
                      "raster_base", "PCA_results", "selected_sites_random",
                      "selected_sites_G", "selected_sites_E", "selected_sites_EG")
  # Overall object shape and class.
  testthat::expect_s3_class(selection, "master_selection")
  testthat::expect_length(selection, 10)
  testthat::expect_equal(names(selection), expected_names)
  # Only the G-based selection slot should be populated.
  testthat::expect_null(selection$selected_sites_random)
  testthat::expect_null(selection$selected_sites_E)
  testthat::expect_null(selection$selected_sites_EG)
  testthat::expect_equal(class(selection$selected_sites_G)[1], "list")
  testthat::expect_length(selection$selected_sites_G, 1)
  # The single replicate holds exactly the 20 requested sites.
  testthat::expect_s3_class(selection$selected_sites_G[[1]], "data.frame")
  testthat::expect_equal(nrow(selection$selected_sites_G[[1]]), 20)
})
# Invalid-input behaviour of uniformG_selection().
test_that("Errors and messages G selection", {
  # Valid call still emits an informational message.
  testthat::expect_message(uniformG_selection(m_matrix_pre,
                                              expected_points = 20,
                                              max_n_samplings = 1,
                                              replicates = 1))
  # Non-master_matrix input is rejected.
  testthat::expect_error(uniformG_selection(1:100, expected_points = 20,
                                            max_n_samplings = 1,
                                            replicates = 1))
  # Missing expected_points is rejected.
  testthat::expect_error(uniformG_selection(m_matrix))
  # Missing arguments are rejected.
  testthat::expect_error(uniformG_selection())
  testthat::expect_error(uniformG_selection(expected_points = 10))
})
#----
#----
|
c01a6e174dab689d5aa549cfb20de031886e2b13
|
259fe6446e0f059be228f95745db1aa54ad5ce31
|
/man/constraint_all_zeros.Rd
|
590b793bbacfc958e229676c3d049186875610da
|
[] |
no_license
|
tpq/caress
|
9fd1c306e8f6bb23f88203f6e6329a72d4689aaa
|
04386b3ab61ef9036e91ab1bbd6e42a1265b5ea9
|
refs/heads/master
| 2021-06-24T08:16:31.155396
| 2021-03-03T03:34:27
| 2021-03-03T03:34:27
| 202,971,472
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 341
|
rd
|
constraint_all_zeros.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/3-constraint.R
\name{constraint_all_zeros}
\alias{constraint_all_zeros}
\title{Constrain All Weights to Zero}
\usage{
constraint_all_zeros(w)
}
\arguments{
\item{w}{A weights matrix.}
}
\description{
This weights constraint function forces all weights to zero.
}
|
d19a6b94039d3fcf648ef08d9db4bba571d11db3
|
a91c8d6928115e7ba12c76db197bc61fff3eab85
|
/Models/Counts_Corr.R
|
d74f506552b1f59f174c7e592891f538f6c38ab0
|
[] |
no_license
|
no33mis/MSc-Dissertation
|
f308799cdbce8f780bcbee4dbb7ee7145b286f6e
|
f7d5676872d2d7388a556a79a79e3a8aa62e480f
|
refs/heads/master
| 2022-11-30T19:35:02.659772
| 2020-08-12T04:02:48
| 2020-08-12T04:02:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 823
|
r
|
Counts_Corr.R
|
######################################## Correlation Matrix ###################################################
### Correlation Matrix of the features
##############################################################################################################
##CLEAR R MEMORY
# NOTE(review): rm(list = ls()) wipes the caller's entire workspace -- fine for
# a standalone script, hostile when sourced from another session.
rm(list = ls())
library(corrplot)
##set the working directory and check the files within
# NOTE(review): "//../" looks like a redacted placeholder path; replace with
# the real data directory before running.
setwd("//../")
list.files()
##read the file and create a subset
file <- read.csv("Input_Amenities_v1_wRE.csv", stringsAsFactors = FALSE)
##select the rows and remove NAs
# Columns 3-13 are assumed to hold the numeric amenity features -- TODO confirm
# against the CSV header.
data <- file[c(3:13)]
data <- na.omit(data)
##define the correlation coefficients
cor <- cor(data, method = "pearson")
##visualise the results
# Upper-triangle square plot, hierarchically clustered, coefficients printed.
corrplot(cor, method = "square", type = "upper" , order = "hclust", addCoef.col = "dark red",
         tl.col = "black", tl.srt = 45)
|
0b2252fd46c7ffd1dd86b53dcc5eef9b0a56391d
|
16d6f9a925fb8ae78938baf67173afc7b4e3f94b
|
/R/ranges-reduce.R
|
f59909de33b5b77f4a2d85d64fdc6c0386bde7d9
|
[] |
no_license
|
liupfskygre/plyranges
|
809851435ac1d9a60df8400b8c7c409862966418
|
c82b7eb8ec31478e0f8439207d20897f0c102a6f
|
refs/heads/master
| 2023-06-04T10:52:44.864177
| 2021-06-28T01:15:43
| 2021-06-28T01:15:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,668
|
r
|
ranges-reduce.R
|
# ranges-reduce
# Internal helpers shared by the reduce_ranges() family below.

# Expand `.data` to the rows listed in `indexes`, attach `groups` as a
# "revmap" metadata column, and regroup by the original grouping variables
# plus revmap.
group_by_revmap <- function(.data, indexes, groups) {
  group_vars <- syms(c(group_vars(.data), "revmap"))  # capture before ungroup()
  .data <- ungroup(.data)
  .data <- .data[unlist(indexes)]
  mcols(.data)[["revmap"]] <- groups
  return(group_by(.data, !!!group_vars))
}

# Collapse a run-length vector to an integer Rle keyed by its run values.
make_key_rle <- function(x) {
  Rle(as.integer(S4Vectors::runValue(x)), S4Vectors::runLength(x))
}

# One run per list element of `x`, run length = element size: maps each
# unlisted element back to the reduced range it belongs to.
make_revmap_rle <- function(x) {
  Rle(seq_along(x), elementNROWS(x))
}
# Subset `x` by the unlisted indices contained in list `y`.
unsplice <- function(x, y) x[unlist(y)]

# Regroup `.data` so that each group corresponds to one reduced range,
# using the revmap produced by reduce(..., with.revmap = TRUE).
add_revmap_grouping <- function(.data, key, revmap) {
  # lookup indexes for each group
  lookup <- S4Vectors::splitAsList(revmap, key)
  inx <- .group_rows(.data)
  # indexes we will expand by
  indexes <- S4Vectors::mendoapply(unsplice, inx, lookup)
  # groupings from revmap
  groups <- make_revmap_rle(revmap)
  group_by_revmap(.data, indexes, groups)
}
# Reduce an ungrouped Ranges object with `rfun`, then evaluate the summary
# expressions in `...` over the original rows merged into each reduced range.
reduce_single <- function(.data, ..., rfun = reduce) {
  dots <- set_dots_named(...)
  if (length(dots) == 0L) {
    # No summaries requested: a plain reduce is sufficient.
    return(rfun(.data))
  }
  reduced <- rfun(.data, with.revmap = TRUE)
  .data <- group_by_revmap(.data,
                           mcols(reduced)[["revmap"]],
                           make_revmap_rle(mcols(reduced)[["revmap"]]))
  sd <- summarise(.data, !!!dots)
  # Restore reduced-range order, then drop the bookkeeping revmap column.
  # NOTE(review): only the *first* "revmap" column is dropped here, whereas
  # reduce_by_grp() drops all matches -- confirm the difference is intentional.
  sd <- sd[order(sd[["revmap"]]), -which(names(sd) == "revmap")[1], drop = FALSE]
  mcols(reduced) <- sd
  reduced
}
# Grouped variant of reduce_single(): reduce each group separately, stack the
# results, and evaluate the summaries in `...` over the rows merged into each
# reduced range.  With no summaries, the group keys are re-attached instead.
reduce_by_grp <- function(.data, ..., rfun = IRanges::reduce) {
  dots <- set_dots_named(...)
  by_groups <- dplyr::group_split(.data)
  if (length(dots) == 0L) {
    rng <- IRanges::stack(rfun(by_groups))
    sd <- dplyr::group_keys(.data)
    # stack() records the source group in "name"; map it back to the keys.
    key <- make_key_rle(mcols(rng)[["name"]])
    mcols(rng) <- sd[key, , drop = FALSE]
    return(rng)
  }
  rng <- IRanges::stack(rfun(by_groups, with.revmap = TRUE))
  .data <- add_revmap_grouping(.data,
                               mcols(rng)[["name"]],
                               mcols(rng)[["revmap"]])
  sd <- summarise(.data, !!!dots)
  # Restore reduced-range order and drop the bookkeeping revmap column(s).
  sd <- sd[order(sd[["revmap"]]), -which(names(sd) == "revmap"), drop = FALSE]
  mcols(rng) <- sd
  rng
}
#' Reduce then aggregate a Ranges object
#'
#' @param .data a Ranges object to reduce
#' @param ... Name-value pairs of summary functions.
#'
#' @return a Ranges object with the reduced (merged) ranges and one metadata
#'   column per summary expression supplied in \code{...}.
#' @rdname ranges-reduce
#' @importFrom IRanges reduce
#' @importFrom utils relist
#' @examples
#' set.seed(10)
#' df <- data.frame(start = sample(1:10),
#' width = 5,
#' seqnames = "seq1",
#' strand = sample(c("+", "-", "*"), 10, replace = TRUE),
#' gc = runif(10))
#'
#' rng <- as_granges(df)
#' rng %>% reduce_ranges()
#' rng %>% reduce_ranges(gc = mean(gc))
#' rng %>% reduce_ranges_directed(gc = mean(gc))
#'
#' x <- data.frame(start = c(11:13, 2, 7:6),
#' width=3,
#' id=sample(letters[1:3], 6, replace = TRUE),
#' score= sample(1:6))
#' x <- as_iranges(x)
#' x %>% reduce_ranges()
#' x %>% reduce_ranges(score = sum(score))
#' x %>% group_by(id) %>% reduce_ranges(score = sum(score))
#' @export
# S3 generic: strand-agnostic reduce-then-summarise.
reduce_ranges <- function(.data, ...) { UseMethod("reduce_ranges") }
#' @method reduce_ranges IntegerRanges
#' @export
reduce_ranges.IntegerRanges <- function(.data, ...) {
  reduce_single(.data, ...)
}
#' @method reduce_ranges GroupedIntegerRanges
#' @export
reduce_ranges.GroupedIntegerRanges <- function(.data, ...) {
  reduce_by_grp(.data, ...)
}
#' @method reduce_ranges GroupedGenomicRanges
#' @export
reduce_ranges.GroupedGenomicRanges <- function(.data, ...) {
  # Genomic variants ignore strand, per the "strand-agnostic" contract.
  reduce_by_grp(.data, ...,
                rfun = function(x, ...) {
                  reduce(x, ..., ignore.strand = TRUE)
                })
}
#' @method reduce_ranges GenomicRanges
#' @export
reduce_ranges.GenomicRanges <- function(.data, ...) {
  reduce_single(.data, ...,
                rfun = function(x, ...) {
                  reduce(x, ..., ignore.strand = TRUE)
                })
}
#' @rdname ranges-reduce
#' @export
reduce_ranges_directed <- function(.data, ...) {
  UseMethod("reduce_ranges_directed")
}
#' @importFrom IRanges reduce
#' @method reduce_ranges_directed GenomicRanges
#' @export
reduce_ranges_directed.GenomicRanges <- function(.data, ...) {
  # Directed variant: strand participates in the reduction.
  reduce_single(.data, ...,
                rfun = function(x, ...) {
                  reduce(x, ..., ignore.strand = FALSE)
                })
}
#' @method reduce_ranges_directed GroupedGenomicRanges
#' @export
reduce_ranges_directed.GroupedGenomicRanges <- function(.data, ...) {
  reduce_by_grp(.data, ...,
                rfun = function(x, ...) {
                  reduce(x, ..., ignore.strand = FALSE)
                })
}
|
6dd030586169393f258efd4f9812700c28aff9c6
|
38e63376593fe5028ed86606833da87ee10a1c83
|
/Los Angeles/EDA_Los_Angeles.R
|
423d7d22df47c2c1c01747c7b48bb6cb07d22c8e
|
[
"MIT"
] |
permissive
|
Abhinav-Git19/Airbnb-Hosuing-Recommendation
|
659164b73c0823ca5409fa55e9dc57930289a2cd
|
a57bccedc34762473504e12a864d510d985cfff3
|
refs/heads/master
| 2021-06-26T14:25:18.334521
| 2021-01-14T15:30:33
| 2021-01-14T15:30:33
| 196,685,318
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 798
|
r
|
EDA_Los_Angeles.R
|
## Exploratory analysis of the Los Angeles Airbnb data.
## Expects `listings` and `calendar` data frames to already be loaded -- TODO
## confirm where they come from (likely read elsewhere in the project).
library(tidyverse)
library(readr)
library(corrplot)
listings2 <- listings
# Price against the minimum-nights requirement.
listings2 %>% ggplot(mapping = aes(x = minimum_nights, y = price)) + geom_point() + ggtitle("Price Vs Min_Nights")
ggplot(listings2, mapping = aes(x = availability_365, y = price)) + geom_point()
# Pairwise correlations between the numeric listing attributes.
sub <- select(listings2, price:number_of_reviews, reviews_per_month:availability_365)
View(cor(sub, use = "pairwise.complete.obs"))
listings2 %>% filter(minimum_nights <= 15) %>% ggplot(mapping = aes(x = minimum_nights, y = price, group = minimum_nights)) + geom_boxplot(outlier.color = "red") + ggtitle("Price vs minimum_nights")
count(listings2, neighbourhood_group)
subdat <- filter(calendar, available == 't')
# BUG FIX: the original both piped the filtered data into ggplot() *and*
# passed `subdat` positionally, so `subdat` was matched to `mapping` alongside
# the explicit mapping= ("matched by multiple actual arguments" error).
# Pass the data once, via the pipe.
subdat %>% filter(listing_id == '16228948' | listing_id == '6749145') %>%
  ggplot(mapping = aes(x = date, y = price, color = listing_id)) + geom_smooth()
|
b103c384c65314b3204bd235d4f77e33767993a9
|
15b17e0f1ece59719e4d6a7bd9b8f1bf28f4af8c
|
/FF/FFTeam.R
|
285e0119fa6ee1e1cfa7adaca9958fdd7752bb3b
|
[] |
no_license
|
paulelong/TestProj1
|
687a10793863e4e628645eb12cb256ef342b311d
|
92c786222e702fec29d9ccb64bd72deecb114359
|
refs/heads/master
| 2021-04-26T16:53:12.645703
| 2017-10-27T13:06:33
| 2017-10-27T13:06:33
| 106,869,941
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,156
|
r
|
FFTeam.R
|
## Scratch / exploratory notebook for pulling fantasy-football data from the
## Yahoo Fantasy Sports (YQL) API.  Several objects referenced below (pl, p1d,
## p2d, rd, df, info, ...) are created interactively and are NOT defined in
## this file, so the script is not runnable top-to-bottom as-is.
source(file="FFLib.R")
source(file="FFData.R")
InitEnv(7)
InitYahooEnv()
InitLeague()
aop <- GetAllOwnedPlayers()
aop
YahooAllPlayerStatsAtPosition("TE", 3)
GetTeamRoster(7)
Teams()
Roster()
ps <- YahooPlayerStats("371.p.25812", 3)
si <- YahooLeagueStatInfo()
lset <- LeagueSettings()
GetPlayerKey(332)
# NOTE(review): the lines below use objects defined later or never (p1, p2,
# pl, p1d, p2d, rd, df); they look like interactive experiments kept for
# reference rather than sequential code.
cpl <- apply(cbind(p1,p2), 1, unlist)
rbind(p1d, p2d) #pld[1], #pld[1,]
rd[rd$full == "Tony Romo",]
df[nrow(df),] # get the last row
lf = as.list(df)
lf[[1]] # first element = first *column* (as.list on a data.frame splits by column)
pl2 <- AllPlayers(44)
p1 <- GetPlayers(pl)
pl1 <- AllPlayers(1)
p2 <- GetPlayers(pl1)
ap <- YahooAllPlayers(1)
# Hand-built YQL queries against the fantasysports.* tables.
u <- "https://query.yahooapis.com/v1/yql?q=select%20*%20from%20fantasysports.teams%20where%20team_key%3D'371.l.272272.t.8'&format=json&diagnostics=true&callback="
mylist <- info$query$results$team
t1 <- mylist[c(3,6,7,8,9,14)]
t1
URL.team.roster <- "https://query.yahooapis.com/v1/yql?q=select%20*%20from%20fantasysports.teams.roster%20where%20team_key%3D'371.l.272272.t.8'&format=json&diagnostics=true&callback="
resp <- GET(URL.team.roster , config(token = yahoo_token))
roster.info = fromJSON(content(resp, as = "text"))
roster.info
players <- roster.info$query$results$team$roster$players$player
c1 <- c(players[c("player_key","editorial_team_full_name", "display_position", "status_full")], players$name[1])
c1.df <- as.data.frame(c1)
URL.player <- "https://query.yahooapis.com/v1/yql?q=select%20*%20from%20fantasysports.players%20where%20player_key%3D'371.p.29236'&format=json&diagnostics=true&callback="
yahoo_token = YahooAuth()
p1 <- YahooGetData(URL.player, yahoo_token)
URL.player.stats = "https://query.yahooapis.com/v1/yql?q=select%20*%20from%20fantasysports.players.stats%20where%20league_key%3D'371.l.272272'%20and%20player_key%3D'371.p.29236'%20and%20stats_week%3D1&format=json&diagnostics=true&callback="
ps1 <- YahooGetData(URL.player.stats, yahoo_token)
pstats <- ps1$query$results$player
#https://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20fantasysports.teams.stats%20where%20team_key%3D'238.l.627060.t.8'%20and%20stats_type%3D'date'%20and%20stats_date%3D'2010-05-14'&format=json&diagnostics=true&callback=
|
7dd44e5606771937e7bb1f086a3e4b529c11fa77
|
62c4005400f184f99d7409fad00d3f3400d728a3
|
/2-create_physical_map/5-Translate_maps-2-split_scaffolds.R
|
236758be7792ca2c7587660cc9e0328fad1946f5
|
[
"MIT"
] |
permissive
|
parkingvarsson/Recombination_rate_variation
|
800b7f7622b72e3c51c87be58d5a035f2c42892c
|
e0b046851c001d3368e564ebd5f38766df75bf12
|
refs/heads/master
| 2020-06-01T07:53:37.442299
| 2020-05-26T13:07:57
| 2020-05-26T13:07:57
| 190,707,515
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,909
|
r
|
5-Translate_maps-2-split_scaffolds.R
|
## Translate genetic-map positions from the original scaffolds onto the
## split-scaffold pieces described by the AGP file.  The two per-sex loops of
## the original were byte-for-byte duplicates, so the shared logic is
## factored into translate_map().
Female_orig <- read.csv("Female_with_bins.csv", head = T)
Male_orig <- read.csv("Male_with_bins.csv", head = T)
scaffold_translate <- read.table("v1.1_Potra01-genome.fa.masked.agp", head = F)

# Map one scaffold-based genetic map onto AGP pieces.
#   orig: data.frame with columns Scaffold.ID, scaffold.position, Chr,
#         genetic.position (as read from *_with_bins.csv).
#   agp:  AGP table; V6 = original scaffold id, V7/V8 = piece start/end on the
#         original scaffold, V1 = piece id, V2 = piece offset.
# Returns a data.frame of the same dimensions as `orig` with translated
# coordinates; rows whose position falls on no piece stay NA.
# NOTE(review): the original used strict comparisons, so positions exactly on
# a piece boundary (== V7 or == V8) are left NA -- preserved here; confirm
# that is intended.
translate_map <- function(orig, agp) {
  out <- as.data.frame(matrix(NA, nrow = dim(orig)[1], ncol = dim(orig)[2]))
  names(out) <- c("Scaffold ID", "scaffold position", "LG", "genetic position")
  for (i in seq_len(nrow(orig))) {
    pieces <- agp[agp$V6 == as.character(orig$Scaffold.ID[i]), ]
    # seq_len() (not 1:nrow) so a scaffold with no AGP pieces is skipped
    # cleanly instead of indexing row 0 and erroring on if(NA).
    for (j in seq_len(nrow(pieces))) {
      if (orig$scaffold.position[i] > pieces$V7[j] & orig$scaffold.position[i] < pieces$V8[j]) {
        out$`Scaffold ID`[i] <- as.character(pieces$V1[j])
        out$`scaffold position`[i] <- orig$scaffold.position[i] - pieces$V7[j] + pieces$V2[j]
        out$LG[i] <- orig$Chr[i]
        out$`genetic position`[i] <- orig$genetic.position[i]
      }
    }
  }
  out
}

Female_translated_map <- translate_map(Female_orig, scaffold_translate)
Male_translated_map <- translate_map(Male_orig, scaffold_translate)

write.csv(Male_translated_map, "F1_Male.csv", quote = F, row.names = F)
write.csv(Female_translated_map, "F1_Female.csv", quote = F, row.names = F)
|
cfc40526a60ec4b6766a6392c9da464eb7c2c6ba
|
9a808268700a7ddf02c3b11b3820eed269acd9b9
|
/run_shiny_app.R
|
dfb40569c04cc7e01600e11f5d5a3201688f5e67
|
[] |
no_license
|
wmattbrown/dndhelper
|
92c532b9e2fbbe5d4351df401ea8e18ca9a67de0
|
1ab4b25d83dc90939c9bbb9dc2bd03f6e5a2900c
|
refs/heads/master
| 2022-05-29T12:41:48.356959
| 2020-05-02T20:36:36
| 2020-05-02T20:36:36
| 259,116,249
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 31
|
r
|
run_shiny_app.R
|
# Launch the Shiny app found in the current working directory.
shiny::runApp()
|
0166e6eaa362592a17acdd127d9cb8abcc17c375
|
9a446d74975a2dd0eae8e728ebf3af0dbdee68b7
|
/man/sparse_eye.Rd
|
13c48b9855913ef6fab9ff486c8412fc25c9993c
|
[] |
no_license
|
ifrit98/R2deepR
|
fcef56ee72ce3f604584f8455934a6b8e8e8694e
|
d5d0ddfb08fc54c5ce8f48c9decbbbefc71e1b1a
|
refs/heads/master
| 2022-10-18T11:05:48.346916
| 2020-06-17T17:29:42
| 2020-06-17T17:29:42
| 265,967,957
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 255
|
rd
|
sparse_eye.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{sparse_eye}
\alias{sparse_eye}
\title{Creates a sparse eye_matrix tensor}
\usage{
sparse_eye(size, axis = 1L)
}
\description{
Creates a sparse eye_matrix tensor
}
|
0dfffe9c022b46fcd00d70f9a1e8733acc0add26
|
8dc3f2a21244c018fbd646e12220626d7834aebc
|
/R/summaryBvsA.R
|
a6b5fa62af67395bec26ba37d9b9d751c1190052
|
[
"MIT"
] |
permissive
|
bakuhatsu/measuRoots
|
07146866c0ee5bab8aa4d337273e19cc9d9f4379
|
ca44a282ae5ee5057bded095e6220c4049c2d95f
|
refs/heads/master
| 2021-01-19T09:26:46.273025
| 2018-07-18T16:53:56
| 2018-07-18T16:53:56
| 82,109,259
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,728
|
r
|
summaryBvsA.R
|
#################################
# 4/25/2016 #
# Sven Nelson #
# function: summaryBvsA #
#################################
# Write a function to create a summary between WW and WS
# summaryWWvsWS (A = WW, B = WS)
# Package-private environment: afex/lsmeans cannot always resolve
# function-local variables, so summaryBvsA() stashes its working data here.
pkg.env <- new.env(parent = emptyenv())
#'
#' @export
#'
summaryBvsA <- function(A, B, measurevar = "length", pCuttoff = 0.05, Aname = "WW", Bname = "WS") {
  # Compare two root-measurement data sets structure-by-structure and
  # day-by-day (e.g. well-watered A vs water-stressed B).
  # Returns a data.frame with columns structure, day, diff (B - A summary
  # length) and pval (mixed-model pairwise p-value; NA when >= pCuttoff).
  # NOTE(review): `measurevar` is currently unused -- comparison is always on
  # the `length` column.
  # Offset B's seed ids so seeds remain unique after combining the two sets.
  B$seed <- B$seed + max(A$seed)
  # Add a treatment column to each dataframe with their treatment (WW or WS)
  A$treatment <- Aname
  B$treatment <- Bname
  # Then combine the two dataframes into one.
  combinedData <- rbind(A, B)
  # Build the list of structures to analyse, dropping r4/r5 when all-zero.
  if (length(dplyr::filter(combinedData, structure == "r6")$structure) > 0) {
    structList <- c("shoot", "r1", "r2", "r3", "r4", "r5", "r6")
  } else {
    r4AllZero <- nrow(dplyr::filter(combinedData, structure == "r4" & length != 0)) == 0
    r5AllZero <- nrow(dplyr::filter(combinedData, structure == "r5" & length != 0)) == 0
    if (!r4AllZero & !r5AllZero) {
      structList <- c("shoot", "r1", "r2", "r3", "r4", "r5")
    } else if (r4AllZero & r5AllZero) {
      structList <- c("shoot", "r1", "r2", "r3")
    } else if (r4AllZero & !r5AllZero) {
      structList <- c("shoot", "r1", "r2", "r3", "r5")
    } else if (!r4AllZero & r5AllZero) {
      structList <- c("shoot", "r1", "r2", "r3", "r4")
    }
  }
  # combinedData$treatment <- factor(combinedData$treatment, levels = c("WW", "WS"))
  # combinedData$seed <- factor(combinedData$seed, levels = c(1:max(combinedData$seed)))
  # combinedData$structure <- factor(combinedData$structure, levels = structList)
  # combinedData$day <- factor(combinedData$day, levels = c(0:2))
  combinedData <- combinedData[,c(1,2,3,4,6)] # Remove genotype row (otherwise, need as factor)
  #### Create new dataframe with differences ####
  summA <- rootPlot(rootDF = A, returnSummary = T)
  summB <- rootPlot(rootDF = B, returnSummary = T)
  # create dataframe
  # columns: diff (WW$length - WS$length), day (keep), structure (keep),
  BvsAdf <- summA[,1:2]
  BvsAdf$diff <- summB$length - summA$length
  BvsAdf$pval <- NA
  BvsAdf$rowNM <- row.names(BvsAdf)
  #### Now to do some statistics on this ####
  for (struct in structList) {
    if (length(dplyr::filter(combinedData, structure == struct)$structure) > 0) {
      # trim to 1 structure at a time for processing
      combData_trim <- dplyr::filter(combinedData, structure == struct)
      # BUG FIX: levels were hard-coded to c("WW", "WS") although the column
      # holds Aname/Bname values, so any non-default names produced all-NA
      # factors. Use the actual group names (identical behaviour for defaults).
      combData_trim$treatment <- factor(combData_trim$treatment, levels = c(Aname, Bname))
      combData_trim$seed <- factor(combData_trim$seed, levels = c(1:max(combData_trim$seed)))
      combData_trim$structure <- factor(combData_trim$structure, levels = struct)
      combData_trim$day <- factor(combData_trim$day, levels = c(0:max(unique(combData_trim$day))))
      # lsmeans has an env issue and cannot always access the local variables, so use pkg.env
      pkg.env$combData_tr <- combData_trim
      # Mixed Design Anova with post hoc lsmeans analysis
      # Between-subjects IV: treatment; within-subjects IV: day; DV: length.
      # Mixed effects modelling (console/message output suppressed).
      utils::capture.output(
        utils::capture.output(
          fit_mixed <- afex::mixed(length ~ treatment*day + (1|seed), data = pkg.env$combData_tr),
          type = "message")
      )
      ## Pairwise comparisons per day (| is the same as "by")
      ref3 <- emmeans::emmeans(fit_mixed, ~ treatment|day, data = pkg.env$combData_tr)
      comps <- emmeans::contrast(ref3, method="pairwise")
      # adjusting for each level
      outputLSM <- summary(comps)
      rm(comps)
      outputPvals <- outputLSM$p.value
      # NOTE(review): the lookup below assumes exactly days 0, 1 and 2 exist,
      # while the day factor above allows an arbitrary maximum -- confirm.
      rowNums <- c()
      rowNums[1] <- dplyr::filter(BvsAdf, structure == struct & day == 0)$rowNM
      rowNums[2] <- dplyr::filter(BvsAdf, structure == struct & day == 1)$rowNM
      rowNums[3] <- dplyr::filter(BvsAdf, structure == struct & day == 2)$rowNM
      # Keep only p-values below the cutoff; everything else stays NA.
      for (i in 1:length(rowNums)) {
        pvalue <- outputPvals[i] # day i
        if (!is.na(pvalue) & pvalue < pCuttoff) {
          BvsAdf[rowNums[i],]$pval <- pvalue
        } else {
          BvsAdf[rowNums[i],]$pval <- NA
        }
      }
      rm(pvalue)
    }
    rm(combData_trim)
    rm(combData_tr, envir = pkg.env)
  }
  BvsAdf <- BvsAdf[,1:4] # remove the rowNums column
  return(BvsAdf)
}
|
b4ccd57a0c604ccf2b5597d51683fa8a8dd8e239
|
ee503bac3ea764666106b3eff49406903f066d7d
|
/R/plot_longterm_daily_stats.R
|
cade9e984116cfeed2b731494699820773ffede6
|
[
"Apache-2.0"
] |
permissive
|
bcgov/fasstr
|
a90a88702543084c7d36c7f7386745d4c24672b7
|
10da0bb28e2f55d0b9c2b71de8b028f5a4071c21
|
refs/heads/main
| 2023-04-02T17:38:35.947960
| 2023-03-22T20:25:08
| 2023-03-22T20:25:08
| 108,884,386
| 61
| 14
|
Apache-2.0
| 2023-03-22T20:26:18
| 2017-10-30T17:23:30
|
R
|
UTF-8
|
R
| false
| false
| 16,344
|
r
|
plot_longterm_daily_stats.R
|
# Copyright 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
#' @title Plot long-term summary statistics from daily mean flows
#'
#' @description Plots the long-term mean, median, maximum, minimum, and percentiles of daily flow values for over all months and
#' all data (Long-term) from a daily streamflow data set. Calculates statistics from all values, unless specified.
#' The Maximum-Minimum band can be removed using the \code{plot_extremes} argument and the percentile bands can be
#' customized using the \code{inner_percentiles} and \code{outer_percentiles} arguments. Data calculated using the
#' \code{calc_longterm_daily_stats()} function. Returns a list of plots.
#'
#' @inheritParams calc_longterm_daily_stats
#' @inheritParams plot_annual_stats
#' @inheritParams plot_daily_stats
#' @param add_year Numeric value indicating a year of daily flows to add to the daily statistics plot. Leave blank
#' or set to \code{NULL} for no years.
#' @param plot_extremes Logical value to indicate plotting a ribbon with the range of daily minimum and maximum flows.
#' Default \code{TRUE}.
#' @param inner_percentiles Numeric vector of two percentile values indicating the lower and upper limits of the
#' inner percentiles ribbon for plotting. Default \code{c(25,75)}, set to \code{NULL} for no inner ribbon.
#' @param outer_percentiles Numeric vector of two percentile values indicating the lower and upper limits of the
#' outer percentiles ribbon for plotting. Default \code{c(5,95)}, set to \code{NULL} for no outer ribbon.
#'
#' @return A list of ggplot2 objects with the following for each station provided:
#' \item{Long-term_Monthly_Statistics}{a plot that contains long-term flow statistics}
#' Default plots on each object:
#' \item{Monthly Mean}{mean of all annual monthly means for a given month over all years}
#' \item{Monthly Median}{median of all annual monthly means for a given month over all years}
#' \item{25-75 Percentiles Range}{a ribbon showing the range of data between the monthly 25th and 75th percentiles}
#' \item{5-95 Percentiles Range}{a ribbon showing the range of data between the monthly 5th and 95th percentiles}
#' \item{Max-Min Range}{a ribbon showing the range of data between the monthly minimum and maximums}
#'
#' @seealso \code{\link{calc_longterm_daily_stats}}
#'
#' @examples
#' # Run if HYDAT database has been downloaded (using tidyhydat::download_hydat())
#' if (file.exists(tidyhydat::hy_downloaded_db())) {
#'
#' # Plot longterm daily statistics using data argument with defaults
#' flow_data <- tidyhydat::hy_daily_flows(station_number = "08NM116")
#' plot_longterm_daily_stats(data = flow_data,
#' start_year = 1980)
#'
#' # Plot longterm daily statistics for water years starting in October
#' plot_longterm_daily_stats(station_number = "08NM116",
#' start_year = 1980,
#' end_year = 2010,
#' water_year_start = 10)
#'
#' }
#' @export
# Plot long-term monthly summary statistics (min/max and percentile ribbons,
# mean/median lines) of daily streamflow, one ggplot per station.
# Relies on package-internal helpers (flowdata_import, format_all_cols,
# calc_longterm_daily_stats, calc_monthly_stats, add_date_variables and the
# *_check validators) defined elsewhere in the package.
plot_longterm_daily_stats <- function(data,
                                      dates = Date,
                                      values = Value,
                                      groups = STATION_NUMBER,
                                      station_number,
                                      roll_days = 1,
                                      roll_align = "right",
                                      water_year_start = 1,
                                      start_year,
                                      end_year,
                                      exclude_years,
                                      months = 1:12,
                                      complete_years = FALSE,
                                      ignore_missing = FALSE,
                                      plot_extremes = TRUE,
                                      plot_inner_percentiles = TRUE,
                                      plot_outer_percentiles = TRUE,
                                      inner_percentiles = c(25,75),
                                      outer_percentiles = c(5,95),
                                      add_year,
                                      log_discharge = TRUE,
                                      log_ticks = ifelse(log_discharge, TRUE, FALSE),
                                      include_title = FALSE){

  ## ARGUMENT CHECKS
  ## ---------------
  # Missing optional arguments are normalized to sentinels: NULL for the
  # data/station/exclusions/add_year, and an all-inclusive 0..9999 year range.

  if (missing(data)) {
    data <- NULL
  }
  if (missing(station_number)) {
    station_number <- NULL
  }
  if (missing(start_year)) {
    start_year <- 0
  }
  if (missing(end_year)) {
    end_year <- 9999
  }
  if (missing(exclude_years)) {
    exclude_years <- NULL
  }
  if (missing(add_year)) {
    add_year <- NULL
  }

  logical_arg_check(log_discharge)
  log_ticks_checks(log_ticks, log_discharge)
  logical_arg_check(include_title)
  ptile_ribbons_checks(inner_percentiles, outer_percentiles)
  logical_arg_check(plot_extremes)
  logical_arg_check(plot_inner_percentiles)
  logical_arg_check(plot_outer_percentiles)

  ## FLOW DATA CHECKS AND FORMATTING
  ## -------------------------------

  # Check if data is provided and import it
  flow_data <- flowdata_import(data = data, station_number = station_number)

  # Check and rename columns (substitute() captures the unquoted column
  # arguments so callers can pass bare names).
  flow_data <- format_all_cols(data = flow_data,
                               dates = as.character(substitute(dates)),
                               values = as.character(substitute(values)),
                               groups = as.character(substitute(groups)),
                               rm_other_cols = TRUE)

  ## CALC STATS
  ## ----------

  # First pass with defaults keeps only STATION_NUMBER/Month: this acts as a
  # complete month scaffold that the filtered stats are left-joined onto so
  # months with no data still appear on the x axis.
  longterm_stats_all <- suppressWarnings(
    calc_longterm_daily_stats(data = flow_data,
                              water_year_start = water_year_start,
                              start_year = start_year,
                              end_year = end_year))
  longterm_stats_all <- longterm_stats_all[,1:2]

  longterm_stats <- calc_longterm_daily_stats(data = flow_data,
                                              percentiles = c(inner_percentiles, outer_percentiles),
                                              roll_days = roll_days,
                                              roll_align = roll_align,
                                              water_year_start = water_year_start,
                                              start_year = start_year,
                                              end_year = end_year,
                                              exclude_years = exclude_years,
                                              complete_years = complete_years,
                                              ignore_missing = ignore_missing,
                                              months = months)
  longterm_stats <- dplyr::left_join(longterm_stats_all, longterm_stats,
                                     by = c("STATION_NUMBER", "Month"))

  ## PLOT STATS
  ## ----------

  # Make longterm mean and median their own columns
  longterm_stats_months <- dplyr::filter(longterm_stats, Month != "Long-term")

  # remove NA's from start and end for plotting: the cumsum(complete.cases)
  # trick drops leading all-NA rows; applying it again on the reversed month
  # order also trims trailing all-NA rows.
  longterm_stats_months <- longterm_stats_months[cumsum(stats::complete.cases(longterm_stats_months)) != 0, ]
  longterm_stats_months <- dplyr::arrange(longterm_stats_months, dplyr::desc(Month))
  longterm_stats_months <- longterm_stats_months[cumsum(stats::complete.cases(longterm_stats_months)) != 0, ]
  longterm_stats_months <- dplyr::arrange(longterm_stats_months, Month)

  # The "Long-term" summary row becomes two extra columns (LT_Mean/LT_Med)
  # carried on every monthly row.
  longterm_stats_longterm <- dplyr::filter(longterm_stats, Month == "Long-term")
  longterm_stats_longterm <- dplyr::select(longterm_stats_longterm, STATION_NUMBER, "LT_Mean" = Mean, "LT_Med" = Median)
  longterm_stats <- dplyr::left_join(longterm_stats_months, longterm_stats_longterm, by = "STATION_NUMBER")

  ## ADD YEAR IF SELECTED
  ## --------------------

  if(!is.null(add_year)){

    # data for testing if year is in flow_data
    flow_data_year <- add_date_variables(data = flow_data, water_year_start = water_year_start)
    flow_data_year <- dplyr::filter(flow_data_year, WaterYear %in% start_year:end_year)

    # if year is in data and not excluded, calculate those values
    if (add_year %in% min(flow_data_year$WaterYear):max(flow_data_year$WaterYear) & !(add_year %in% exclude_years)) {

      year_data <- suppressWarnings(calc_monthly_stats(data = flow_data,
                                                       roll_days = roll_days,
                                                       roll_align = roll_align,
                                                       water_year_start = water_year_start,
                                                       start_year = start_year,
                                                       end_year = end_year,
                                                       exclude_years = exclude_years,
                                                       ignore_missing = ignore_missing))
      year_data <- dplyr::filter(year_data, Year == add_year)
      # Month as a factor with the same levels as the long-term table so the
      # join below lines up.
      year_data <- dplyr::mutate(year_data, Month = factor(Month, levels = c(month.abb, "Long-term")))
      year_data <- dplyr::select(year_data, STATION_NUMBER, Month, Year_mean = Mean)

      # Warning if all daily values are NA from the add_year
      for (stn in unique(year_data$STATION_NUMBER)) {
        year_test <- dplyr::filter(year_data, STATION_NUMBER == stn)
        if(all(is.na(year_test$Year_mean))) {
          warning("Monthly data does not exist for the year listed in add_year and was not plotted.", call. = FALSE)
          add_year <- NULL
        }
      }
      if(!all(is.na(year_data$Year_mean))) {
        longterm_stats <- dplyr::left_join(longterm_stats, year_data, by = c("STATION_NUMBER", "Month"))
      }

    } else {
      warning("Monthly data does not exist for the year listed in add_year and was not plotted.", call. = FALSE)
      add_year <- NULL
    }
  }

  # Placeholder: if every statistic column is entirely NA, substitute 1 so
  # ggplot can still render an (empty-looking) figure instead of erroring.
  if (all(sapply(longterm_stats[3:ncol(longterm_stats)], function(x)all(is.na(x))))) {
    longterm_stats[is.na(longterm_stats)] <- 1
  }

  # Create manual colour and fill options; fills are registered only for the
  # ribbon layers actually drawn so the legend matches the figure.
  fill_manual_list <- c()
  if (plot_extremes) {
    fill_manual_list <- c(fill_manual_list, "lightblue2")
    names(fill_manual_list) <- c(names(fill_manual_list), "Minimum-Maximum")
  }
  if (is.numeric(outer_percentiles)) {
    fill_manual_list <- c(fill_manual_list, "lightblue3")
    outer_name <- paste0(min(outer_percentiles),"-",max(outer_percentiles), " Percentiles")
    names(fill_manual_list) <- c(names(fill_manual_list)[1:(length(fill_manual_list)-1)], outer_name)
  }
  if (is.numeric(inner_percentiles)) {
    fill_manual_list <- c(fill_manual_list, "lightblue4")
    inner_name <- paste0(min(inner_percentiles),"-",max(inner_percentiles), " Percentiles")
    names(fill_manual_list) <- c(names(fill_manual_list)[1:(length(fill_manual_list)-1)], inner_name)
  }
  colour_manual_list <- c("Mean" = "paleturquoise", "Median" = "dodgerblue4")
  colour_manual_labels <- c("Mean", "Median")
  if (is.numeric(add_year)) {
    colour_manual_list <- c(colour_manual_list, "yr.colour" = "red")
    colour_manual_labels <- c(colour_manual_labels, paste0(add_year, " Mean"))
  }

  # Create axis label based on input columns
  y_axis_title <- ifelse(as.character(substitute(values)) == "Volume_m3", "Volume (cubic metres)", #expression(Volume~(m^3))
                         ifelse(as.character(substitute(values)) == "Yield_mm", "Yield (mm)",
                                "Discharge (cms)")) #expression(Discharge~(m^3/s))

  # Plot: nest the stats by station and build one ggplot per station with
  # purrr::map2 (.x = station data, .y = station id).
  lt_plots <- dplyr::group_by(longterm_stats, STATION_NUMBER)
  lt_plots <- tidyr::nest(lt_plots)
  lt_plots <- dplyr::mutate(
    lt_plots,
    plot = purrr::map2(
      data, STATION_NUMBER,
      ~ggplot2::ggplot(data = ., ggplot2::aes(x = Month, group = 1)) +
        {if(plot_extremes) ggplot2::geom_ribbon(ggplot2::aes(ymin = Minimum, ymax = Maximum, fill = "Minimum-Maximum"), na.rm = FALSE)} +
        # NOTE(review): aes_string() is deprecated in current ggplot2; the
        # quoted fill name trick maps each ribbon to its legend entry.
        {if(is.numeric(outer_percentiles) & plot_outer_percentiles)
          ggplot2::geom_ribbon(ggplot2::aes_string(ymin = paste0("P",min(outer_percentiles)),
                                                   ymax = paste0("P",max(outer_percentiles)),
                                                   fill = paste0("'",outer_name,"'")), na.rm = FALSE)} +
        {if(is.numeric(inner_percentiles) & plot_inner_percentiles)
          ggplot2::geom_ribbon(ggplot2::aes_string(ymin = paste0("P",min(inner_percentiles)),
                                                   ymax = paste0("P",max(inner_percentiles)),
                                                   fill = paste0("'",inner_name,"'")), na.rm = FALSE)} +
        ggplot2::geom_line(ggplot2::aes(y = Mean, color = "Mean"), size = .9, na.rm = TRUE) +
        ggplot2::geom_line(ggplot2::aes(y = Median, color = "Median"), size = .9, na.rm = TRUE) +
        ggplot2::geom_point(ggplot2::aes(y = Mean), size = 2, na.rm = TRUE, colour = "paleturquoise") +
        ggplot2::geom_point(ggplot2::aes(y = Median), size = 2, na.rm = TRUE, colour = "dodgerblue4") +
        {if(!log_discharge) ggplot2::scale_y_continuous(expand = c(0, 0), breaks = scales::pretty_breaks(n = 8),
                                                        labels = scales::label_number(scale_cut = scales::cut_short_scale()))}+
        {if(log_discharge) ggplot2::scale_y_log10(expand = c(0, 0), breaks = scales::log_breaks(n = 8, base = 10),
                                                  labels = scales::label_number(scale_cut = scales::cut_short_scale()))} +
        {if(log_discharge & log_ticks) ggplot2::annotation_logticks(base = 10, "l", colour = "grey25", size = 0.3, short = ggplot2::unit(0.07, "cm"),
                                                                    mid = ggplot2::unit(0.15, "cm"), long = ggplot2::unit(0.2, "cm"))} +
        ggplot2::scale_x_discrete(expand = c(0.01,0.01)) +
        ggplot2::ylab(y_axis_title) +
        ggplot2::xlab(NULL) +
        ggplot2::theme_bw()+
        ggplot2::labs(colour = 'Daily Statistics') +
        # "XXXXXXX" appears to be the placeholder id for ungauged data --
        # TODO confirm against flowdata_import().
        {if (include_title & unique(.y) != "XXXXXXX") ggplot2::labs(colour = paste0(.y,'\n \nDaily Statistics')) } +
        ggplot2::theme(legend.position = "right",
                       legend.justification = "right",
                       legend.text = ggplot2::element_text(size = 9),
                       panel.border = ggplot2::element_rect(colour = "black", fill = NA, size = 1),
                       panel.grid = ggplot2::element_line(size = .2),
                       axis.title = ggplot2::element_text(size = 12),
                       axis.text = ggplot2::element_text(size = 10),
                       legend.spacing = ggplot2::unit(-0.4, "cm"),
                       legend.background = ggplot2::element_blank()) +
        ggplot2::scale_fill_manual(values = fill_manual_list) +
        ggplot2::scale_color_manual(values = colour_manual_list, labels = colour_manual_labels) +
        {if (is.numeric(add_year)) ggplot2::geom_line(ggplot2::aes(x= Month, y = Year_mean, colour = "yr.colour"), size = 0.9, na.rm = TRUE) } +
        {if (is.numeric(add_year)) ggplot2::geom_point(ggplot2::aes(y = Year_mean), size = 2, na.rm = TRUE, colour = "red") } +
        ggplot2::guides(colour = ggplot2::guide_legend(order = 1), fill = ggplot2::guide_legend(order = 2, title = NULL))
    ))

  # Create a list of named plots extracted from the tibble
  plots <- lt_plots$plot
  if (nrow(lt_plots) == 1) {
    names(plots) <- "Long-term_Daily_Statistics"
  } else {
    names(plots) <- paste0(lt_plots$STATION_NUMBER, "_Long-term_Daily_Statistics")
  }

  plots

}
|
40e2b7b9a0f67f33f7969ebe69ffe16232d9403d
|
8f9d971f28ef816be82dc4904fefd34bfd77bfb8
|
/warfkit/bin/merge.R
|
f4a73a6421494430f913e49c3ba9ebf95ba73399
|
[] |
no_license
|
MarcusWalz/RogueClinicalAvatars
|
d44f17ae9bd7b6d91e13ddd65c27f198a8e27d22
|
b37bc9bbb4100aaf830ad227dbb63028dd374616
|
refs/heads/master
| 2016-09-10T16:20:54.386403
| 2015-12-11T18:22:34
| 2015-12-11T18:22:34
| 31,743,949
| 0
| 2
| null | 2015-04-08T14:43:51
| 2015-03-06T00:16:00
|
R
|
UTF-8
|
R
| false
| false
| 140
|
r
|
merge.R
|
#!/usr/bin/env Rscript
# Merge several RDS files into one.
# Usage: merge.R <output.rds> <input1.rds> [<input2.rds> ...]
# Reads every input, appends them in order, writes the result to the output.
cli_args <- commandArgs(TRUE)
out_path <- cli_args[1]
in_paths <- cli_args[-1]
merged <- Reduce(append, lapply(in_paths, readRDS))
saveRDS(merged, file = out_path)
|
c3dacf21a051cd8939e2398930e1ee26ff75a0a5
|
aeffdf7c301a7180f7a92a837fca2cb61076bdec
|
/R/kmh2knots.r
|
45b0fe8b4a6d84896ee43a31fdab708b3ece77e5
|
[
"MIT"
] |
permissive
|
alfcrisci/biometeoR
|
5d5e874b0b02c79d41f2737107f880d31de318b8
|
e9ee73da6ecc515ecd471cb9ae059a4370a51493
|
refs/heads/master
| 2021-01-10T14:26:38.483831
| 2016-11-30T18:05:40
| 2016-11-30T18:05:40
| 75,196,823
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 355
|
r
|
kmh2knots.r
|
#' kmh2knots
#'
#' Convert a speed from kilometres per hour to knots.
#'
#' @param kmh Numeric. Speed in kilometres per hour.
#' @return Numeric. The same speed expressed in knots.
#'
#'
#' @author Istituto di Biometeorologia Firenze Italy Alfonso Crisci \email{a.crisci@@ibimet.cnr.it}
#' @keywords kmh2knots
#'
#' @export
#'
#'
#'
#'
kmh2knots <- function(kmh) {
  # 1 knot = 1.852 km/h, hence 1 km/h = 1/1.852 = 0.539957 knots.
  kmh * 0.539957
}
|
80352559f47633db65eecfa5b7c6a44887f58347
|
a2968913dcfecff3fe9d190cdaf6306b6e8ea67a
|
/Problem_1/Vicky_problem_1.R
|
0d76e4fb24d5ee395b5e7776385fc0c7ec425823
|
[] |
no_license
|
RobinL/dash_lunch_n_code
|
d013c1628fadbe3cc4837d33b7d0cf163bdc0033
|
f92a06162b2cdacef6a1356b6d656b9ed2da223f
|
refs/heads/master
| 2021-04-30T17:25:00.175609
| 2017-03-06T15:23:36
| 2017-03-06T15:23:36
| 80,207,383
| 0
| 0
| null | 2017-03-06T15:23:37
| 2017-01-27T12:56:35
|
Python
|
UTF-8
|
R
| false
| false
| 298
|
r
|
Vicky_problem_1.R
|
# EXERCISE 1: sum of the multiples of 3 or 5 below 1000 (Project Euler #1).
mult3 <- seq(3, 999, by = 3)
print(mult3)
mult5 <- seq(5, 999, by = 5)
print(mult5)
combined <- c(mult3, mult5)
distinct_vals <- unique(combined)
answer <- sum(distinct_vals)

# Same computation packaged as a function of the (exclusive) upper bound.
exercise1 <- function(n) {
  sum(unique(c(seq(3, n - 1, by = 3), seq(5, n - 1, by = 5))))
}
exercise1(1000)
|
48f134272e95b205747fff087159f9d73b20d382
|
0daef96b634ea50138677a7df05c68e97a931df3
|
/analysis/old_files/final_analysis.r
|
74e0a6409076f55d248bc3e446a7174574767100
|
[] |
no_license
|
sbmkvp/footfall_from_wifi_sensors
|
f4f35798f4beb59d6e923cda8932e4e89f4620e6
|
3ae21cd176c468b9f5e989c1d3bf7d3910662e60
|
refs/heads/master
| 2021-03-19T15:09:22.272394
| 2019-03-12T20:31:20
| 2019-03-12T20:31:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 354
|
r
|
final_analysis.r
|
# Read the Oxford Street sensor data and bin the wifi signal strength into
# two classes ("low"/"high") using k-means class-interval breaks.
# NOTE(review): removed the original `rm(list = ls())` -- wiping the caller's
# global environment from inside a script is an anti-pattern.
library(tidyverse)
library(magrittr)
library(classInt)  # fixed: classIntervals() below comes from classInt, which was never loaded

sensor <-
  "../data/oxst_sensor.csv" %>%
  read.csv(stringsAsFactors = FALSE) %>%
  as_tibble() %>%
  mutate(X = NULL,
         sig = cut(signal,
                   classIntervals(signal, 2, "kmeans")$brks,
                   c("low", "high"))) %>%
  print()

# Reclassifying some data by combining last 40%
|
00353247f42d16c0ee3f5264a253b332c92dc101
|
0ccdd0abbf3d39f1c5e971e26a60f53719ba6fb4
|
/ml/scripts/ml_examples/randomForest1.R
|
7f937e9425bfe16dc0572fa6fc15116595d4476a
|
[] |
no_license
|
pickle-donut/RScripts
|
9a87bd616ea3cd89a94c98e8438c3bc80432392b
|
2a60daf6cfbeaa194f696daf699b387544f8f163
|
refs/heads/master
| 2022-11-09T00:01:57.999428
| 2020-06-15T23:27:00
| 2020-06-15T23:27:00
| 270,508,904
| 0
| 0
| null | 2020-06-15T23:27:01
| 2020-06-08T03:06:02
|
R
|
UTF-8
|
R
| false
| false
| 399
|
r
|
randomForest1.R
|
# Random-forest fitting template.
# NOTE(review): `inFeatures`, `IDs`, `target`, `actual`, `nodesize` and
# `ntree` are not defined in this script -- they are presumably injected by
# whatever host sources it (e.g. an embedded-R runner); confirm upstream.
library(randomForest)

# Assemble the modelling frame (ids + target + features), drop incomplete rows.
features = data.frame(inFeatures)
myData = data.frame(IDs, target, features)
myData = na.omit(myData)
# Drop the first two columns: the IDs and target bound in just above.
myData = myData[-c(1,2)]

# Fit the forest and score the same (training) data.
rdf = randomForest(x = myData, y = actual, nodesize = nodesize, importance = TRUE, keep.forest = TRUE, ntree = ntree)
predictedRDF = predict(rdf, myData)

# Per-feature importance table.
columns = colnames(features)
varImportance = data.frame(features = columns, importance(rdf))
|
878726d56976149ac54f7a0e9bde9f576d479f33
|
76fd0fea6f657e2d926fff82561aceff8dd84b5c
|
/Raster Data.R
|
2ddbf46ab01e4138dc500da2a623a2498b44c71e
|
[] |
no_license
|
carterpowell/Bryphy2
|
2ec2a7217ebd5acf1817c9fd85ccc515878ee4e2
|
afdc26c5229ad47d4748a5854c967c8e9c8c471f
|
refs/heads/master
| 2021-01-25T09:45:19.269191
| 2018-04-15T20:05:25
| 2018-04-15T20:05:25
| 123,317,871
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,876
|
r
|
Raster Data.R
|
# Overlay a moss-richness raster on WorldClim bioclim layers, extract the
# climate values at richness cells, then explore them with PCA.
# NOTE(review): `MossRichnessRasterNA` is not created in this script -- it is
# assumed to already exist in the workspace; confirm upstream.

# Libraries ---------------------------------------------------------------
require(raster)
require(rgdal)
library(maps)
library(mapdata)
library(dismo)
library(rJava)
library(maptools)
library(jsonlite)
require(grDevices)
require(ggplot2)
library(devtools)
# NOTE(review): this two-argument install_github("pkg", "user") form is the
# long-deprecated devtools signature; installing inside a script also re-runs
# on every execution.
install_github("ggbiplot", "vqv")
library(ggbiplot)
library(factoextra)

# Overlaying A World Clim Raster onto our Raster --------------------------
# Download data from World Clim (19 bioclim layers at 2.5 arc-min).
currentEnv=getData("worldclim", var="bio", res=2.5)
# Crop our data to show only the New World.
model.extent<-extent(min(-170),max(-20),min(-60),max(110))
modelEnv=crop(currentEnv,model.extent)
# Map mean annual temperature as a test (may take a while).
# bio1 is stored as temperature * 10, hence the /10.
plot(modelEnv[["bio1"]]/10, main="Annual Mean Temperature")
map('worldHires',xlim=c(min(-170),max(-20)), ylim=c(min(-60),max(100)), fill=FALSE, add=TRUE)

# Project latitude and longitude onto our raster; create a new gradient.
colfunc <- colorRampPalette(c("dodgerblue", "darkgreen","darkgoldenrod1", "firebrick1"))
MossRichnessRasterNAll <- projectRaster(MossRichnessRasterNA, crs='+proj=longlat')
croppedMossRichnessRasterNAll = crop(MossRichnessRasterNAll, model.extent)
plot(croppedMossRichnessRasterNAll, col = colfunc(200))

# Resample the climate stack to the same grid as the richness raster.
WC.new = resample(modelEnv, croppedMossRichnessRasterNAll, "bilinear")
# If required (for masking), set extents to match.
ex = extent(modelEnv)
moss.raster.projection.cropped = crop(MossRichnessRasterNAll, ex)
# Remove data which falls outside one of the rasters (if you need to).
WC.new = mask(WC.new, moss.raster.projection.cropped)
colfunc1 <- colorRampPalette(c("dodgerblue", "firebrick1"))
plot(WC.new[["bio1"]]/10, main="Annual Mean Temperature", col = colfunc1(200))

# Convert the richness raster to an x/y data frame and keep complete rows.
Moss.LL.data <- as.data.frame(moss.raster.projection.cropped, xy =TRUE)
names(Moss.LL.data)[names(Moss.LL.data) == 'blank_100km_raster'] <- 'Richness'
Moss.LL.data.new <- Moss.LL.data[complete.cases(Moss.LL.data),]
Moss.LL.data.new1 <- Moss.LL.data.new[,1:2]
# Extract the 19 bioclim values at each richness coordinate.
data <- data.frame(coordinates(Moss.LL.data.new1),
                   extract(WC.new, Moss.LL.data.new1))
finaldataset <- merge(data, Moss.LL.data.new, by=c("x","y"))
# Take NA's out.
finalslimdataset<- finaldataset[complete.cases(finaldataset),]
# Create a dataset with only bioclim variables and no NAs
# (columns 3:21 = the 19 bioclim layers, after the x/y columns).
nonadata <- data[complete.cases(data),]
pcadata <- nonadata[,3:21]

# Run PCA on this data (centred and scaled).
pca <- prcomp(pcadata, center = TRUE, scale. = TRUE)
# Loadings
head(pca$x)
pca$rotation
# Visuals: scree plot.
plot(pca, type = "l")
# Scatterplot (biplot with ellipse).
p <- ggbiplot(pca, obs.scale = 1, var.scale = 1, ellipse = TRUE, circle = TRUE)+
  geom_point(size = 0.01)
print(p)
# Circle plot of variable contributions.
fviz_pca_var(pca,
             col.var = "contrib", # Color by contributions to the PC
             gradient.cols = c("#00AFBB", "#E7B800", "#FC4E07"),
             repel = TRUE)
# Plot of variance for each component.
fviz_eig(pca)
|
9331fc29890da71030336415925b99eebbc5da25
|
6bbd67ced962b50b1bb1d84ba0dc1dccd1f8cc61
|
/R/bacteria_new.R
|
6b68ff8117f0ebc121116afb3c11bc171344350d
|
[] |
no_license
|
chrislopez28/bacteria
|
1d1c3e1199f0c2df197bd0c92fdf6fb02a12ba88
|
3b5a42d7f95e4113af888cf279e9700cf7a5cafd
|
refs/heads/master
| 2020-04-08T16:40:53.887193
| 2019-11-14T19:41:54
| 2019-11-14T19:41:54
| 159,529,752
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,403
|
r
|
bacteria_new.R
|
# Geometric-mean water-quality-objective (WQO) limits for a beneficial-use
# (BU) / water-type combination.
#
# Args:
#   BU:         beneficial-use designation ("REC-1", "LREC-1", "REC-2").
#   water_type: "marine" or "fresh".
#
# Returns a named numeric vector c(ecoli_WQO, fc_WQO, tc_WQO, ent_WQO);
# indicators without a limit for the combination are NA.
grab_limits_geo_new <- function(BU, water_type){
  # TODO: warnings
  # TODO: SHELL limits
  # Fix: default to all-NA so unrecognized BU/water_type combinations return
  # a valid vector instead of erroring on an undefined `limits`.
  limits <- c(NA_real_, NA_real_, NA_real_, NA_real_)
  if (water_type == "marine"){
    if (BU == "REC-1"){
      limits <- c(NA, NA, NA, 30)
    }
  }
  if (water_type == "fresh"){
    if (BU == "REC-1"){
      limits <- c(100, NA, NA, NA)
    } else if (BU == "LREC-1"){
      limits <- c(126, NA, NA, NA)
    } else if (BU == "REC-2"){
      limits <- c(NA, 2000, NA, NA)
    }
  }
  names(limits) <- c("ecoli_WQO", "fc_WQO", "tc_WQO", "ent_WQO")
  limits
}
# Flag geometric-mean water-quality-objective (WQO) exceedances.
#
# For each indicator (E. coli, fecal coliform, total coliform, enterococcus)
# this adds to `df`:
#   * `<ind>_WQO_geo`: the numeric limit, populated only on rows whose
#     SampleDate falls on a Sunday, NA otherwise;
#   * `exceed_<ind>_WQO_geo`: TRUE when the indicator's geomean column
#     exceeds the limit AND its geo_count column is >= 5.
#
# `df` must already carry `<ind>_geomean` / `<ind>_geo_count` columns
# (presumably produced by bact_geomeans -- confirm) plus a SampleDate column.
check_geolimits_new <- function(df, BU = "REC-1", water_type = "marine"){
  # TODO: Check if df has appropriate columns / Use colcheck()
  # TODO: Check if df has consecutive SampleDates from first row to last row / Create consecutivecheck()
  limits <- grab_limits_geo_new(BU, water_type)

  # Lookup table pairing each indicator's input columns, output columns and
  # numeric limit. NOTE(review): tibble::frame_data() is deprecated in favour
  # of tibble::tribble().
  dt <- tibble::frame_data(
    ~geomean, ~geocount, ~WQO_geo, ~WQO_geo_val, ~exceed_WQO_geo,
    "ecoli_geomean", "ecoli_geo_count", "ecoli_WQO_geo", limits["ecoli_WQO"], "exceed_ecoli_WQO_geo",
    "fc_geomean", "fc_geo_count", "fc_WQO_geo", limits["fc_WQO"], "exceed_fc_WQO_geo",
    "tc_geomean", "tc_geo_count", "tc_WQO_geo", limits["tc_WQO"], "exceed_tc_WQO_geo",
    "ent_geomean", "ent_geo_count", "ent_WQO_geo", limits["ent_WQO"], "exceed_ent_WQO_geo"
  )

  for (i in seq_along(dt$geomean)){
    # Tidy-eval: build the two output columns by name for each indicator.
    df <- df %>% dplyr::mutate(!!as.name(dt$WQO_geo[[i]]) := dplyr::if_else(lubridate::wday(SampleDate, label = TRUE) == "Sun", dt$WQO_geo_val[[i]], as.double(NA))) %>%
      dplyr::mutate(!!as.name(dt$exceed_WQO_geo[[i]]) :=
                      dplyr::if_else(!!as.name(dt$geomean[[i]]) > !!as.name(dt$WQO_geo[[i]]) & !!as.name(dt$geocount[[i]]) >= 5, TRUE, FALSE))
  }
  return(df)
}
# Flag single-sample water-quality-objective (WQO) exceedances.
#
# For each indicator this adds to `df`:
#   * `<ind>_WQO_ss`: the single-sample limit wherever the indicator's
#     result column is non-NA, NA otherwise;
#   * `exceed_<ind>_WQO_ss`: TRUE when the result exceeds the limit.
#
# `df` must carry the result columns named in the lookup table below
# (ecoli, fecal_coliform, total_coliform, enterococcus).
check_sslimits_new <- function(df, BU = "REC-1", water_type = "marine"){
  # TODO: Check if df has appropriate columns / Use colcheck()
  # TODO: Check if df has consecutive SampleDates from first row to last row / Create consecutivecheck()
  limits <- grab_limits_ss_new(BU, water_type)

  # Lookup table of indicator columns. NOTE(review): the qual/mdl/rl columns
  # listed here are not referenced in the loop below -- presumably reserved
  # for future qualifier/detection-limit handling; confirm.
  # NOTE(review): tibble::frame_data() is deprecated in favour of tribble().
  dt <- tibble::frame_data(
    ~result, ~qual, ~mdl, ~rl, ~WQO_ss, ~WQO_ss_val, ~exceed_WQO_ss,
    "ecoli", "ecoli_qual", "ecoli_mdl", "ecoli_rl", "ecoli_WQO_ss", limits["ecoli_WQO"], "exceed_ecoli_WQO_ss",
    "fecal_coliform", "fc_qual", "fc_mdl", "fc_rl", "fc_WQO_ss", limits["fc_WQO"], "exceed_fc_WQO_ss",
    "total_coliform", "tc_qual", "tc_mdl", "tc_rl", "tc_WQO_ss", limits["tc_WQO"], "exceed_tc_WQO_ss",
    "enterococcus", "ent_qual", "ent_mdl", "ent_rl", "ent_WQO_ss", limits["ent_WQO"], "exceed_ent_WQO_ss"
  )

  for (i in seq_along(dt$result)){
    # Tidy-eval: attach the limit to every sampled row, then flag exceedances.
    df <- df %>% dplyr::mutate(!!as.name(dt$WQO_ss[[i]]) := dplyr::if_else(!is.na(!!as.name(dt$result[[i]])),
                                                                           dt$WQO_ss_val[[i]],
                                                                           as.double(NA)),
                               !!as.name(dt$exceed_WQO_ss[[i]]) := dplyr::if_else(!!as.name(dt$result[[i]]) > !!as.name(dt$WQO_ss[[i]]),
                                                                                  TRUE,
                                                                                  FALSE))
  }
  return(df)
}
# Single-sample water-quality-objective (WQO) limits for a beneficial-use
# (BU) / water-type combination.
#
# Args:
#   BU:         beneficial-use designation ("REC-1", "LREC-1", "REC-2").
#   water_type: "marine" or "fresh".
#
# Returns a named numeric vector c(ecoli_WQO, fc_WQO, tc_WQO, ent_WQO);
# indicators without a limit for the combination are NA.
grab_limits_ss_new <- function(BU, water_type){
  # TODO: warnings
  # TODO: SHELL limits
  # Fix: default to all-NA so unrecognized BU/water_type combinations return
  # a valid vector instead of erroring on an undefined `limits`.
  limits <- c(NA_real_, NA_real_, NA_real_, NA_real_)
  if (water_type == "marine"){
    if (BU == "REC-1"){
      limits <- c(NA, NA, NA, 110)
    }
  }
  if (water_type == "fresh"){
    if (BU == "REC-1"){
      limits <- c(320, NA, NA, NA)
    } else if (BU == "LREC-1"){
      limits <- c(576, NA, NA, NA)
    } else if (BU == "REC-2"){
      limits <- c(NA, 4000, NA, NA)
    }
  }
  names(limits) <- c("ecoli_WQO", "fc_WQO", "tc_WQO", "ent_WQO")
  limits
}
# Full bacteria compliance pipeline for a set of stations.
#
# Tidies/averages the raw results, computes rolling geomeans, then applies
# the geomean and single-sample WQO checks station by station using each
# station's own BU/water_type. Relies on helpers defined elsewhere in this
# package (average_results_daily, tidy_bacteria, replace_nd, expand_dates,
# bact_geomeans, exceed_ss, order_bacteria_columns).
#
# Args:
#   df:         raw bacteria results with a StationCode column.
#   sites:      vector of station codes to analyze.
#   BU:         beneficial-use designation(s), parallel to `sites`.
#   water_type: "marine"/"fresh" value(s), parallel to `sites`.
#   ...:        forwarded to bact_geomeans() and check_geolimits_new().
#
# Returns a list with one fully-checked data frame per analyzed station.
bact_check_new <- function(df, sites, BU, water_type, ...){
  df <- df %>%
    average_results_daily()
  df <- tidy_bacteria(df)
  df <- replace_nd(df)

  # Keep only the requested sites that actually appear in the data.
  analysis_sites <- data.frame(sites, BU, water_type, stringsAsFactors = FALSE)
  names(analysis_sites) <- c("StationCode", "BU", "water_type")
  analysis_sites <- analysis_sites[(analysis_sites$StationCode %in% unique(df$StationCode)), ]

  out <- vector("list", length(analysis_sites$StationCode))
  results <- vector("list", length(analysis_sites$StationCode))

  for (i in seq_along(analysis_sites$StationCode)){
    # Per-station: fill date gaps, compute geomeans, apply both WQO checks.
    out[[i]] <- df %>%
      dplyr::filter(StationCode == analysis_sites$StationCode[[i]])
    out[[i]] <- expand_dates(out[[i]])
    out[[i]] <- bact_geomeans(out[[i]], ...) %>%
      check_geolimits_new(BU = analysis_sites$BU[[i]], water_type = analysis_sites$water_type[[i]], ...) %>%
      check_sslimits_new(BU = analysis_sites$BU[[i]], water_type = analysis_sites$water_type[[i]])
    # Placeholder columns (fecal:total ratio and secondary TC check) are
    # created empty here and presumably filled by exceed_ss() -- confirm.
    out[[i]] <- out[[i]] %>% mutate("fc_to_tc" = NA,
                                    "tc_WQO_ss_2" = NA,
                                    "exceed_tc_WQO_ss_2" = NA)
    out[[i]] <- exceed_ss(out[[i]])
    out[[i]] <- order_bacteria_columns(out[[i]])
    #results[[i]] <- convertWeather(out[[i]])
    #results[[i]] <- results[[i]] %>% filter(Data_Row == TRUE)
    #results[[i]] <- results[[i]] %>%
    #  group_by(StationCode, WeatherCondition)
    #results[[i]] %>% summarize(exceedances = sum(exceed_day, na.rm = TRUE), n = n())
  }
  out
}
# Annual exceedance counts per analysis site.
#
# Runs the full bacteria compliance check (bact_check_new) for each station,
# then tallies annual exceedances over each station's period of record via
# annual_exceedances()/first_date()/last_date() (defined elsewhere).
#
# Args:
#   df:         daily bacteria results with a StationCode column.
#   sites:      vector of station codes to analyze.
#   BU:         beneficial-use designation(s), parallel to `sites`.
#   water_type: "marine"/"fresh" value(s), parallel to `sites`.
#   ...:        accepted for interface compatibility; not used in this body.
#
# Returns a list of annual-exceedance tables, one per analyzed station.
bact_ann_exceeds_new <- function(df, sites, BU, water_type, ...){
  analysis_sites <- data.frame(sites, BU, water_type, stringsAsFactors = FALSE)
  names(analysis_sites) <- c("StationCode", "BU", "water_type")
  analysis_sites <- analysis_sites[(analysis_sites$StationCode %in% unique(df$StationCode)), ]

  out <- bact_check_new(df, sites, BU, water_type)

  results <- vector("list", length(analysis_sites$StationCode))
  for (i in seq_along(analysis_sites$StationCode)){
    station <- as.character(analysis_sites$StationCode[[i]])
    start <- first_date(out[[i]])
    end <- last_date(out[[i]])
    # Fix: removed leftover print(i)/print(start)/print(end) debug output.
    results[[i]] <- annual_exceedances(out[[i]], station = station, start_date = start, end_date = end)
  }
  results
}
# Monthly STV (statistical threshold value) exceedance summary.
#
# Per station / weather condition / year / month, counts how many daily
# results exceeded the single-sample limit (`exceed_day`) and flags months
# where more than 10% of samples were above the STV.
#
# NOTE(review): `sites`, `BU` and `water_type` are accepted but never used in
# this body -- presumably kept for interface parity with bact_check_new;
# confirm before relying on them.
# NOTE(review): relies on dplyr/lubridate being attached (filter, mutate,
# year, month, group_by are called without a namespace prefix).
stv_check <- function(df, sites, BU, water_type){
  df <- df %>%
    filter(Data_Row %in% c(TRUE)) %>%
    mutate(year = year(SampleDate),
           month = month(SampleDate))
  by_month <- group_by(df, WeatherCondition, StationCode, year, month)
  # Returned value: one row per group with counts and the >10% flag.
  dplyr::summarize(by_month,
                   above_STV = sum(exceed_day, na.rm = TRUE),
                   samples = n(),
                   percent_above = above_STV/samples,
                   exceed_STV_WQO = ifelse(percent_above > 0.1, TRUE, FALSE))
}
|
87665107c898d9b51ed8d1067f392b9a9bfcbd32
|
1a700256e18c81352fb68dff337547d3c11d0542
|
/scripts/dataframe_to_coverage.R
|
79eb20fa4a78e22816b68098071c0e8965fd635e
|
[] |
no_license
|
endrebak/pyranges-paper
|
bc4fbc0d8e05667bf04f7216f143b5be34f1a44e
|
b006ebd18d487354ce6e115df7172f3de38f52f0
|
refs/heads/master
| 2020-03-13T13:07:25.258777
| 2019-04-04T13:10:28
| 2019-04-04T13:10:28
| 131,132,556
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 457
|
r
|
dataframe_to_coverage.R
|
# Snakemake rule script: time the construction of coverage Rle's from a
# genomic-ranges file, writing the elapsed seconds and a text preview of the
# result to the rule's declared outputs.
# NOTE(review): `file_to_grange()` comes from scripts/helpers.R; the
# `snakemake` S4 object is injected by snakemake's R script wrapper.
source("scripts/helpers.R")
library(GenomicRanges)
library(data.table)

## chip = import()

f = snakemake@input[[1]]
gr = file_to_grange(f)

print("Starting to create Rles")
# Time only the coverage() computation itself.
start.time <- Sys.time()
chip_list = coverage(gr)
end.time <- Sys.time()

time.taken <- end.time - start.time
time.taken <- as.numeric(time.taken, units="secs")

# Elapsed seconds -> "timing" output; printed Rle list -> "preview" output.
write(time.taken, snakemake@output[["timing"]])
capture.output(print(chip_list), file=snakemake@output[["preview"]])
|
4583dada50658c65b963280be24502f9d1a11f3b
|
3231a73caedc0e36bf9a3d65555a31c9602ede16
|
/plot2.R
|
56377dc78f5aa5ed24bc377e7fb5271689a134ed
|
[] |
no_license
|
ezracode/ExData_Plotting1
|
38ec12531839719dc292430b7fd3410c2fda4aa9
|
fd9e9d97fa950dfcd7be6a5c045fd6849da020c4
|
refs/heads/master
| 2020-03-18T07:35:45.922526
| 2018-05-29T18:25:14
| 2018-05-29T18:25:14
| 134,462,415
| 0
| 0
| null | 2018-05-22T19:00:23
| 2018-05-22T19:00:22
| null |
UTF-8
|
R
| false
| false
| 888
|
r
|
plot2.R
|
# Exploratory Data Analysis course -- plot 2: Global Active Power over time
# for 2007-02-01 and 2007-02-02, written to plot2.png (480x480 px).
# NOTE(review): removed the original `rm(list = ls())` -- wiping the global
# environment from inside a script is an anti-pattern.
library(dplyr)

MyData <- read.csv(file = "../household_power_consumption.txt",
                   header = TRUE, sep = ";", na.strings = "?")

# Keep only the two target days; dates are stored as d/m/Y strings.
# (Parsing once into a vector replaces the original duplicated comparisons.)
target_days <- as.Date(c("1/2/2007", "2/2/2007"), "%d/%m/%Y")
MyData2 <- filter(MyData, as.Date(Date, "%d/%m/%Y") %in% target_days)

# Combine date and time into a single timestamp for the x axis.
MyData2$datetime <- strptime(paste(MyData2$Date, MyData2$Time), "%d/%m/%Y %H:%M:%S")

png(filename = "plot2.png",
    width = 480, height = 480, units = "px", pointsize = 12)

# Empty frame first, then the line, matching the original two-step draw.
plot(y = as.numeric(MyData2$Global_active_power), x = MyData2$datetime,
     type = "n",
     ylab = "Global Active Power (kilowatts)",
     xlab = ""
)
points(y = as.numeric(MyData2$Global_active_power), x = MyData2$datetime, type = "l", col = "darkcyan")

dev.off()

print("End of the Script")
|
bdd0a3d23af6fc9a2f6ffcd62bfb96d8d5cd280b
|
e57899108b795f2f2f9411150d564128e945f391
|
/Code for Ethnicity vs. Perceived Age.R
|
6f03e20e2da7379fb8fb011e0340631ddec710b8
|
[] |
no_license
|
NayilRArana/Ethnicity-vs-PerceivedAge
|
dcb85ac852e67970955a37ab69b149a3517fcecd
|
b046d28dd071eff96eb7e83d5fe2f259a5266e0e
|
refs/heads/master
| 2020-09-08T21:36:44.965371
| 2020-05-11T15:19:57
| 2020-05-11T15:19:57
| 221,249,003
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,411
|
r
|
Code for Ethnicity vs. Perceived Age.R
|
# Ethnicity vs. perceived age: exploratory plots of guess error by true age,
# sex and race, followed by a one-sided Welch t-test comparing errors for
# Asian vs. White subjects.
# NOTE(review): install.packages() inside a script re-installs on every run;
# normally installation is done once, outside the script.
install.packages("ggplot2")
library("ggplot2")
theme_update(plot.title = element_text(hjust = 0.5))

# NOTE(review): `head=T` relies on partial argument matching for `header`
# and on the reassignable alias T for TRUE.
guesses = read.csv("2019_project_data.csv", head=T)
head(guesses)
# Summary of all columns except 1 and 4.
summary(guesses[,-c(1,4)])

# Base-graphics histograms of the signed and absolute errors.
hist(guesses$error, xlab = "Error", ylab = "Frequency", main = "Histogram of Errors")
hist(guesses$abs_error, xlab = "Absolute Error", ylab = "Frequency", main = "Histogram of Absolute Errors")

# Group-wise mean/variance of the error by true age, sex and race.
round(tapply(X=guesses$error, INDEX=guesses$tru_age, FUN = mean), 3)
round(tapply(X=guesses$error, INDEX=guesses$tru_age, FUN = var), 3)
round(tapply(X=guesses$error, INDEX = guesses$sex, FUN = mean), 3)
tapply(X = guesses$error, INDEX = guesses$race, FUN = mean)
tapply(X = guesses$error, INDEX = guesses$race, FUN = var)

# Base-graphics boxplots.
boxplot(error ~ tru_age, data = guesses, main = "Boxplot of Error by True Age", xlab = "True Age", ylab = "Error")
boxplot(error ~ sex, data = guesses, main = "Boxplot of Error by Sex", xlab = "Sex", ylab = "Error")
boxplot(error ~ race, data = guesses, main = "Boxplot of Error by Race", xlab = "Race", ylab = "Error")

# ggplot2 versions of the same figures.
qplot(guesses$error,
      geom="histogram",
      binwidth = 5,
      main = "Histogram of Errors",
      xlab = "Error",
      ylab = "Frequency",
      fill=I("blue"),
      col=I("red"),
      alpha=I(.2),
      xlim=c(-30,20))

qplot(guesses$error,
      geom="histogram",
      binwidth = 2,
      main = "Histogram of Absolute Errors",
      xlab = "Absolute Error",
      ylab = "Frequency",
      fill=I("blue"),
      col=I("red"),
      alpha=I(.2),
      xlim=c(0, 20))

ggplot(guesses, aes(x=factor(tru_age), y=error, group=tru_age)) +
  geom_boxplot() +
  scale_x_discrete("True Age", labels = guesses$tru_age, breaks = guesses$tru_age) +
  ylab("Error") +
  ggtitle("Boxplot of Error by True Age")

ggplot(guesses, aes(x=factor(sex), y=error, group=sex)) +
  geom_boxplot() +
  scale_x_discrete("Sex", labels = guesses$sex, breaks = guesses$sex) +
  ylab("Error") +
  ggtitle("Boxplot of Error by Sex")

ggplot(guesses, aes(x=factor(race), y=error, group=race)) +
  geom_boxplot() +
  scale_x_discrete("Race", labels = guesses$race, breaks = guesses$race) +
  ylab("Error") +
  ggtitle("Boxplot of Error by Race")

# One-sided Welch t-test: is the mean error for Asian subjects less than
# for White subjects?
asians = guesses[guesses$race == 'Asian',]
whites = guesses[guesses$race == 'White',]
asians_error = asians$error
whites_error = whites$error
t.test(asians_error, whites_error, var.equal = FALSE, alternative = "less")
|
fd54c78b39fddc516be5e47c0ec87091affa0be3
|
95f70abdaa291233dfce40f26b8f4e908b776ed7
|
/Estatistica/Prova/Prova 1 parte 1.R
|
82e949e76e6609cb1095faf627448fcb921332f3
|
[] |
no_license
|
leonardomaruyama/Mestrado
|
bf6be463eafdf9b6d22f9624bf9e07bfa6f9a0c0
|
7e4925c9c429750db9f7e9f5d9f56d2a47a163aa
|
refs/heads/master
| 2020-03-28T02:46:35.878475
| 2018-09-15T18:42:17
| 2018-09-15T18:42:17
| 147,595,639
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 907
|
r
|
Prova 1 parte 1.R
|
# Prova (exam) - item 1
# Load the exam data (semicolon-separated, "." decimal) and summarize it.
dados <- read.table("exer1.txt", header = TRUE, sep = ";", dec = ".")
# NOTE(review): attach() is discouraged; columns are referenced bare below.
attach(dados)
dados
names(dados)
summary(dados)
sort(Direito)  # fixed: original line had an unbalanced ")" (syntax error)
sd(Direito)
# Column names restored from mojibake ("Polรญtica" etc. was UTF-8 text
# mis-decoded) -- confirm they match the headers in exer1.txt.
sd(Política)
sd(Estatística)
# moda: return the modal value(s) of a vector, matrix or factor.
#
# Builds a frequency table and returns every value that achieves the maximum
# count, coerced to numeric from the table's names. If every value occurs the
# same number of times there is no mode and a message is printed instead;
# non vector/matrix/factor input (e.g. a list) also prints an error message.
moda <- function(d)
{
  if ((is.vector(d) || is.matrix(d) || is.factor(d) == TRUE) &&
      (is.list(d) == FALSE))
  {
    dd <- table(d)
    valores <- which(dd == max(dd))
    # Collect every value tied for the maximum frequency.
    vmodal <- 0
    for (i in 1:(length(valores)))
      if (i == 1) vmodal <- as.numeric(names(valores[i]))
      else
        vmodal <- c(vmodal, as.numeric(names(valores[i])))
    # All values equally frequent => no mode.
    if (length(vmodal) == length(dd))
      print("conjunto sem valor modal")
    else return(vmodal)
  }
  # fixed mojibake in the message ("parรขmetro" -> "parâmetro")
  else print("o parâmetro deve ser um vetor ou uma matriz")
}
# Modal values, boxplots and contingency tables for the exam scores.
# Column names restored from mojibake (original file was mis-decoded UTF-8:
# "Polรญtica" -> "Política", "Seรงรฃo" -> "Seção", "Inglรชs" -> "Inglês") --
# confirm they match the headers in exer1.txt.
sort(Política)
moda(sort(Direito))
moda(sort(Política))
moda(sort(Estatística))

boxplot(Direito)
boxplot(Política)
boxplot(Estatística)
# Statistics scores split by class section.
boxplot(Estatística~Seção)

# Joint relative frequencies and English-level counts.
prop.table(table(Seção, Estatística))
table(Inglês)
prop.table(table(Seção, Inglês))
|
b70f818c890048ac3280cb774348ac43e3fa547e
|
33a057798ba05fd94edd8332fc7c0a8b29af068e
|
/newIndexColumn.r
|
17cdd272159cc678271dc72ff709473ace420d2b
|
[
"MIT"
] |
permissive
|
jluzuria2001/codeSnippets
|
810d333eedeb9543cea1091bc9c93787fd8c70ae
|
71b3ebb3e10236bdd907023208530558544b53ae
|
refs/heads/master
| 2021-06-23T09:49:07.070873
| 2021-06-17T16:08:35
| 2021-06-17T16:08:35
| 21,698,099
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 649
|
r
|
newIndexColumn.r
|
# Snippets: check whether the rows of data frame `error1` kept their
# original order, then compute the share of out-of-order rows.
# NOTE(review): `error1` is created elsewhere; assumed to have a column V6
# holding each row's expected 0-based position -- confirm upstream.

# Create a new index column from 0 to nrow - 1.
error1$index<-seq.int(0, nrow(error1)-1, 1)

# Compare the columns and create a new column:
# 0 if V6 matches the fresh index (row still in place), 1 otherwise.
error1$order <- ifelse(error1$V6 == error1$index,0,1)

# Count the number of zeros per column (zeros = values equal to 0).
colSums(error1 == 0)

# As a function -- count the zeros in every numeric column via plyr.
library(plyr)
nonzero <- function(x) sum(x == 0)
numcolwise(nonzero)(error1)

# Using a table of the order flags.
zeros<-error1$order
a <- table(zeros)
a

# Using which(): share of rows out of order.
# NOTE(review): 2001 looks like a hard-coded row count -- presumably
# nrow(error1); confirm.
numero_zeros<-length(which(zeros==0))
percent_disorder<-(numero_zeros*100)/2001
|
c52260c8fb6224cc4b5597a527b86ee8725bed49
|
529197dd346db560797340e771a9ef373d960d4d
|
/man/si_physical_constants.Rd
|
d17e655a5563247ac6f3108dc3f7f2d0f1eb2093
|
[] |
no_license
|
khaors/rphysunits
|
1fc375ab6dfd6ccd154a924054a2b7d9e88232b5
|
657bd09185213361c0f24bf3f4e38763f245bdcc
|
refs/heads/master
| 2020-06-20T02:52:20.184551
| 2016-12-13T13:28:59
| 2016-12-13T13:28:59
| 74,886,235
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,413
|
rd
|
si_physical_constants.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rphysunits.R
\docType{data}
\name{si_physical_constants}
\alias{c_avogadro}
\alias{c_bohr_magneton}
\alias{c_boltzmann}
\alias{c_e}
\alias{c_electric}
\alias{c_electron_charge}
\alias{c_electron_rest_mass}
\alias{c_elementary_charge}
\alias{c_faraday}
\alias{c_fine_structure}
\alias{c_first_radiation}
\alias{c_g_force}
\alias{c_gravity}
\alias{c_gravity_accel}
\alias{c_h_bar}
\alias{c_ice_point}
\alias{c_magnetic}
\alias{c_molar_gas}
\alias{c_nuclear_magneton}
\alias{c_planck}
\alias{c_proton_rest_mass}
\alias{c_second_radiation}
\alias{c_speed_of_light}
\alias{c_standard_molar_volume}
\alias{c_stefan_boltzmann}
\alias{c_universal_gas}
\alias{c_water_triple_point}
\alias{c_wiens_radiation}
\alias{si_physical_constants}
\title{SI_physical_constants}
\format{An object of class \code{physical_quantity} of length 2.}
\usage{
c_speed_of_light
c_magnetic
c_electric
c_planck
c_h_bar
c_avogadro
c_universal_gas
c_molar_gas
c_standard_molar_volume
c_boltzmann
c_electron_charge
c_elementary_charge
c_e
c_faraday
c_first_radiation
c_second_radiation
c_stefan_boltzmann
c_wiens_radiation
c_electron_rest_mass
c_proton_rest_mass
c_fine_structure
c_bohr_magneton
c_nuclear_magneton
c_gravity
c_gravity_accel
c_g_force
c_ice_point
c_water_triple_point
}
\description{
Physical constants
}
\keyword{datasets}
|
2fb3c5d3a35c6461544ec249df91e36519b965d1
|
11e9a640ad60972f0a1ff2fb8509ec998059ccb0
|
/R/LoadActivitiesLong.R
|
bf14a163b3aa2209e4f23a0834b8536bcdc67e1a
|
[
"MIT"
] |
permissive
|
jakeyeung/TissueCiradianAnalysis
|
f53f6a65e1e5489e6ee9c465a612c1cce108d256
|
6c29a33820c8c0ab6dabbd992cc2412b199fc7af
|
refs/heads/master
| 2020-09-21T05:58:35.578267
| 2020-08-07T16:35:13
| 2020-08-07T16:35:13
| 224,702,276
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,576
|
r
|
LoadActivitiesLong.R
|
LoadActivitiesLong <- function(indir, act.file="activities.all", se.file="standarderrors.all", shorten.motif.name=FALSE, make.cnames = TRUE){
source("~/projects/tissue-specificity/scripts/functions/ActivitiesMergedFunctions.R")
source("~/projects/tissue-specificity/scripts/functions/GetTissueTimes.R")
source("~/projects/tissue-specificity/scripts/functions/RemoveP2Name.R")
merged.act <- read.table(file.path(indir, act.file))
merged.se <- read.table(file.path(indir, se.file))
if (make.cnames){
# Rename colnames ---------------------------------------------------------
colnames(merged.act) <- GetMergedColnames(colnames(merged.act))
colnames(merged.se) <- GetMergedColnames(colnames(merged.se))
# Create long -------------------------------------------------------------
tissues <- GetTissues.merged(colnames(merged.act))
times <- GetTimes.merged(colnames(merged.act))
experiments <- GetExperiments.merged(colnames(merged.act))
} else {
# put cnames in tissues and worry about it later
tissues <- colnames(merged.act)
times <- NA
experiments <- NA
}
if (shorten.motif.name){
rownames(merged.act) <- sapply(rownames(merged.act), RemoveP2Name)
}
act.long <- data.frame(gene = rep(rownames(merged.act), ncol(merged.act)),
tissue = rep(tissues, each = nrow(merged.act)),
time = as.numeric(rep(times, each = nrow(merged.act))),
exprs = as.numeric(unlist(merged.act)),
se = as.numeric(unlist(merged.se)),
experiment = rep(experiments, each = nrow(merged.act)))
return(act.long)
}
MakeCnamesLivKidWTKO <- function(act.s){
act.s$sampname <- act.s$tissue
act.s$tissue <- as.character(sapply(as.character(act.s$sampname), function(s) strsplit(s, "_")[[1]][[1]]))
act.s$time <- as.numeric(sapply(as.character(act.s$sampname), function(s) strsplit(s, "_")[[1]][[2]]))
act.s$geno <- as.character(sapply(as.character(act.s$sampname), function(s) strsplit(s, "_")[[1]][c(-1, -2)]))
act.s$tissue <- paste(act.s$tissue, act.s$geno, sep = "_")
act.s$tissue <- factor(act.s$tissue, levels = c("Liver_SV129", "Liver_BmalKO", "Kidney_SV129", "Kidney_BmalKO"))
act.s$experiment <- "rnaseq"
act.s$sampname <- NULL
return(act.s)
}
GetTimesTissuesGenoKL <- function(cnames){
# Get TImes Tissues and Genotypes for Kidney and Liver
times <- lapply(cnames, function(cname){
# either BmalKO or SV129
if (grepl("BmalKO", cname)){
geno <- "BmalKO"
} else if (grepl("SV129", cname)){
geno <- "SV129"
} else {
warning("Neither BmalKO or SV129")
}
# Cnames can be Kidney_BmalKO12 or Kidney_BmalKO_12, I dont know why??
n.divs <- length(strsplit(cname, "_")[[1]])
if (n.divs == 2){
time <- as.numeric(strsplit(cname, geno)[[1]][[2]])
tissue <- strsplit(cname, paste0("_", geno))[[1]][[1]]
} else if (n.divs == 3){
tissue <- strsplit(cname, "_")[[1]][[1]]
time <- strsplit(cname, "_")[[1]][[3]]
} else {
print(cname)
warning("N divs should be 2 or 3")
}
return(list(time = time, geno = geno, tissue = tissue))
})
}
LoadActivitiesLongKidneyLiver <- function(indir, act.file="activities.all", se.file="standarderrors.all", collapse.geno.tissue=TRUE, shorten.motif.name=TRUE){
# handle for Kidney and Liver SV129 and BmalKO
source("~/projects/tissue-specificity/scripts/functions/ActivitiesMergedFunctions.R")
source("~/projects/tissue-specificity/scripts/functions/GetTissueTimes.R")
source("~/projects/tissue-specificity/scripts/functions/RemoveP2Name.R")
source('scripts/functions/LiverKidneyFunctions.R')
merged.act <- read.table(file.path(indir, act.file))
merged.se <- read.table(file.path(indir, se.file))
if (shorten.motif.name){
rownames(merged.act) <- sapply(rownames(merged.act), RemoveP2Name)
}
time.geno.tissue <- GetTimesTissuesGenoKL(colnames(merged.act))
tissues <- sapply(time.geno.tissue, function(ll) ll[["tissue"]])
genos <- sapply(time.geno.tissue, function(ll) ll[["geno"]])
times <- sapply(time.geno.tissue, function(ll) ll[["time"]])
act.long <- data.frame(gene = rep(rownames(merged.act), ncol(merged.act)),
geno = rep(genos, each = nrow(merged.act)),
tissue = rep(tissues, each = nrow(merged.act)),
time = as.numeric(rep(times, each = nrow(merged.act))),
exprs = as.numeric(unlist(merged.act)),
experiment = "rnaseq",
se = as.numeric(unlist(merged.se)))
if (collapse.geno.tissue){
act.long <- CollapseTissueGeno(act.long)
}
return(act.long)
}
LoadActivitiesLongDhs <- function(indir, act.file, se.file){
# expect columns to be just tissues (no time).
source("~/projects/tissue-specificity/scripts/functions/ActivitiesMergedFunctions.R")
source("~/projects/tissue-specificity/scripts/functions/GetTissueTimes.R")
merged.act <- read.table(file.path(indir, act.file))
merged.se <- read.table(file.path(indir, se.file))
# Create long -------------------------------------------------------------
tissues <- colnames(merged.act)
act.long <- data.frame(gene = rep(rownames(merged.act), ncol(merged.act)),
tissue = rep(tissues, each = nrow(merged.act)),
exprs = as.numeric(unlist(merged.act)),
se = as.numeric(unlist(merged.se)))
return(act.long)
}
|
e2ba9f35916a19d323844a48a9587b85e3c1d7ee
|
1a02d9cc7cc28ae04bcca89cd3607e29419ab707
|
/R/CxF.R
|
0b258daa701e7e5f5052af97d4f523e3aedea439
|
[] |
no_license
|
franciscoxaxo/sankeydiagram
|
3b78068dc10ec0ab5031f4f4b26fc34837997576
|
e0fe4a67a6600d9f91fb35865a267d0ac9f86a63
|
refs/heads/main
| 2023-08-02T12:13:22.716039
| 2021-10-08T01:15:07
| 2021-10-08T01:15:07
| 408,893,868
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,047
|
r
|
CxF.R
|
#' Title CxF Columnas a Filas
#' Sort a dataframe that contains "responses in multiple columns" in a single column
#' @param data dataframe
#'
#' @return a CSV file
#' @export
#' @import utils
#' @examples
CxF <- function(data){
i..ActiveScreener.Id <- 1; Time.of.Screening <- 1 ; Title <- 1 ; Authors <- 1
Question <- 1; List.of.Reviewers <- 1; Answers <- 1
df <- data.frame(i..ActiveScreener.Id , Time.of.Screening, Title, Authors, Question, List.of.Reviewers, Answers)
rm(i..ActiveScreener.Id ); rm(Time.of.Screening); rm(Title); rm(Authors); rm(Question); rm(List.of.Reviewers);
rm(Answers)
for(j in 1:nrow(data)){
for(i in 7:ncol(data)){
if(!is.na(data[j, i])){
aux <- c(data[j, 1:6], data[[j, i]])
names(aux) <- c("i..ActiveScreener.Id", "Time.of.Screening", "Title", "Authors",
"Question", "List.of.Reviewers","Answers")
df <- rbind(df, aux)
}
}
}
df = df[-1, ]
write.csv(df, "data.csv", sep = ";", fileEncoding = "UTF-8")
}
|
5c0593ca23c54f1dcabf0c74be1dbc67cf831ecb
|
90bb1dabe91ac66076eefee72e59f8bc75d3315d
|
/man/generate_sub_Gaussian_fn.Rd
|
fb34406027ba80c3fc4f904c717904ae29890027
|
[
"MIT"
] |
permissive
|
shinjaehyeok/SGLRT_paper
|
31b1dfaac5fdae07c8a106ed86802559b4ac3808
|
cbca2c5d9cfc6a2a5fbc8af6a3183fa133b9c377
|
refs/heads/master
| 2022-12-30T23:09:42.248401
| 2020-10-24T07:21:30
| 2020-10-24T07:21:30
| 299,136,203
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 750
|
rd
|
generate_sub_Gaussian_fn.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/breg_fn_class.R
\name{generate_sub_Gaussian_fn}
\alias{generate_sub_Gaussian_fn}
\title{Pre-defined psi^* and Bregman divergence functions for sub-Gaussian family.}
\usage{
generate_sub_Gaussian_fn(sig = 1, is_add = TRUE)
}
\arguments{
\item{sig}{The sigma parameter of the sub-Gaussian family (default = 1).}
\item{is_add}{If \code{is_add} is \code{TRUE} then return psi^* functions for \code{SGRL_CI_additive}. Otherwise, return Bregman divergence functions for \code{SGLR_CI}.}
}
\value{
A list of pre-defined psi^* and Bregman divergence functions for sub-Gaussian family.
}
\description{
Pre-defined psi^* and Bregman divergence functions for sub-Gaussian family.
}
|
d9b7f379456ec1b1f03cedef9238d76a4dd3b06a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/brt/examples/tpvaltreat.Rd.R
|
b2529142b5d10086b0a9b850a5c497f67a9795ad
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 521
|
r
|
tpvaltreat.Rd.R
|
library(brt)
### Name: tpvaltreat
### Title: Hypothesis testing using the Student t Distribution with H0:
### abs(mu) <= delta
### Aliases: tpvaltreat
### Keywords: htest
### ** Examples
x=seq(from=-30, to=30, length.out=100)
data=do.call(
rbind
, lapply(
seq_len(10)
, function(delta)
rbind(
data.frame(x, pval=tpvaltreat(x, delta=delta, se=1, df=3), delta=delta)
)
)
)
ggplot2::qplot(x, pval, data=data, color=as.factor(delta), linetype=as.factor(delta), geom='line')
|
0855f3ba70536866bac364e3ddf9afe5f98f7e1b
|
9864c9557984a3b58fcb686176620771e2545f00
|
/scrape_alexa.R
|
8665e89bbe056b68ecdad33c898599bdc555c145
|
[] |
no_license
|
Toniiiio/ToLearn
|
31ff9b9dca990d0d30b0962e5e14c5194f9238a5
|
51c4348fef0ac8220bac4e17ca1d3be5ad92e4d0
|
refs/heads/main
| 2023-02-27T00:05:41.370765
| 2021-02-05T21:31:09
| 2021-02-05T21:31:09
| 325,248,399
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 992
|
r
|
scrape_alexa.R
|
url <- "https://www.google.de/async/reviewSort?vet=12ahUKEwjhoKa-ytPuAhW1mFwKHa34DTAQxyx6BAgBEC0..i&ved=2ahUKEwjhoKa-ytPuAhW1mFwKHa34DTAQjit6BQgBEI0B&yv=3&async=feature_id:0x47a84e2206708be7%3A0xd1ea1b76ebd1b17,review_source:All%20reviews,sort_by:qualityScore,start_index:20,is_owner:false,filter_text:,associated_topic:,next_page_token:CgIICg,_pms:s,_fmt:pc"
url2 <- "https://www.google.de/async/reviewSort?vet=12ahUKEwjhoKa-ytPuAhW1mFwKHa34DTAQxyx6BAgBEC0..i&ved=2ahUKEwjhoKa-ytPuAhW1mFwKHa34DTAQjit6BQgBEI0B&yv=3&async=feature_id:0x47a84e2206708be7%3A0xd1ea1b76ebd1b17,review_source:All%20reviews,sort_by:qualityScore,start_index:20,is_owner:false,filter_text:,associated_topic:,next_page_token:CgIIFA,_pms:s,_fmt:pc"
start_index:20
next_page_token:CgIIFA
pattern fornext_page_token
pattern
CgIICg DE
CgIIFA G
CgIIHg IJ
CgIIKA L
CgIIMg NO
CgIIPA Q
Rg ST
UA V
Wg Xy
ZA a
Bg
doc <- url %>% httr::GET() %>% content
rr <- doc %>% SteveAI::showHtmlPage()
|
08d682da56974eb4510467306af6ba628f7241be
|
6b231e2bfd52f7d4e7736f1794a773eea985165f
|
/ira-tweets-2018/Animation.R
|
65a8a697706d3fb3daf7d365c20bb28b531a3941
|
[
"Apache-2.0"
] |
permissive
|
profibadan/social-media-analyses
|
75fa9d9a46955a91605b08862dcd3d753aab8420
|
b979a8a924862aef2bb9b310498475c16d8d7314
|
refs/heads/master
| 2021-09-20T20:02:29.832859
| 2018-08-15T05:04:36
| 2018-08-15T05:04:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,292
|
r
|
Animation.R
|
# Script to produce an animated visualization of tweets vs followers and change over time
library(tidyverse)
library(lubridate)
library(scales)
library(ggthemes)
library(gganimate)
# Clone 538 GH repo to this location and read in the files
iraTweets <- map_dfr(list.files('~/git-repos/fivethirtyeight/russian-troll-tweets/', full.names = TRUE, pattern = '*.csv'), function(f) {
read_csv(f, col_types=cols(.default = col_character()))
}) %>% mutate(TweetID=row_number(), publish_date=as_date(mdy_hm(publish_date)), followers=as.integer(followers)) %>%
filter(publish_date >= '2014-10-01')
accounts <- iraTweets %>% group_by(author) %>% mutate(followers=as.integer(followers)) %>% summarize(tweets=n(), followers=max(followers)) %>%
filter(followers > 0) %>%
inner_join(iraTweets %>% select(author, account_category) %>% distinct(), by='author') %>%
mutate(account_category_high=case_when(
!(account_category %in% c('RightTroll', 'LeftTroll', 'NewsFeed', 'Fearmonger')) ~ 'Commercial/HashtagGamer/Unknown',
TRUE ~ account_category
))
# shared characteristics between static and animated plot
createBasePlot <- function(accountsDf) {
accountsDf %>% filter(account_category != 'NonEnglish') %>%
ggplot() +
geom_point(aes(x=followers, y=tweets, color=account_category_high)) +
scale_x_continuous(trans='log', breaks=c(1, 10, 150, 3000, 60000), labels=comma) +
scale_y_continuous(trans='log', breaks=c(1, 10, 150, 3000, 60000), labels=comma) +
scale_color_brewer(type = 'qual', palette = 'Dark2') +
theme_economist_white() +
theme(panel.grid.minor = element_blank(), legend.text = element_text(size=12)) +
labs(x='Maximum Cumulative Followers of the Account (Log Scale)', y='Total Tweets by the Account (Log Scale)',
title='Volume of Tweets and Number of Followers of Russian IRA Twitter Accounts',
subtitle='Tweets published on or after October 1, 2014',
caption='Source: FiveThirtyEight/Clemson University Dataset of Russian IRA Tweets\nNote: Excludes "NonEnglish" category accounts and accounts with no followers',
color='Account Category')
}
# static plot (no fun!)
createBasePlot(accounts) +
geom_vline(xintercept=60, color='blue', alpha=.5, linetype=2) +
geom_vline(xintercept=1000, color='blue', alpha=.5, linetype=2) +
geom_text(aes(x=65, y=70000), label='60 followers', color='blue', alpha=.5, hjust='left', size=3) +
geom_text(aes(x=1025, y=70000), label='1000 followers', color='blue', alpha=.5, hjust='left', size=3)
# need to create a grid of dates so the animation is smooth and continuous
authors <- unique(iraTweets$author)
dates <- seq(from=min(iraTweets$publish_date)-1, to=max(iraTweets$publish_date), by='days')
accountDaysGrid <- tibble(
author=rep(authors, each=length(dates)),
publish_date=rep(dates, length(authors))
)
accountDays <- iraTweets %>% group_by(author, publish_date) %>%
summarize(tweets=n(), followers=max(followers)) %>%
group_by(author) %>%
mutate(idx=row_number(), priorIdx=idx-1) %>% ungroup()
accountDays <- accountDays %>%
left_join(accountDays %>% select(nextIdx=idx, -priorIdx, priorFollowers=followers, author), by=c('author', 'priorIdx'='nextIdx')) %>%
mutate(followerDelta=case_when(idx==1 ~ followers, TRUE ~ followers-priorFollowers)) %>%
select(author, publish_date, followerDelta, tweets) %>%
right_join(accountDaysGrid, by=c('author', 'publish_date')) %>%
mutate(followers=case_when(is.na(followerDelta) ~ 0, TRUE ~ followerDelta), tweets=case_when(is.na(tweets) ~ 0L, TRUE ~ tweets)) %>%
arrange(author, publish_date) %>%
group_by(author) %>%
mutate_at(vars(tweets, followers), cumsum) %>%
filter(followers > 0) %>%
inner_join(iraTweets %>% select(author, account_category) %>% distinct(), by='author') %>%
mutate(account_category_high=case_when(
!(account_category %in% c('RightTroll', 'LeftTroll', 'NewsFeed', 'Fearmonger')) ~ 'Commercial/HashtagGamer/Unknown',
TRUE ~ account_category
)) %>% select(-followerDelta)
# animated plot!
animate(createBasePlot(accountDays) + transition_time(publish_date) +
labs(subtitle='Cumulative tweets published vs. followers as of {frame_time}', x='Followers of the Account (Log Scale)'),
nframes = 120, length = 20, height=700, width=800)
|
925720e59d8efef54d2476b9210f6e14cc99e65e
|
60bcd2f1649970ca07f7f076a25959f9ff3cd1e7
|
/R basics _2.r
|
15a33df3b27e3a3f54a8a7661a06bae151b9ebf0
|
[] |
no_license
|
tarunlahrod/R-basics
|
55a1a5760fdf18d80a2633d75ea7329308daba72
|
02dacda8f8831ff8df4004bcac49fc3a0abb6f44
|
refs/heads/master
| 2020-05-04T21:43:18.550380
| 2019-04-05T13:25:00
| 2019-04-05T13:25:00
| 179,486,079
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,685
|
r
|
R basics _2.r
|
# R_day2
# R lecture 2
m <- matrix(c(3:14), nrow = 4) # assigns data values column-wise
print(m)
m <- matrix(c(3:14), nrow = 4, byrow = TRUE) # assigns data values row-wise
print(m)
r = c("row1", "row2", "row3", "row4") # row vector
c = c("col1", "col2", "col3") # column vector
m <- matrix(c(3:14), nrow = 4, byrow = TRUE, dimnames = list(r,c))
print(m)
print(m[1,3])
print(m[,1]) # prints the complete 1st column
print(m[1,]) #prints the complete 1st row
# Matrix computation
m1 <- matrix(c(3,7,-5,8,2,9), nrow = 2)
print(m1)
m2 <- matrix(c(8,0,2,4,2,7), nrow = 2)
print(m2)
# sum
t <- m1 + m2
print(t)
# differnce
t <- m1 - m2
print(t)
# multiplication (element wise)
t <- m1 * m2
print(t)
# division
t <- m1 / m2
print(t)
# Arrays
v1 <- c(1,2,3)
v2 <- c(9,8,7,6,5,4)
a <- array(c(v1, v2), dim = c(3,3,2))
print(a)
# assigning names to rows and colums of the two arrays
r <- c("row1", "row2", "row3")
c <- c("col1", "col2", "col3")
n <- c("Matrix1", "Matrix2")
a <- array(c(v1,v2), dim = c(3,3,2), dimnames = list(r, c, n))
print(a)
# pick a particular row of a column from these matrices
print(a[1,2,2])
print(a[1, , 2])
# Array to matrix conversion
x <- a[,,1]
print(x)
y <- a[,,2]
# Apply function - for array calculation
r <- apply(a, c(2), sum)
print(r)
# vector and factor
d <- c("East", "West", "East", "North", "East", "West")
print(d)
print(is.factor(d))
fd <- factor(d)
print(is.factor(fd))
# Data frame
height <- c(132, 151, 162, 139, 166, 147, 122)
weight <- c(48, 49, 66, 53, 67, 52, 40)
gender <- c("male", "male", "female", "female", "male", "female", "male")
t <- data.frame(height, weight)
t <- data.frame(height, weight, gender)
print(t)
print(t$gender) # whenever we make a dataframe, the vector is implicitly converted to factor
print(is.factor(t$gender))
print(is.factor(t$height)) # numeric values are never converted to factor
print(is.factor(t$weight))
# another data frame.
emp.data <- data.frame(
emp_id = c(1:5),
emp_name = c("Chhota Bheem", "Raju", "Chhutki", "Kaalia", "Jaggu Bandar"),
salary = c(623.3, 515.2, 611.0, 729.0, 843.25),
start_date = as.Date(c("2012-01-01", "2013-09-23", "2014-11-15", "2014-05-11", "2015-03-27")),
stringsAsFactors = FALSE
)
print(emp.data)
print(summary(emp.data))
r <- data.frame(emp.data$emp_name, emp.data$salary)
print(r)
# print(emp.data[1:2],) #buggy
print(emp.data[c(3,5), c(2, 4)])
emp.data$dept <- c("IT","Operations", "IT", "HR", "Finance")
print(emp.data)
# result <- #some code left out
# To combine two databases
?rbind
.libPaths()
search()
# to install package
# install.packages("XML") # uncomment to install XML package
# get working directory
getwd()
#set working directory
setwd("/home/tarun/Desktop")
getwd()
d = read.csv("data.csv")
print(d)
print(is.data.frame(d))
# operations on data.csv
print(ncol(d))
print(nrow(d))
sal <- max(d$salary)
print(sal)
# print all details of a particular
e <- subset(d, salary == max(salary))
print(e)
# to extract the data of IT dept only
print(subset(d, dept == "IT"))
# find those who are from IT dept and has a pay higher than 600
print(subset(d, dept == "IT" & salary > 600))
# find those having joining date(start) before 2014-01-01
print(subset(d, as.Date(start) < as.Date("01/01/2014")))
print(subset(d, dept == "IT"))
# Writing into a csv file
x <- subset(d, dept == "IT")
write.csv(x, "output.csv") # this will add a new column in beginning for index, to remove it use this...
write.csv(x, "output without index.csv", row.names = FALSE)
# Installing and importing packages
install.packages("xlsx")
library("xlsx")
install.packages("rjson")
library("rjson")
install.packages("RMySQL")
library("RMySQL")
|
3efdb56f2bea82f4d79f0ca2dd643d432ff8bd40
|
034f0428c5fbc4c346c1158d320cc87daaee6030
|
/plot1.R
|
73c1891d4ac46870e6b7c1b4a06aff8ee2295bf3
|
[] |
no_license
|
enrique1790/Exploratory-Data-Analysis-Course-Project-2
|
ba3f90a7b162688a9494295b5c6cffd31e4de2ff
|
559ae9aea4406905c2ca11bd50325d308a9538e0
|
refs/heads/master
| 2020-03-13T18:28:44.959352
| 2018-04-28T18:39:24
| 2018-04-28T18:39:24
| 131,236,252
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,384
|
r
|
plot1.R
|
############################
#Unzipping and Loading Files
############################
library("data.table")
path <- getwd()
download.file(url = "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
, destfile = paste(path, "dataFiles.zip", sep = "/"))
unzip(zipfile = "dataFiles.zip")
if (!exists("NEI")) {
# print("Loading NEI Data, please wait.")
NEI <- readRDS("summarySCC_PM25.rds")
}
if (!exists("SCC")) {
# print("Loading SCC Data.")
SCC <- readRDS("Source_Classification_Code.rds")
}
#######################################################################################
# 1. Have total emissions from PM2.5 decreased in the United States from 1999 to 2008?
#Using the base plotting system, make a plot showing the total PM2.5 emission from
#all sources for each of the years 1999, 2002, 2005, and 2008.
#######################################################################################
aggregatedTotalByYear <- aggregate(Emissions ~ year, NEI, sum)
barplot(height=aggregatedTotalByYear$Emissions, names.arg=aggregatedTotalByYear$year,
xlab="years", ylab=expression('total PM'[2.5]*' emission'),
main=expression('Total PM'[2.5]*' emissions at various years'))
dev.copy(png, file="plot1.png", width=480, height=480)
dev.off()
# total emissions from PM2.5 decreased in the United States from 1999 to 2008.
|
4cfd3221689ac6c58fcf5114be9655c1fd459b06
|
a7f4e760b3d3464b4886fbe1a04d734ea64755da
|
/example/R/fcn_documentation.R
|
eff1829ddd88399fd605de95e7b04c2d8eacdf1e
|
[] |
no_license
|
ChrisZasa/MTXQC_documentation
|
88d96b52ceb1e9ecc595b32d3b9b30060b1ae803
|
699d59aa6c510910519e54cd10034c4b9ea4c476
|
refs/heads/master
| 2020-04-21T12:27:09.870629
| 2019-07-21T20:47:26
| 2019-07-21T20:47:26
| 154,499,085
| 0
| 4
| null | 2018-10-24T13:54:23
| 2018-10-24T12:41:13
|
TeX
|
UTF-8
|
R
| false
| false
| 691
|
r
|
fcn_documentation.R
|
library(kableExtra)
library(dplyr)
#' Import csv-files for appendix
#'
#'
#'
appendix_print <- function(path_def, top_n = NULL, ...) {
temp1 = read.csv(path_def, header = TRUE)
if (ncol(temp1) == 1) {
temp1 = read.csv(path_def, header = TRUE, sep = ";")
}
if (!is.null(top_n)) {
temp_return = temp1[1:top_n,]
#str(temp1)
# temp1 %>%
# kable(escape = TRUE, booktabs = TRUE ) %>%
# kable_styling(c("striped", "condensed"),
# latex_options = "striped",
# full_width = TRUE)
} else {
# str(temp1)
temp_return = temp1
}
return(temp_return)
}
options(kableExtra.html.bsTable = T)
|
8fd84e8b128ec57a8952c16a72bed43a047aa77b
|
9dcc1b98baf0d4df40ef9470330993660d725bca
|
/man/printed_taxonomy.Rd
|
393763692217c4ae53b21311213f44bbe1cf0638
|
[
"MIT"
] |
permissive
|
ropensci/taxa
|
b1aa00a0d8256916cdccf5b6a8f39e96e6d5ea9c
|
ed9b38ca95b6dd78ef6e855a1bb8f4a25c14b8fd
|
refs/heads/master
| 2022-04-30T23:28:44.735975
| 2022-04-12T05:10:10
| 2022-04-12T05:10:10
| 53,763,679
| 40
| 9
|
NOASSERTION
| 2021-07-08T18:11:32
| 2016-03-13T02:27:40
|
HTML
|
UTF-8
|
R
| false
| true
| 368
|
rd
|
printed_taxonomy.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/taxonomy.R
\name{printed_taxonomy}
\alias{printed_taxonomy}
\title{Prepare taxonomy for printing}
\usage{
printed_taxonomy(x, color = FALSE)
}
\arguments{
\item{color}{Use color?}
}
\value{
character
}
\description{
Prepare taxonomy for printing. Makes color optional.
}
\keyword{internal}
|
fd67e774bc1a3684079e90143a377a176eb5777e
|
b7dbc8fa280edb6215a6260e1401e0f83b9954b0
|
/OpenDataGroup/Macro/man/item_dat.Rd
|
2c4e095730caccc4207efd979d9dbd9d5bd1ae50
|
[] |
no_license
|
cwcomiskey/Misc
|
071c290dad38e2c2e6a5523d366ea9602c4c4e44
|
1fad457c3a93a5429a96dede88ee8b70ea916132
|
refs/heads/master
| 2021-05-14T18:10:35.612035
| 2020-02-17T15:09:51
| 2020-02-17T15:09:51
| 116,065,072
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 487
|
rd
|
item_dat.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{item_dat}
\alias{item_dat}
\title{Item Data}
\format{A data frame with 399 rows and three columns:
\describe{
\item{item_code}{BLS code for the item}
\item{item_name}
\item{display_level}{Level in categorical hierarchy}
}}
\source{
\url{https://download.bls.gov/pub/time.series/cu/cu.item}
}
\usage{
item_dat
}
\description{
Item code, name, and display level
}
\keyword{datasets}
|
fc123679bbd95aa475a7d858c43d3bed5b821db3
|
39004319c6604b419fb4402bd022213529ca93be
|
/run_analysis.R
|
51628a36e3b972fa6b4045b54690731ff70e3295
|
[] |
no_license
|
thornvol/GettingCleaningData
|
6b13a22cf35553e0a52e94864295cf23f0930d51
|
9207ec29be55c012281cc0f7302e82e448685d8a
|
refs/heads/master
| 2020-05-18T03:44:13.626799
| 2015-02-23T03:18:45
| 2015-02-23T03:18:45
| 31,186,122
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,367
|
r
|
run_analysis.R
|
require(data.table)
require(reshape2)
#######################################################################################################################
# Step 1: Merge training and test sets to create one data set
# Load Test Data
# Before running statments below: set working directory to download samsung data directory
x_test <- read.table("UCI HAR Dataset\\test\\X_test.txt")
y_test <- read.table("UCI HAR Dataset\\test\\y_test.txt")
subject_test <- read.table("UCI HAR Dataset\\test\\subject_test.txt")
# Load Training Data
x_train <- read.table("UCI HAR Dataset\\train\\X_train.txt")
y_train <- read.table("UCI HAR Dataset\\train\\y_train.txt")
subject_train <- read.table("UCI HAR Dataset\\train\\subject_train.txt")
# Combine the columns from x_test and y_test to make one test data set
test <- cbind(x_test, subject_test, y_test)
# Combine the columns from x_train and y_train to make one train data set
train <- cbind(x_train, subject_train, y_train)
# Load features.txt for naming variables in combined test + train data set
features <- read.table("UCI HAR Dataset\\features.txt")
# Load Activity lables
activitylabels <- read.table("UCI HAR Dataset\\activity_labels.txt")
# Combing test + train data set into one data set
combined <- rbind(test, train)
#######################################################################################################################
# Step 2: Extracts only the measurements on the mean and standard deviation for each measurement.
# create column vector of with column indicies representing mean variables
meancolvector <- features[grep("mean", features$V2, ignore.case=T),]$V1
# create column vector of with column indicies representing std deviation variables
stdcolvector <- features[grep("std", features$V2, ignore.case=T),]$V1
## combine mean and std column indicies vector into one column index vector
## with the last 2 indicies (562,563) added for the subject and activity columns
## 562 = Subject | 563 = Activity
columnIndicies <- sort(c(meancolvector, stdcolvector, 562, 563))
# Create data set with mean and std deviation columns extracted
extracted <- combined[,columnIndicies]
#######################################################################################################################
# Step 3: Uses descriptive activity names to name the activities in the data set
# Rename activity label columns
colnames(activitylabels) <- c("ActivityValue", "ActivityName")
# Use merge to match activitylabels values to activity values in extracted measurements data set
# This will add a descriptive activity name to the extracted data set
extractedActivityName = merge(extracted, activitylabels, by.x="V1.2", by.y="ActivityValue",all=T)
#######################################################################################################################
# Step 4: Appropriately labels the data set with descriptive variable names.
# Create data set for looping columns to rename from features data set
rows <- features[sort(c(meancolvector, stdcolvector)),]
for(i in seq_len(nrow(rows))){
# newname = new column name to be set in extracted data set
newname <- make.names(as.character(rows[i,2]))
# Set column name in data set to newname retrieved from features data set
colnames(extractedActivityName)[i+1] <- newname
}
# Rename column with subject value to "Subject"
colnames(extractedActivityName)[colnames(extractedActivityName)=="V1.1"] <- "Subject"
colnames(extractedActivityName)[colnames(extractedActivityName)=="V1.2"] <- "ActivityValue"
#######################################################################################################################
# Step 5: From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# Column names for measurments into vector for melting
columnnames <- colnames(extractedActivityName)[1:nrow(rows)+1]
# Use 'melt' to create tall, skinny data set for calculating mean for each variable by Subject and Activity
melted <- melt(extractedActivityName, id=c("Subject","ActivityName"),measure.vars=columnnames)
# Calculate mean for each variable by Subject and Activity
tidymean <- dcast(melted, Subject + ActivityName ~ variable, mean)
tidymean
## Tidy Data Set output
# write.table(tidymean, "TidyDataSet.txt", row.names=F)
|
c6b8fdfc79bab2a0f121d2950bd818dfa14bc609
|
be84451505b6c2a19d7975577acb4e15e91c647a
|
/activity.R
|
afe9c07e188f3af33bdcd30852a3bb86237141e5
|
[] |
no_license
|
Iryna-Garbuz/RepData_PeerAssessment1
|
3bcdf408ef1ae6c043e9daec69b6169e9b9bcbfa
|
b5bac78795f230b74045a8bcc464c04c27165909
|
refs/heads/master
| 2020-12-01T01:05:50.056819
| 2014-07-16T04:51:06
| 2014-07-16T04:51:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,878
|
r
|
activity.R
|
# Reproducible Research: analysis of a personal activity monitoring dataset
# (steps recorded in 5-minute intervals, from activity.csv in the working
# directory). The script summarises daily step totals, interval averages,
# imputes missing values with the overall mean, and compares weekday vs.
# weekend activity patterns.
library (ggplot2)
## Loading and preprocessing the data
getwd()
sourcedata <- read.csv(file="activity.csv", sep=",", header=TRUE)
sourcedata$steps <- as.numeric(sourcedata$steps)
sourcedata$date <- as.Date(sourcedata$date)
## What is mean total number of steps taken per day?
## 1. Make a histogram of the total number of steps taken each day
# aggregate() with a formula silently drops days whose steps are all NA
meanStepData <- aggregate(steps~date, data=sourcedata, FUN=sum, na.rm=TRUE)
hist(meanStepData$steps, main="Total Steps Taken per Day", xlab="Steps")
##2. Calculate and report the mean and median total number of steps taken per day
mean(meanStepData$steps)
median(meanStepData$steps)
summary(meanStepData$steps, na.rm=TRUE, digits = 10)
## What is the average daily activity pattern?
## 1. Make a time series plot (i.e. type = "l") of the 5-minute interval (x-axis)
## and the average number of steps taken, averaged across all days (y-axis)
avgStepsPerInterval <- aggregate(steps~interval, data=sourcedata, FUN=mean, na.rm=TRUE)
plot(avgStepsPerInterval$interval, avgStepsPerInterval$steps, type="l",
main="The average number of steps \n taken in 5-minutes interval across all days",
xlab="Interval", ylab="Average number of steps")
## 2. Which 5-minute interval, on average across all the days in the dataset, contains the maximum number of steps?
which.max(avgStepsPerInterval$steps)
# NOTE(review): row 104 is hard-coded — presumably the index returned by
# which.max() for this particular dataset; confirm it matches the line above.
avgStepsPerInterval[104,]
## Imputing missing values
## 1. Calculate and report the total number of missing values
## in the dataset (i.e. the total number of rows with NAs)
countNA <- sum(is.na(sourcedata$steps))
percentNA <- countNA/nrow(sourcedata)*100
## 2. Devise a strategy for filling in all of the missing values in the dataset.
## The strategy does not need to be sophisticated. For example, you could use
## the mean/median for that day, or the mean for that 5-minute interval, etc.
## 3. Create a new dataset that is equal to the original dataset but with the missing data filled in.
# Strategy used: replace every NA with the overall mean of the steps column.
sourcedataNAmean <- sourcedata
sourcedataNAmean$steps[is.na(sourcedataNAmean$steps)] <- mean(sourcedata$steps, na.rm=TRUE)
## 4. Make a histogram of the total number of steps taken each day and
## Calculate and report the mean and median total number of steps
## taken per day. Do these values differ from the estimates from the first part of the assignment?
## What is the impact of imputing missing data on the estimates of the total daily number of steps?
meanStepDataNA <- aggregate(steps~date, data=sourcedataNAmean, FUN=sum, na.rm=TRUE)
hist(meanStepDataNA$steps, main="Total Steps Taken per Day where we replaced all NA by mean", xlab="Steps")
mean(meanStepDataNA$steps)
median(meanStepDataNA$steps)
summary(meanStepDataNA$steps, na.rm=TRUE, digits=10)
## Are there differences in activity patterns between weekdays and weekends?
## 1. Create a new factor variable in the dataset with two levels, "weekday"
## and "weekend", indicating whether a given date is a weekday or weekend day.
# factor() on a logical has levels FALSE < TRUE, so labels map to
# FALSE -> "weekday" and TRUE (Sat/Sun) -> "weekend".
sourcedataNAmean$weekday <- factor(weekdays(sourcedataNAmean$date)=="Sunday" |
weekdays(sourcedataNAmean$date)=="Saturday",
labels=c("weekday", "weekend"))
## 2. Make a panel plot containing a time series plot (i.e. type = "l") of
## the 5-minute interval (x-axis) and the average number of steps taken,
## averaged across all weekday days or weekend days (y-axis).
library(ggplot2)
avgStepsPerWeekDay <- aggregate(steps ~ interval + weekday, data = sourcedataNAmean,
FUN = mean)
g <- ggplot (data=avgStepsPerWeekDay, aes (interval, steps)) +
geom_line(color = "BLUE", size = 1) + facet_wrap(~weekday, ncol = 1)+
theme_bw() +
theme(strip.background = element_rect(fill = "beige")) +
ggtitle ("The average number of steps taken in 5-minutes interval across all days \n for weekday and weekend\n")
print (g)
|
b12cff79ef4d9d1c248d8be6cdc0e4968ff7b87a
|
827a5a4aafec6facb3b785f55ceece0400cccf73
|
/R/gru.R
|
0fe11934beeab374ed6a54512934e1962cd2e242
|
[] |
no_license
|
systats/tidykeras
|
fbbb17f0ac48fb51ab183883f6d165378d3b20fd
|
5526c9869891cfdc58c1d2db2b67ff022e648dcc
|
refs/heads/master
| 2020-03-28T21:31:41.270296
| 2019-03-04T13:23:16
| 2019-03-04T13:23:16
| 149,163,341
| 9
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,982
|
r
|
gru.R
|
#' k_gru
#'
#' Build a bidirectional GRU Keras model: embedding -> spatial dropout ->
#' bidirectional GRU -> concatenated global average and global max pooling ->
#' dense output, compiled with binary cross-entropy and the Adam optimizer.
#'
#' @param in_dim Number of total vocabulary/words used
#' @param in_length Length of the input sequences
#' @param embed_dim Number of word vectors
#' @param sp_drop Spatial Dropout after Embedding
#' @param gru_dim Number of GRU neurons (per direction)
#' @param out_dim Number of neurons of the output layer
#' @param out_fun Output activation function
#' @param ... Exit arguments (currently unused)
#' @return A compiled Keras model
#'
#' @examples
#' Taken from https://www.kaggle.com/yekenot/pooled-gru-fasttext
#'
#' def get_model():
#'     inp = Input(shape=(maxlen, ))
#'     x = Embedding(max_features, embed_size, weights=[embedding_matrix])(inp)
#'     x = SpatialDropout1D(0.2)(x)
#'     x = Bidirectional(GRU(80, return_sequences=True))(x)
#'     avg_pool = GlobalAveragePooling1D()(x)
#'     max_pool = GlobalMaxPooling1D()(x)
#'     conc = concatenate([avg_pool, max_pool])
#'     outp = Dense(6, activation="sigmoid")(conc)
#'
#'     model = Model(inputs=inp, outputs=outp)
#'     model.compile(loss='binary_crossentropy',
#'                   optimizer='adam',
#'                   metrics=['accuracy'])
#'
#' @export
k_gru <- function(
  in_dim = 10000,
  in_length = 100,
  embed_dim = 128,
  sp_drop = .2,
  gru_dim = 64,
  out_dim = 1,
  out_fun = "sigmoid",
  ...
){
  inp <- keras::layer_input(shape = list(in_length))
  main <- inp %>%
    keras::layer_embedding(
      input_dim = in_dim,
      output_dim = embed_dim,
      input_length = in_length
    ) %>%
    keras::layer_spatial_dropout_1d(sp_drop) %>%
    keras::bidirectional(keras::layer_gru(units = gru_dim, return_sequences = TRUE))
  avg_pool <- main %>% keras::layer_global_average_pooling_1d()
  # Bug fix: this branch previously duplicated the average pooling layer;
  # the reference model (see @examples) applies GlobalMaxPooling1D here.
  max_pool <- main %>% keras::layer_global_max_pooling_1d()
  outp <- keras::layer_concatenate(c(avg_pool, max_pool)) %>%
    keras::layer_dense(units = out_dim, activation = out_fun)
  model <- keras::keras_model(inp, outp) %>%
    keras::compile(
      loss = "binary_crossentropy",
      optimizer = "adam",
      metrics = "accuracy"
    )
  return(model)
}
|
8e3df637397afba05ca64f49a9a4b8646fdb8ae8
|
52a1144fde8cbee79b910820f44d69dbe8e9e8f3
|
/Introducciรณn a R/5 Mineria de texto/5_2 Wordcloud.R
|
e84e7d2944454b14d7e764501b713375955c5c11
|
[
"MIT"
] |
permissive
|
jcms2665/FLACSO-R-2021
|
d19b9466a9834a654f49cdfb1deb81f855b8d132
|
34d298f847ccfdc34add1a92dc69c1951731635d
|
refs/heads/main
| 2023-04-09T11:16:00.367564
| 2021-09-03T04:16:26
| 2021-09-03T04:16:26
| 396,588,644
| 1
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 2,675
|
r
|
5_2 Wordcloud.R
|
#--------------------------------------------------------------------------------
# Topic:   Text mining
# Author:  Julio Cesar <jcms2665@gmail.com>
# Date:    10-08-2021
# Data:    Text
# Github:  https://github.com/jcms2665/Tools-for-Demography/tree/main/R
# Notes:
#     Contents
#       0. Set up working environment
#       1. Load libraries
#       2. Working directory
#       3. Create corpus
#       4. Clean text
#           4.1 Define function
#           4.2 Cleaning
#       5. Stop words
#       6. Matrix
#       7. Word cloud
#       8. Word frequencies
#       9. Word associations
#--------------------------------------------------------------------------------
#0. Set up working environment
rm(list=ls()); graphics.off(); options(warn=-1)
#1. Load libraries
library(tm)
library(SnowballC)
library(wordcloud)
library(RColorBrewer)
library(foreign)
library(dplyr)
library(ggplot2)
library(igraph)
#2. Working directory
setwd("D:/OneDrive - El Colegio de Mรฉxico A.C/5. Proyectos/2021/46. SIGMA/3 R-intro/Versiรณn SIGMA161/5 Minerรญa de texto/5_Datos")
#3. Create corpus (one document per line of the text file)
docs <- Corpus(VectorSource(readLines("TEXTO.txt", encoding = "UTF-8")))
#4. Clean text
#4.1 Define function: replace every match of 'pattern' with a space
reemplazar <- content_transformer(function (x , pattern ) gsub(pattern, " ", x))
#4.2 Cleaning: strip punctuation-like characters, lowercase, drop numbers,
#    Spanish stop words, punctuation and extra whitespace
docs <- tm_map(docs, reemplazar, "/")
docs <- tm_map(docs, reemplazar, "@")
docs <- tm_map(docs, reemplazar, ";")
docs <- tm_map(docs, reemplazar, "ยฟ")
docs <- tm_map(docs, reemplazar, ":")
docs <- tm_map(docs, content_transformer(tolower))
docs <- tm_map(docs, removeNumbers)
docs <- tm_map(docs, removeWords, stopwords("spanish"))
docs <- tm_map(docs, removePunctuation)
docs <- tm_map(docs, stripWhitespace)
#inspect(docs)
#5. Stop words: extra corpus-specific words to drop
docs <- tm_map(docs, removeWords, c("pues","tenia"))
#6. Matrix: term-document matrix and term frequencies, sorted decreasing
dtm <- TermDocumentMatrix(docs)
m <- as.matrix(dtm)
v <- sort(rowSums(m),decreasing=TRUE)
d <- data.frame(word = names(v),freq=v)
#7. Word cloud (seed fixed for a reproducible layout)
set.seed(1234)
wordcloud(words = d$word, freq = d$freq, min.freq = 1,
          max.words=400, random.order=FALSE, rot.per=0.7,
          colors=brewer.pal(8, "Dark2"))
#8. Word frequencies: bar plot of the 10 most frequent terms
barplot(d[1:10,]$freq, las = 2, names.arg = d[1:10,]$word,
        col ="lightblue", main ="Palabras frecuentes",
        ylab = "Frecuencias")
#9. Word associations: terms correlated with "mรฉxico" at r >= 0.50
findAssocs(dtm, terms = c("mรฉxico"), corlimit = 0.50)
|
722544c7fc4cc7653be39d3afcfb82d4bd130fda
|
73924eb1f5f2ff686fa82bddb614cd112bc80325
|
/Sample_dates.R
|
8b1f722df79b503bac40bdd5b09792ef428153cc
|
[] |
no_license
|
davhernandez/Thesis-Project
|
3e240b17d2d8d75a298f4f6a32064d1053663dbf
|
bb528979e34b85fa14d11e19354a480f1fa5fa61
|
refs/heads/master
| 2020-03-23T11:06:51.818446
| 2018-12-19T23:47:58
| 2018-12-19T23:47:58
| 141,483,791
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,827
|
r
|
Sample_dates.R
|
# Combines fish sampling records from two monitoring programs (CCFRP and
# PISCO), harmonises their date columns, and plots when samples were taken:
# by full date, by month-day (years pooled), and by month.
# setup ---------------------------
rm(list = ls())
library(dplyr)
library(ggplot2)
library(lubridate)
# CCCFRP dates --------------------------------------
#importing CCFRP data
sample_date <- read.csv("~/Desktop/Thesis/Raw Data/CCFRP/Hernandez_BlueRF.csv")
#selecting date for CCFRP
# convert month names ("January", ...) to month numbers (1-12)
sample_date$Month <- match(sample_date$Month, month.name)
#a new column that is a character vector combining the month, day, and year columns into the full date
sample_date <- mutate(sample_date, Date = paste(sample_date$Month, sample_date$Day, sample_date$Year, sep = "/"))
#since the Date column is character vector, it needs to be converted to an object type Date
sample_date$Date <- as.Date(sample_date$Date, "%m/%d/%Y")
#add a column Frequency to tell the number of observations. The column will be filled with 1 because each row is for an individual fish
sample_date <- mutate(sample_date, Frequency = 1)
#add a column for the source of the data
sample_date <- mutate(sample_date, source = 'CCFRP')
# PISCO dates -----------------------
#importing PISCO data
smys <- read.csv("~/Desktop/Thesis/Raw Data/PISCO/UCSB_FISH.csv")
#filter out all fish that aren't BRF
smys = subset(smys, classcode == 'SMYS')
#a new column that is a character vector combining the month, day, and year columns into the full date
smys <- mutate(smys, Date = paste(smys$month, smys$day, smys$year, sep = "/"))
#since the Date column is character vector, it needs to be converted to an object type Date
smys$Date <- as.Date(smys$Date, "%m/%d/%Y")
#rename 'count' to 'Frequency' so that it matches the column name of 'sample_date'
smys <- rename(smys, Frequency = count)
#add a column for the source of the data
smys <- mutate(smys, source = 'PISCO')
# combining data -----------------------
#select CCFRP dates and add a column for frequency. All frequencies = 1
#select PISCO dates and count column
#join both matrices
joined_dates <- rbind(sample_date[, c("Date", "Frequency", "source")], smys[,c("Date", "Frequency", "source")])
#stacked bar plot of when each fish was sampled
ggplot(joined_dates, aes(x = Date, y = Frequency, fill = source)) +
  geom_bar(stat='identity')
# plotting just month and day ---------------------------
#this combines all of the data across years to look at the trend in what time of year the samples were taken
#this section uses lubridate package to reach its goal. The previous section did it in base R
sample_date <- mutate(sample_date, Month_Day = paste(month(sample_date$Month, label = TRUE), sample_date$Day, sep = "-"))
smys <- mutate(smys, Month_Day = paste(month(smys$month, label = TRUE), smys$day, sep = "-"))
joined_months <- rbind(sample_date[, c("Month_Day", "Frequency", "source")], smys[,c("Month_Day", "Frequency", "source")])
#collapsing all matching data points together
joined_months <- joined_months %>%
  na.omit %>%
  group_by(Month_Day, source) %>%
  summarise(Frequency = sum(Frequency))
#what if I mutate and extract the name of the month `month(data, label = TRUE)` and the day number and then forgo the as.Date?
ggplot(joined_months, aes(x = Month_Day, y = Frequency, fill=source)) +
  geom_bar(stat="identity")
#plotting just the months ---------------------------------------------------------------------
sample_date <- mutate(sample_date, Months = month(sample_date$Month, label = TRUE))
smys <- mutate(smys, Months = month(smys$month, label = TRUE))
joined_months <- rbind(sample_date[, c("Months", "Frequency", "source")], smys[,c("Months", "Frequency", "source")])
#collapsing all matching data points together
joined_months <- joined_months %>%
  na.omit %>%
  group_by(Months, source) %>%
  summarise(Frequency = sum(Frequency))
ggplot(joined_months, aes(x = Months, y = Frequency, fill=source)) +
  geom_bar(stat="identity") +
  ggtitle("Samples based on month collected")
|
32211a662bfd37ad67fbed24c19f1b5b19a50743
|
af553e8eab166e8647eab9aba61a39c4ee5a66cf
|
/man/boa.pardesc.Rd
|
0fa5b926305b3aed31ff5eba30640a9c9afdc6fc
|
[] |
no_license
|
cran/boa
|
89cd73809b363eb4e28e1ce393f711b34fe4d134
|
58cfd77ca08a13652ace0da4a173c3928c168528
|
refs/heads/master
| 2021-01-17T13:21:21.985274
| 2016-06-23T01:29:05
| 2016-06-23T01:29:05
| 17,671,633
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 645
|
rd
|
boa.pardesc.Rd
|
\name{boa.pardesc}
\alias{boa.pardesc}
\title{Global Parameters Descriptions}
\description{
Returns descriptive information on the global parameters.
}
\usage{
boa.pardesc()
}
\value{
A character matrix whose rows and columns ("group", "method", "desc", "par",
"note") contain the global parameters and the corresponding descriptors
(group = "Analysis", "Data", or "Plot"; method = subgroup classification;
desc = parameter description; par = parameter name; note = information
concerning the possible values for the parameter).}
\author{Brian J. Smith}
\seealso{ \code{\link{boa.par}} }
\keyword{internal}
|
4566a97fdf32dc45eb4ba48a56a2fe07a542a3e1
|
023f137d5b1465be216f5bc1d253cdb991446601
|
/analyses (missing - llm)/missing.R
|
404f1324a1751abd847053e506747df0949d4651
|
[] |
no_license
|
j-5chneider/casebased
|
93eca49c529c7582b9f64e0ea52a630b51582669
|
ac36eb6582812a9d0ebe65c9c03bbd612b924363
|
refs/heads/master
| 2020-12-27T06:41:06.789520
| 2020-02-10T22:05:53
| 2020-02-10T22:05:53
| 237,797,129
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,442
|
r
|
missing.R
|
# Missing-data analysis and multilevel multiple imputation for the
# dissertation pre/post dataset ('diss_prepost_w_kovar_analy', assumed to be
# in the workspace — TODO confirm where it is loaded). Steps: visualise
# missingness, check the MAR assumption, select predictors, then impute
# with mice using 2-level methods.
library(tidyverse)
obs_miss <- diss_prepost_w_kovar_analy %>%
  select(theorie.r.43.1, theorie.r.43.2, theorie.r.43.7, theorie.r.43.3,
         theorie.r.43.4, theorie.r.43.5, alter:anwesend_z, -seminartyp)
naniar::vis_miss(obs_miss)
gg_miss_upset(obs_miss, nsets = 10)
## checking MAR assumption
# margin plots: blue = observed, red = distribution of the other variable
# where this one is missing; similar distributions support MAR
library(VIM)
marginplot(diss_prepost_w_kovar_analy[,c("theorie.r.43.3", "T1.anstrS")])
marginplot(diss_prepost_w_kovar_analy[,c("theorie.r.43.3", "T1.anstrT")])
marginplot(diss_prepost_w_kovar_analy[,c("theorie.r.43.3", "nfc")])
marginplot(diss_prepost_w_kovar_analy[,c("theorie.r.43.3", "T1.RefBer1")])
marginplot(diss_prepost_w_kovar_analy[,c("theorie.r.43.3", "T1.RefBer2")])
marginplot(diss_prepost_w_kovar_analy[,c("theorie.r.43.3", "T1.ind.wert.util")])
marginplot(diss_prepost_w_kovar_analy[,c("theorie.r.43.3", "T1.ind.wert.cost")])
marginplot(diss_prepost_w_kovar_analy[,c("theorie.r.43.3", "theorie.r.43.1")])
marginplot(diss_prepost_w_kovar_analy[,c("theorie.r.43.3", "theorie.r.43.2")])
marginplot(diss_prepost_w_kovar_analy[,c("theorie.r.43.3", "theorie.r.43.7")])
marginplot(diss_prepost_w_kovar_analy[,c("theorie.r.43.4", "theorie.r.43.1")])
marginplot(diss_prepost_w_kovar_analy[,c("theorie.r.43.4", "theorie.r.43.2")])
marginplot(diss_prepost_w_kovar_analy[,c("theorie.r.43.4", "theorie.r.43.7")])
marginplot(diss_prepost_w_kovar_analy[,c("theorie.r.43.5", "theorie.r.43.1")])
marginplot(diss_prepost_w_kovar_analy[,c("theorie.r.43.5", "theorie.r.43.2")])
marginplot(diss_prepost_w_kovar_analy[,c("theorie.r.43.5", "theorie.r.43.7")])
library(mice)
library(miceadds)
## predictor selection ##########################################################
library(corrgram)
corrgram(obs_miss, lower.panel = "panel.pie", upper.panel = "panel.cor") # model-inherent variables + length seem to be the only good predictors
corrgram(cor(y = obs_miss, x = !is.na(obs_miss), use = "pair"), lower.panel = "panel.pie", upper.panel = "panel.pie")
# re-select the columns kept for the imputation model
obs_miss <- diss_prepost_w_kovar_analy %>%
  select(theorie.r.43.1, theorie.r.43.2, theorie.r.43.7, theorie.r.43.3, theorie.r.43.4,
         theorie.r.43.5, llm:anwesend_z, geschl.., semester, nfc, T1.int, T2.int, seminar)
# establishing object with standard values
ini <- mice(obs_miss,
            maxit = 0)
## define predictor matrix
# mice codes: 2 = 2-level predictor, 1 = fixed-effect predictor,
# -2 = cluster variable, 0 = not used (diagonal)
pred <- ini$predictorMatrix
for(i in attr(pred, "dimnames")[[1]]) {
  pred[i,] <- c(2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,2,2,2,-2)
  pred[i,i] <- 0
}
## define imputation method
# "" = variable is complete / not imputed
meth <- ini$meth
meth <- c("2l.pmm", "2l.pmm", "2l.pmm", "2l.pmm", "2l.pmm", "2l.pmm",
          "2l.binary", "2l.binary", "2l.pmm", "2l.pmm", "2l.pmm",
          "2l.pmm", "2l.pmm", "2l.pmm", "2l.pmm", "2l.pmm", "", "",
          "2l.pmm", "2l.pmm", "2l.pmm", "polyreg", "2l.pmm",
          "2l.pmm", "2l.pmm", "2l.pmm", "polyreg")
## categorical variables as factor
obs_miss$llm <- as.factor(obs_miss$llm)
obs_miss$medium <- as.factor(obs_miss$medium)
obs_miss$geschl.. <- as.factor(obs_miss$geschl..)
obs_miss$seminar <- as.integer(obs_miss$seminar)
## deciding visiting sequence:
## in order of missingness, from least to most missing
naniar::vis_miss(obs_miss)
## impute
imp_diss <- mice(obs_miss,
                 maxit = 5,
                 m = 5,
                 meth = meth,
                 pred = pred,
                 seed = 666
)
## check for implausible values
stripplot(imp_diss, pch = 20, cex = 1.2)
## check methods
##
b204af1b429148b7b66a938189a1968ed629a2bc
|
49b8ff57b4184c137dde8ed358b3372f3020d9b0
|
/RStudioProjects/mbDiscoveryR/mbMMLCPT/findCMB.R
|
227eaab87ea3c375e126e94077f3313d15586eeb
|
[] |
no_license
|
kelvinyangli/PhDProjects
|
c70bad5df7e4fd2b1803ceb80547dc9750162af8
|
db617e0dbb87e7d5ab7c5bfba2aec54ffa43208f
|
refs/heads/master
| 2022-06-30T23:36:29.251628
| 2019-09-08T07:14:42
| 2019-09-08T07:14:42
| 59,722,411
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 494
|
r
|
findCMB.R
|
# Finds the candidate Markov blanket of a target node: pc(T) plus pc(x) for
# every x in pc(T), i.e. one level of expansion of the parent/child set.
#
# Args:
#   data:     the data set, forwarded to mmlPC().
#   node:     the target node T.
#   dataInfo: meta information about the data, forwarded to mmlPC().
#   base:     log base for the MML score (default 2), forwarded to mmlPC().
#   debug:    if TRUE, mmlPC() emits debugging output.
# Returns: a vector of unique candidate Markov-blanket members with the
#          target node itself removed; empty when pc(T) is empty.
findCMB <- function(data, node, dataInfo, base = 2, debug = FALSE) {
  # Bug fix: 'base' is now forwarded to the initial mmlPC() call too, so a
  # non-default base is applied consistently to pc(T) and to each pc(x).
  cmb <- mmlPC(data, node, dataInfo, base = base, debug = debug)
  if (length(cmb) > 0) {
    # Snapshot pc(T) before expanding: only the original members are
    # expanded (the original loop bound was also fixed at entry).
    pc_of_target <- cmb
    for (x in pc_of_target) {
      cmb <- c(cmb, mmlPC(data, x, dataInfo, base = base, debug = debug))
    }
    cmb <- unique(cmb)
    cmb <- cmb[cmb != node] # remove target node
  }
  return(cmb)
}
|
148d2996add17baa440ed3c1d06b31a3d1d12c1d
|
0986b0e01c2b07b18ed039705c897908e266bdd5
|
/units/2_geographic_range/2b_CA_coastal/CA coastal code and csv/CAfishes.r
|
e10fcdb4ab6ef02c458f3f21fb9cbce95b02bad0
|
[] |
no_license
|
mtaylor-semo/438
|
8b74e6c092c7c0338dd28b5cefe35f6a55147433
|
aab07b32495297a59108d9c13cd29ff9ec3824d3
|
refs/heads/main
| 2023-07-06T14:55:25.774861
| 2023-06-21T21:36:04
| 2023-06-21T21:36:04
| 92,411,601
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,605
|
r
|
CAfishes.r
|
# California coastal marine fishes: latitudinal range-size histograms and a
# species-richness-by-latitude plot (Point Conception exercise). Expects
# california_marine_fishes.csv (species x degree-latitude presence matrix)
# in the 'biogeo' directory.
# NOTE(review): setwd() in a script is fragile — assumes it is run from the
# parent of 'biogeo'; consider a project-relative path.
setwd('biogeo')
### Part 1: Histograms
cafish <- read.csv('california_marine_fishes.csv', header=TRUE, row.names=1)
#cafish <- read.csv('http://mtaylor4.semo.edu/~goby/biogeo/california_marine_fishes.csv', header=TRUE, row.names=1)
# row sums = degrees of latitude occupied per species; column sums = species per degree
rangeSize <- rowSums(cafish)
numSpecies <- colSums(cafish)
# NOTE(review): highSp is computed but never used below — confirm intent.
highSp <- ceiling(max(numSpecies)/10)*10
max(rangeSize) # maximum number of degrees latitude occupied
min(rangeSize) # minimum number of degrees latitude occupied
mean(rangeSize) # mean number of degrees latitude occupied
hist(rangeSize)
hist(numSpecies)
op <- par(mfrow=c(1,2))
hist(rangeSize, breaks=20, xlim=c(0,100), las=1, ylab='Number of Species', xlab = 'Latitude (ยฐN)', main='Frequency Distribution of Range Size\nCalifornia Coastal Marine Fishes')
#No need to do this one. Focus on species range.
#hist(numSpecies, breaks=20, xlim=c(0,500), las=1, ylab='Degrees of Latitude', xlab = 'Number ofSpecies', main='Frequency Distribution of Number of Species\nper Degree Latitude')
par(op)
## Part 2: Richness and Point Conception
plot(numSpecies)
# one latitude value per column of the matrix (30 S to 68 N)
lat <- seq(-30,68,1)
# These limits create a balanced distribution of tick marks
plot(numSpecies~lat, xlim=c(-40,80), xlab = 'Latitude (ยฐS โ ยฐN)', ylab='Species Richness', main='Species Richness by Latitude\nCalifornia Coastal Marine Fishes')
# Plot again to use identify function
plot(numSpecies~lat) # Skip xlim for now.
identify(numSpecies~lat)
latlabels <- colnames(cafish)
latlabels # View the result
plot(numSpecies~lat, xlim=c(-40,80))
identify(numSpecies~lat, labels=lat, cex=0.8)
|
a46698a205dc34b004dcf97127606d458dc7060d
|
25aa88fe128497f742ab3e3d85a0fa98a97938a3
|
/man/phyloFcluster.Rd
|
4194b2e119a4e0d5f1498c078ab3141ad69e0eb7
|
[] |
no_license
|
mortonjt/phylofactor
|
396f489a8a6eeb8bf4fb7d98df8b501f4fa2eb89
|
148925bede1648eeffb2ce52b186d8b54f71f7ff
|
refs/heads/master
| 2021-01-25T06:57:04.488647
| 2017-02-03T01:29:07
| 2017-02-03T01:29:07
| 80,677,860
| 0
| 0
| null | 2017-02-02T00:08:37
| 2017-02-02T00:08:37
| null |
UTF-8
|
R
| false
| true
| 1,052
|
rd
|
phyloFcluster.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/phyloFcluster.R
\name{phyloFcluster}
\alias{phyloFcluster}
\title{Produces cluster object with ncores and all necessary functions to perform phylofactorization}
\usage{
phyloFcluster(ncores = 2, ...)
}
\arguments{
\item{ncores}{number of cores}
\item{...}{optional input arguments for makeCluster}
}
\description{
Produces cluster object with ncores and all necessary functions to perform phylofactorization
}
\examples{
set.seed(1)
tree <- unroot(rtree(7))
X <- as.factor(c(rep(0,5),rep(1,5)))
sigClades <- Descendants(tree,c(9,12),type='tips')
Data <- matrix(rlnorm(70,meanlog = 8,sdlog = .5),nrow=7)
rownames(Data) <- tree$tip.label
colnames(Data) <- X
Data[sigClades[[1]],X==0] <- Data[sigClades[[1]],X==0]*8
Data[sigClades[[2]],X==1] <- Data[sigClades[[2]],X==1]*9
Data <- t(clo(t(Data)))
frmla <- Data ~ X
method='ILR'
Grps <- getGroups(tree)
choice='var'
cl <- phyloFcluster(2)
PhyloReg <- PhyloRegression(Data,X,frmla,Grps,method,choice,cl)
stopCluster(cl)
gc()
}
|
70661a08ba86c125ef6f22d263ca57ba0f388952
|
ca26b58313dc16a137f31f45e39aefe0a1a8f1ba
|
/inference-final/inference_7.R
|
7da3d0d8442849c8b80c5b7b39f06a8bc114f368
|
[] |
no_license
|
ChuckChekuri/datasciencecoursera
|
06c4a95ef37c4fde39ab55dc98de615be98f56cf
|
8e81a42c3a1a43372390aa44fdb35bafa21c08fd
|
refs/heads/master
| 2020-05-23T10:06:24.413977
| 2017-04-08T14:18:33
| 2017-04-08T14:19:12
| 80,387,047
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,468
|
r
|
inference_7.R
|
# Statistical inference course script: simulations of the Law of Large
# Numbers, the Central Limit Theorem, and confidence-interval coverage
# (normal, binomial/Wald, Agresti-Coull, Poisson).
# Law of Large Numbers: cumulative means for exponential, normal and
# Poisson draws converge to their expectations.
n <- 10000; means <- cumsum(rexp(n)) / (1 : n); library(ggplot2)
ge <- ggplot(data.frame(x = 1 : n, y = means), aes(x = x, y = y))
ge <- ge + geom_hline(yintercept = 0) + geom_line(size = 2)
ge <- ge + labs(x = "Number of obs", y = "Cumulative mean")
n <- 10000; means <- cumsum(rnorm(n)) / (1 : n);
gr <- ggplot(data.frame(x = 1 : n, y = means), aes(x = x, y = y))
gr <- gr + geom_hline(yintercept = 0) + geom_line(size = 2)
gr <- gr + labs(x = "Number of obs", y = "Cumulative mean")
n <- 10000; means <- cumsum(rpois(n, 1)) / (1 : n);
gp <- ggplot(data.frame(x = 1 : n, y = means), aes(x = x, y = y))
gp <- gp + geom_hline(yintercept = 0) + geom_line(size = 2)
gp <- gp + labs(x = "Number of obs", y = "Cumulative mean")
# NOTE(review): multiplot() is not defined in this file and is not part of
# ggplot2 — presumably sourced from the R Cookbook helper; confirm.
multiplot(ge,gr,gp, cols=3)
# LLN for a fair coin: cumulative proportion of heads converges to 0.5
means <- cumsum(sample(0 : 1, n , replace = TRUE)) / (1 : n)
g <- ggplot(data.frame(x = 1 : n, y = means), aes(x = x, y = y))
g <- g + geom_hline(yintercept = 0.5) + geom_line(size = 2)
g <- g + labs(x = "Number of obs", y = "Cumulative mean")
g
# CLT demo with die rolls.
# NOTE(review): the standardisation is commented out, so cfunc returns
# exp(n) regardless of x — likely a leftover experiment; confirm.
nosim <- 1000
cfunc <- function(x, n) exp(n) # * (mean(x) - 3.5) / 1.71
dat <- data.frame(
x = c(apply(matrix(sample(1 : 6, nosim * 10, replace = TRUE),
nosim), 1, cfunc, 10),
apply(matrix(sample(1 : 6, nosim * 20, replace = TRUE),
nosim), 1, cfunc, 20),
apply(matrix(sample(1 : 6, nosim * 30, replace = TRUE),
nosim), 1, cfunc, 30)
),
size = factor(rep(c(10, 20, 30), rep(nosim, 3))))
g <- ggplot(dat, aes(x = x, fill = size)) + geom_histogram(alpha = .20, binwidth=.3, colour = "black", aes(y = ..density..))
g <- g + stat_function(fun = dexp, size = 2)
g + facet_grid(. ~ size)
g
# CLT for a fair coin: standardised sample means approach N(0, 1)
nosim <- 1000
cfunc <- function(x, n) 2 * sqrt(n) * (mean(x) - 0.5)
dat <- data.frame(
x = c(apply(matrix(sample(0:1, nosim * 10, replace = TRUE),
nosim), 1, cfunc, 10),
apply(matrix(sample(0:1, nosim * 20, replace = TRUE),
nosim), 1, cfunc, 20),
apply(matrix(sample(0:1, nosim * 30, replace = TRUE),
nosim), 1, cfunc, 30)
),
size = factor(rep(c(10, 20, 30), rep(nosim, 3))))
g <- ggplot(dat, aes(x = x, fill = size)) + geom_histogram(binwidth=.3, colour = "black", aes(y = ..density..))
g <- g + stat_function(fun = dnorm, size = 2)
g + facet_grid(. ~ size)
g
# CLT for a biased coin (p = 0.9): convergence to normality is slower
nosim <- 1000
cfunc <- function(x, n) sqrt(n) * (mean(x) - 0.9) / sqrt(.1 * .9)
dat <- data.frame(
x = c(apply(matrix(sample(0:1, prob = c(.1,.9), nosim * 10, replace = TRUE),
nosim), 1, cfunc, 10),
apply(matrix(sample(0:1, prob = c(.1,.9), nosim * 20, replace = TRUE),
nosim), 1, cfunc, 20),
apply(matrix(sample(0:1, prob = c(.1,.9), nosim * 30, replace = TRUE),
nosim), 1, cfunc, 30)
),
size = factor(rep(c(10, 20, 30), rep(nosim, 3))))
g <- ggplot(dat, aes(x = x, fill = size)) + geom_histogram(binwidth=.3, colour = "black", aes(y = ..density..))
g <- g + stat_function(fun = dnorm, size = 2)
g + facet_grid(. ~ size)
g
# Confidence intervals: 95% CI for mean height (in feet), Wald CI for a
# proportion, and the exact binomial interval
library(UsingR);data(father.son); x <- father.son$sheight
(mean(x) + c(-1, 1) * qnorm(.975) * sd(x) / sqrt(length(x))) / 12
round(1 / sqrt(10 ^ (1 : 6)), 3)
.56 + c(-1, 1) * qnorm(.975) * sqrt(.56 * .44 / 100)
binom.test(56, 100)$conf.int
# Coverage simulation of the Wald interval, n = 20 (undercovers)
n <- 20; pvals <- seq(.1, .9, by = .05); nosim <- 1000
coverage <- sapply(pvals, function(p){
phats <- rbinom(nosim, prob = p, size = n) / n
ll <- phats - qnorm(.975) * sqrt(phats * (1 - phats) / n)
ul <- phats + qnorm(.975) * sqrt(phats * (1 - phats) / n)
mean(ll < p & ul > p)
})
ggplot(data.frame(pvals, coverage), aes(x = pvals, y = coverage)) + geom_line(size = 2) + geom_hline(yintercept = 0.95) + ylim(.75, 1.0)
# Same with n = 100: coverage improves
n <- 100; pvals <- seq(.1, .9, by = .05); nosim <- 1000
coverage2 <- sapply(pvals, function(p){
phats <- rbinom(nosim, prob = p, size = n) / n
ll <- phats - qnorm(.975) * sqrt(phats * (1 - phats) / n)
ul <- phats + qnorm(.975) * sqrt(phats * (1 - phats) / n)
mean(ll < p & ul > p)
})
ggplot(data.frame(pvals, coverage), aes(x = pvals, y = coverage2)) + geom_line(size = 2) + geom_hline(yintercept = 0.95)+ ylim(.75, 1.0)
# Agresti-Coull "add 2 successes and 2 failures" interval, n = 20
n <- 20; pvals <- seq(.1, .9, by = .05); nosim <- 1000
coverage <- sapply(pvals, function(p){
phats <- (rbinom(nosim, prob = p, size = n) + 2) / (n + 4)
ll <- phats - qnorm(.975) * sqrt(phats * (1 - phats) / n)
ul <- phats + qnorm(.975) * sqrt(phats * (1 - phats) / n)
mean(ll < p & ul > p)
})
ggplot(data.frame(pvals, coverage), aes(x = pvals, y = coverage)) + geom_line(size = 2) + geom_hline(yintercept = 0.95)+ ylim(.75, 1.0)
# Poisson rate interval: normal approximation vs. exact test
x <- 5; t <- 94.32; lambda <- x / t
round(lambda + c(-1, 1) * qnorm(.975) * sqrt(lambda / t), 3)
poisson.test(x, T = 94.32)$conf
# Coverage of the Poisson normal-approximation interval, t = 100
lambdavals <- seq(0.005, 0.10, by = .01); nosim <- 1000
t <- 100
coverage <- sapply(lambdavals, function(lambda){
lhats <- rpois(nosim, lambda = lambda * t) / t
ll <- lhats - qnorm(.975) * sqrt(lhats / t)
ul <- lhats + qnorm(.975) * sqrt(lhats / t)
mean(ll < lambda & ul > lambda)
})
ggplot(data.frame(lambdavals, coverage), aes(x = lambdavals, y = coverage)) + geom_line(size = 2) + geom_hline(yintercept = 0.95)+ylim(0, 1.0)
# Same with more monitoring time (t = 1000): coverage improves
lambdavals <- seq(0.005, 0.10, by = .01); nosim <- 1000
t <- 1000
coverage <- sapply(lambdavals, function(lambda){
lhats <- rpois(nosim, lambda = lambda * t) / t
ll <- lhats - qnorm(.975) * sqrt(lhats / t)
ul <- lhats + qnorm(.975) * sqrt(lhats / t)
mean(ll < lambda & ul > lambda)
})
ggplot(data.frame(lambdavals, coverage), aes(x = lambdavals, y = coverage)) + geom_line(size = 2) + geom_hline(yintercept = 0.95) + ylim(0, 1.0)
|
ceef273b0f2391d0aae8312af1e4cdb5a37adce8
|
17ca53a3827be35bbe7b0b1e88decbeed2f9eded
|
/R/utils.R
|
30989a4fbbc830e2d1323e655e71d02011639b44
|
[
"MIT"
] |
permissive
|
mchevalier2/crestr
|
190afcd9d563f92afe51394b0dad752496ce3e5b
|
e1978059c243f61475055c1f2ff08d5d8b601079
|
refs/heads/master
| 2023-08-30T03:59:50.319316
| 2023-08-25T16:00:36
| 2023-08-25T16:00:36
| 269,097,345
| 9
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,944
|
r
|
utils.R
|
#' Convert abundance data into percentage data.
#'
#' Convert abundance data into percentage data: each selected column is
#' rescaled so that the selected columns of every row sum to 100.
#'
#' @param df The dataframe containing the data to convert.
#' @param col2convert A vector of the columns to convert. Default is all the
#'        columns but the first, which contains an age, a depth or a sampleID.
#' @return A dataframe with the same dimensions and names as \code{df}, with
#'         the selected columns expressed as row-wise percentages and any
#'         \code{NA}s (e.g. from all-zero rows) replaced by 0.
#' @export
#' @examples
#' df <- data.frame(matrix(1:25, ncol = 5))
#' colnames(df) <- paste(rep("col", 5), 1:5, sep = "")
#' convert2percentages(df)
#' convert2percentages(df, col2convert = 3:5)
convert2percentages <- function(df, col2convert = 2:ncol(df)) {
  if(base::missing(df)) df
  # drop = FALSE keeps data.frames even when a single column is selected;
  # the previous apply(df[, col2convert], 1, sum) errored in that case.
  sub <- df[, col2convert, drop = FALSE]
  df2 <- cbind(
    df[, -col2convert, drop = FALSE],
    100 * sub / rowSums(sub)
  )
  colnames(df2) <- colnames(df)
  rownames(df2) <- rownames(df)
  df2[is.na(df2)] <- 0
  df2
}
#' Convert data into presence/absence data.
#'
#' Convert data into presence/absence data: selected columns become 1 where
#' the value is both >= \code{threshold} and strictly positive, 0 otherwise.
#'
#' @param df The dataframe containing the data to convert.
#' @param threshold The threshold that defines presence (presence if >= threshold)
#' @param col2convert A vector of the columns to convert. Default is all the
#'        columns but the first, which contains an age, a depth or a sampleID.
#' @return A dataframe with the same dimensions and names as \code{df}, with
#'         the selected columns recoded to 0/1.
#' @export
#' @examples
#' df <- data.frame(matrix(1:25, ncol = 5))
#' colnames(df) <- paste(rep("col", 5), 1:5, sep = "")
#' convert2presenceAbsence(df, threshold = 15)
#' convert2presenceAbsence(df, col2convert = 3:5)
convert2presenceAbsence <- function(df, threshold = 2, col2convert = 2:ncol(df)) {
  if(base::missing(df)) df
  # drop = FALSE fixes the single-column case, which previously collapsed
  # to a vector and made cbind() return a matrix instead of a data.frame.
  sub <- df[, col2convert, drop = FALSE]
  # the extra "> 0" guard matters when threshold <= 0: zero is never presence
  df2 <- cbind(
    df[, -col2convert, drop = FALSE],
    ifelse(sub >= threshold & sub > 0, 1, 0)
  )
  colnames(df2) <- colnames(df)
  rownames(df2) <- rownames(df)
  df2
}
#' Normalises the percentages
#'
#' Converts the data to percentages and then divides each selected column by
#' the mean of its strictly positive percentage values, down-weighting taxa
#' that are abundant everywhere.
#'
#' @param df The dataframe containing the data to convert.
#' @param col2convert A vector of the columns to convert. Default is all the
#'        columns but the first, which contains an age, a depth or a sampleID.
#' @return A dataframe with the same dimensions and names as \code{df},
#'         containing the weighted percentages.
#' @export
#' @examples
#' df <- data.frame(matrix(1:25, ncol = 5))
#' colnames(df) <- paste(rep("col", 5), 1:5, sep = "")
#' normalise(df)
#' normalise(df, col2convert = 3:5)
normalise <- function(df, col2convert = 2:ncol(df)) {
  if(base::missing(df)) df
  df2 <- convert2percentages(df, col2convert)
  # per-column weight: mean of the strictly positive percentages
  colweights <- apply(df2[, col2convert, drop = FALSE], 2, meanPositiveValues)
  # Divide every column by its weight in one vectorised sweep() rather than
  # looping over rows (same result, O(cells) without per-row overhead).
  df2[, col2convert] <- sweep(df2[, col2convert, drop = FALSE], 2, colweights, "/")
  colnames(df2) <- colnames(df)
  rownames(df2) <- rownames(df)
  df2
}
#' Calculate the mean of all strictly positive values.
#'
#' Computes the arithmetic mean restricted to the elements of \code{x} that
#' are strictly greater than zero.
#'
#' @param x A vector of values.
#' @return The average of all the positive values. Returns \code{NaN} if no
#'         strictly positive values are found.
#' @export
#' @examples
#' meanPositiveValues(-10:10)
meanPositiveValues <- function(x) {
  if(base::missing(x)) x
  # Keep only strictly positive entries, then average them.
  positives <- x[x > 0]
  base::mean(positives)
}
#' Copy crest data to the clipboard.
#'
#' Copy crest data to the clipboard for an easy extraction of the data from the
#' R environment.
#'
#' @inheritParams crest
#' @param x A \code{\link{crestObj}} produced by the \code{\link{crest.reconstruct}} or \code{\link{crest}} functions.
#' @param optima A boolean value to indicate if the optima should be copied to the clipboard.
#' @param mean A boolean value to indicate if the means should be copied to the clipboard.
#' @param uncertainties A boolean value to indicate if the uncertainties should be copied to the clipboard.
#' @return No return value. This function is called to copy the crest data to the clipboard.
#' @export
#' @examples
#' \dontrun{
#' if(requireNamespace('clipr', quietly=TRUE)) {
#' reconstr <- crest(
#' df = crest_ex, pse = crest_ex_pse, taxaType = 0,
#' climate = c("bio1", "bio12"), bin_width = c(2, 20),
#' shape = c("normal", "lognormal"),
#' selectedTaxa = crest_ex_selection, dbname = "crest_example",
#' leave_one_out = TRUE
#' )
#' copy_crest(reconstr, uncertainties=TRUE)
#' ## You can now paste the values in a spreadsheet.
#' }
#' }
#'
copy_crest <- function(x, climate = x$parameters$climate, optima=TRUE, mean=FALSE, uncertainties=FALSE) {
  # Force the standard 'argument missing' error early if x was not supplied.
  if(base::missing(x)) x
  # clipr is an optional dependency; fail with an actionable message.
  if(! requireNamespace('clipr', quietly=TRUE)) {
    stop("'copy_crest()' requires the 'clipr' package. You can install it using install.packages(\"clipr\").\n\n")
  }
  # At least one of the three value kinds must be requested.
  if(optima + mean + uncertainties == 0) {
    stop("'optima', 'mean' and 'uncertainties' cannot all be set to FALSE.\n\n")
  }
  # Assemble the export column by column: first the x axis (age/depth),
  # then, per climate variable, whichever series were requested.
  export <- stats::setNames(list(x$inputs$x), x$inputs$x.name)
  for (clim in climate) {
    recon <- x$reconstructions[[clim]]
    if (optima) {
      export[[paste(clim, 'optima', sep='_')]] <- recon$optima[, 2]
    }
    if (mean) {
      export[[paste(clim, 'mean', sep='_')]] <- recon$optima[, 3]
    }
    if (uncertainties) {
      unc <- recon[['uncertainties']]
      # Column 1 of the uncertainties table is the x axis; copy the rest.
      for (k in 2:ncol(unc)) {
        export[[paste(clim, colnames(unc)[k], sep='_')]] <- unc[, k]
      }
    }
  }
  clipr::write_clip(as.data.frame(export))
  invisible(x)
}
#' Check if the coordinates are correct.
#'
#' Check if the coordinates are correct.
#'
#' @inheritParams crest
#' @return Return a set of valid coordinates.
#' @export
#' @examples
#' check_coordinates(NA, NA, NA, NA)
#' check_coordinates(-200, 0, 0, 90)
#' check_coordinates(20, 0, 90, 0)
#'
check_coordinates <- function(xmn, xmx, ymn, ymx) {
  # Force the standard 'argument missing' error early if a bound is absent.
  if(base::missing(xmn)) xmn
  if(base::missing(xmx)) xmx
  if(base::missing(ymn)) ymn
  if(base::missing(ymx)) ymx
  estimate_xlim <- estimate_ylim <- FALSE
  # --- Longitudes: clamp to [-180, 180]; warn only when both are non-NA
  # and at least one is actually out of range.
  if (xmn < -180 | is.na(xmn) | xmx > 180 | is.na(xmx)) {
    if(!is.na(xmn) & !is.na(xmx)) {
      if (xmn < -180 | xmx > 180) {
        warning("[xmn; xmx] range larger than accepted values [-180; 180]. The limits were set to -180 and/or 180.\n")
      }
    }
    xmn <- max(xmn, -180, na.rm=TRUE)
    xmx <- min(xmx, 180, na.rm=TRUE)
    estimate_xlim <- TRUE
  }
  if (xmn >= xmx) {
    warning("xmn was larger than xmx. The two values were inverted.\n")
    tmp <- xmn
    xmn <- xmx
    xmx <- tmp
  }
  # --- Latitudes: clamp to [-90, 90].
  if (ymn < -90| is.na(ymn) | ymx > 90 | is.na(ymx) ) {
    if(!is.na(ymn) & !is.na(ymx)) {
      # BUG FIX: this condition previously read 'ymn > 90', so an
      # out-of-range ymx never triggered the warning (the clamping below
      # still happened). It now mirrors the longitude check above.
      if (ymn < -90 | ymx > 90) {
        warning("[ymn; ymx] range larger than accepted values [-90; 90]. The limits were set to -90 and/or 90.\n")
      }
    }
    ymn <- max(ymn, -90, na.rm=TRUE)
    ymx <- min(ymx, 90, na.rm=TRUE)
    estimate_ylim <- TRUE
  }
  if (ymn >= ymx) {
    warning("ymn was larger than ymx. The two values were inverted.\n")
    tmp <- ymn
    ymn <- ymx
    ymx <- tmp
  }
  # The two logical flags are coerced to 0/1 when concatenated with the
  # numeric bounds: c(xmn, xmx, ymn, ymx, estimated_x?, estimated_y?).
  c(xmn, xmx, ymn, ymx, estimate_xlim, estimate_ylim)
}
#' Crop the dataset obtained from \code{\link{crest.get_modern_data}}
#'
#' Crop the dataset obtained from \code{\link{crest.get_modern_data}} according
#' to an object of the class \code{SpatialPolygonsDataFrame}.
#'
#' @inheritParams crest.calibrate
#' @param shp A shapefile (spatVect) to crop the data. Data points will be kept
#' if their centroid is within the shape.
#' @return An cropped version of the input \code{crestObj}.
#' @export
#' @examples
#' \dontrun{
#' data(M1)
#' M1 <- terra::unwrap(M1)
#' ## We want only the data covering Nigeria
#' M2 <- M1[M1$COUNTRY == 'Nigeria', ]
#' data(reconstr)
#' reconstr.cropped <- crop(reconstr, M2)
#' data1 <- terra::rast(reconstr$modelling$climate_space[, 1:3],
#' crs=terra::crs(M1), type='xyz')
#' data2 <- terra::rast(reconstr.cropped$modelling$climate_space[, 1:3],
#' crs=terra::crs(M1), type='xyz')
#' layout(matrix(c(1,2,3,4), byrow=FALSE, ncol=2), width=1, height=c(0.2, 0.8))
#' plot_map_eqearth(data1, brks.pos=seq(13,29,2), colour_scale=TRUE,
#' title='Full dataset', zlim=c(13, 29))
#' plot_map_eqearth(data2, brks.pos=seq(13,29,2), colour_scale=TRUE,
#' title='Cropped dataset', zlim=c(13, 29))
#' }
#'
crop <- function(x, shp) {
  # Force the standard 'argument missing' error early if an input is absent.
  if(base::missing(x)) x
  if(base::missing(shp)) shp
  if (is.crestObj(x)) {
    # --- Crop the climate space: keep grid cells whose centroid falls
    # inside the polygon (terra::extract returns NA for points outside).
    dat.x <- x$modelling$climate_space[, 1]
    dat.y <- x$modelling$climate_space[, 2]
    res <- cbind(dat.x, dat.y, rep(0, length(dat.x)))
    pts <- terra::vect(res[, 1:2], crs="+proj=longlat")
    extracted <- terra::extract(shp, pts)
    res[extracted[!is.na(extracted[, 2]), 1], 3] <- 1
    if(sum(res[, 3]) > 0) {
      x$modelling$climate_space <- x$modelling$climate_space[res[, 3] == 1, ]
    } else {
      stop('\nNo overlap between the data and the selected shape.\n\n')
    }
    # --- Crop each taxon's distribution; drop taxa that lose all (or too
    # many) occurrences and record them in taxalist for the final warning.
    taxalist <- c()
    for(tax in names(x$modelling$distributions)) {
      dat.x <- x$modelling$distributions[[tax]][, 2]
      dat.y <- x$modelling$distributions[[tax]][, 3]
      res <- cbind(dat.x, dat.y, rep(0, length(dat.x)))
      pts <- terra::vect(res[, 1:2], crs="+proj=longlat")
      extracted <- terra::extract(shp, pts)
      res[extracted[!is.na(extracted[, 2]), 1], 3] <- 1
      if(sum(res[, 3]) > 0) {
        x$modelling$distributions[[tax]] <- x$modelling$distributions[[tax]][res[, 3] == 1, ]
        if(max(table(x$modelling$distributions[[tax]][, 1])) < x$parameters$minGridCells) {
          x$modelling$distributions[[tax]] <- NULL
          x$inputs$taxa.name <- x$inputs$taxa.name[!(x$inputs$taxa.name == tax)]
          x$inputs$selectedTaxa[tax, ] <- rep(-1, length(x$parameters$climate))
          # BUG FIX: was '-(... == tax)'; negating a logical vector yields
          # 0/-1 indices, which dropped the first row instead of the
          # matching rows. Logical '!=' subsetting keeps the non-matching rows.
          x$modelling$taxonID2proxy <- x$modelling$taxonID2proxy[x$modelling$taxonID2proxy[, 'proxyName'] != tax, ]
          taxalist <- c(taxalist, tax)
        }
      } else {
        x$modelling$distributions[[tax]] <- NULL
        x$inputs$taxa.name <- x$inputs$taxa.name[!(x$inputs$taxa.name == tax)]
        x$inputs$selectedTaxa[tax, ] <- rep(-1, length(x$parameters$climate))
        # BUG FIX: was '-x$modelling$taxonID2proxy[, "proxyName"] == tax'
        # (unary minus on a character column -> runtime error). Same
        # logical subsetting as above.
        x$modelling$taxonID2proxy <- x$modelling$taxonID2proxy[x$modelling$taxonID2proxy[, 'proxyName'] != tax, ]
        taxalist <- c(taxalist, tax)
      }
    }
    # --- Recompute the bounding box from the cropped climate space,
    # padded by half the grid resolution on each side.
    resol <- sort(unique(diff(sort(unique(x$modelling$climate_space[, 1])))))[1] / 2.0
    xx <- range(x$modelling$climate_space[, 1])
    x$parameters$xmn <- xx[1] - resol
    x$parameters$xmx <- xx[2] + resol
    resol <- sort(unique(diff(sort(unique(x$modelling$climate_space[, 2])))))[1] / 2.0
    yy <- range(x$modelling$climate_space[, 2])
    x$parameters$ymn <- yy[1] - resol
    x$parameters$ymx <- yy[2] + resol
    if( length(taxalist ) > 0) {
      name <- find.original.name(x)
      # (typo fixed: "were were" -> "were")
      warning(paste0("One or more taxa were lost due to the cropping of the study area. Use PSE_log() with the output of this function for details."))
      message <- 'Taxon excluded by the crop function.'
      x$misc$taxa_notes[[message]] <- taxalist
    }
    return(x)
  } else {
    cat('This function only works with a crestObj.\n\n')
  }
  return(invisible(NA))
}
#' Returns a vector of colours
#'
#' Returns a vector of colours
#'
#' @param n An index to select the colour theme
#' @return A vector of colours.
#' @export
#' @examples
#' colour_theme(1)
#'
colour_theme <- function(n) {
  # Force the standard 'argument missing' error early if n was not supplied.
  if(base::missing(n)) n
  # Theme 1 is the only palette currently defined (20 colours).
  themes <- list(
    c("#3366cc", "#dc3912", "#ff9900", "#109618", "#990099", "#0099c6", "#dd4477", "#66aa00", "#b82e2e", "#316395", "#994499", "#22AA99", "#AAAA11", "#6633CC", "#E67300", "#8B0707", "#651067", "#329262", "#5574A6", "#3B3EAC")
  )
  if (n == 1) {
    themes[[1]]
  } else {
    # Unknown index: warn and return NA, as before.
    warning("The selected colour theme does not exist.\n")
    NA
  }
}
#' Returns the name of the function argument in the global environment
#'
#' Returns the name of the function argument in the global environment
#'
#' @param x The function argument
#' @return The name of the function argument in the global environment.
#' @export
#'
find.original.name <- function(x) {
  # Force the standard 'argument missing' error early if x was not supplied.
  if(base::missing(x)) x
  # Scan every binding in the global environment and return the name of the
  # first object whose value is identical() to x. Returns invisible NULL
  # when no match is found (same as the original loop falling through).
  for (candidate in ls(envir = .GlobalEnv)) {
    if (identical(get(candidate, envir = .GlobalEnv), x)) {
      return(candidate)
    }
  }
}
#' Returns the taxa type corresponding to the index.
#'
#' Returns the taxa type corresponding to the index.
#'
#' @param taxaType An integer between 0 and 6
#' @return Returns the taxa type corresponding to the index.
#' @export
#'
get_taxa_type <- function(taxaType) {
  # Force the standard 'argument missing' error early if absent.
  if(base::missing(taxaType)) taxaType
  # Dispatch on the index. An unmatched switch() returns invisible NULL,
  # which matches the original if-chain's behaviour for unknown indices.
  switch(as.character(taxaType),
    "0" = 'Example dataset',
    "1" = 'plant',
    "2" = 'beetle',
    "3" = 'chironomid',
    "4" = 'foraminifer',
    "5" = 'diatom',
    "6" = 'rodent'
  )
}
#' Returns the taxa type corresponding to the taxID.
#'
#' Returns the taxa type corresponding to the taxID.
#'
#' @param taxID An integer between 0 and 6
#' @return Returns the taxa type ID corresponding to the taxon ID.
#' @export
#'
getTaxaTypeFromTaxID <- function(taxID) {
  # The taxa type is encoded in the millions digit of the taxon ID, so
  # integer division by one million recovers it.
  taxID %/% 1000000
}
#' Test if x is a crestObj.
#'
#' Test if x is a crestObj.
#'
#' @param x The object to be tested
#' @return \code{TRUE} (x is a crestObj) or \code{FALSE} (not a crestObj).
#' @export
#'
is.crestObj <- function(x) {
  # Delegate to methods::is() so class inheritance is honoured.
  methods::is(x, "crestObj")
}
#' Simplify a crestObj into a dataframe.
#'
#' Simplify a crestObj with reconstructed values into a dataframe.
#'
#' @inheritParams plot.crestObj
#' @return A dataframe with the age/depth of each sample and all the best
#' reconstructed values.
#' @export
#' @examples
#' head(crest.simplify(reconstr))
#'
crest.simplify <- function(x, optima=TRUE) {
  # Force the standard 'argument missing' error early if x was not supplied.
  if(base::missing(x)) x
  # Only crestObj inputs that have been through a reconstruction step can
  # be flattened; anything else gets a message and an invisible NA.
  if(!is.crestObj(x)) {
    cat('\nx should be a crestObj.\n\n')
    return(invisible(NA))
  }
  if(!x$misc$stage %in% c('climate_reconstructed', 'leave_one_out')) {
    cat('\nReconstruct a climate variable before using crest.simplify().\n\n')
    return(invisible(NA))
  }
  # Start from the x axis (age/depth) and append one column per climate
  # variable: column 2 of $optima holds the optima, column 3 the means.
  value_col <- if (optima) 2 else 3
  out <- x$inputs$x
  for (clim in x$parameters$climate) {
    out <- cbind(out, x$reconstructions[[clim]]$optima[, value_col])
  }
  colnames(out) <- c(x$inputs$x.name, x$parameters$climate)
  return(out)
}
|
afbabe2cad679bf5cabc2a184743409f59756b8a
|
61fb32fdc2e1355b5c81b216f51edbffdb3fcd1b
|
/R/bayDem_calcWaveletLogLik.R
|
828ebb91a8aae89b6e7d91074f8868e18a6d8c26
|
[] |
no_license
|
SlothOfDoom/yada
|
b036ce71bf2350b933ece0acbb54701df82863c5
|
2d4276622ff2071ce8bf70a305cbae8646c61962
|
refs/heads/master
| 2021-09-13T03:42:30.658278
| 2018-04-24T15:26:17
| 2018-04-24T15:26:17
| 120,033,013
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 797
|
r
|
bayDem_calcWaveletLogLik.R
|
# Description
# Calculate the log-likelihood for a given set of radiocarbon measurements
# given the parametrized wavelet specified by the augData, the augmented
# data.
#
# Example call(s)
#
#
# Input(s)
# Name Type Description
# M Matrix [Nmeas x Ngrid] The measurement matrix (see
# bayDem_calcMeasMatrix)
# augData List The augmented data specifying the
#
# Output(s)
# Name Type Description
# logLik scalar The log-likelihood, log( p(D|th,alpha) )
bayDem_calcWaveletLogLik <- function(M,augData,dy=1) {
  # Evaluate the parametrized wavelet density on the grid.
  pdf <- bayDem_calcWaveletPdf(augData,dy)
  # Coerce to a matrix so it can be used in the matrix product below.
  # NOTE(review): 'n.col' is not an argument of as.matrix() and is silently
  # swallowed by '...'; kept unchanged to preserve the original call exactly.
  pdf <- as.matrix(pdf,n.col=1)
  # Row i of M %*% pdf is the likelihood of measurement i; the
  # log-likelihood is the sum of the element-wise logs.
  perMeasLik <- M %*% pdf
  return(sum(log(perMeasLik)))
}
|
0db61af27e5f7b98061e922b420868f4e6814386
|
a5b8244731689344004c67af107b1a531f7e9e2f
|
/src/08_writeup/03_best_and_worst_metrics.R
|
7c9eb51945d41b833b6d22fad1ef598eb7bbe6a3
|
[] |
no_license
|
jvenzor23/DefensiveCoverageNet
|
4efcb0f36d6806c71a1750fa9b58ba63c55e3929
|
85eef09aeede123aa32cb8ad3a8075cd7b7f3e43
|
refs/heads/master
| 2023-02-13T22:14:23.396421
| 2021-01-07T22:52:32
| 2021-01-07T22:52:32
| 317,361,746
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 108,104
|
r
|
03_best_and_worst_metrics.R
|
# This code animates a given play in the player tracking data, while
# also displaying the epa values for all receivers (targeted receiver
# shown in red)
# Clean workspace
# NOTE(review): rm(list=ls()) and the hard-coded setwd() below make this a
# machine-specific, session-destructive script; acceptable for a personal
# analysis, but worth parameterising if shared.
rm(list=ls())
# Setting Working Directory
setwd("~/Desktop/NFL_BIG_DATA_BOWL_2021/inputs/")
# Calling Necessary Libraries
library(tidyverse)
library(dplyr)
library(ggplot2)
library(lubridate)
library(reticulate)
library(rootSolve)
library(modeest)
library(gganimate)
library(magick)
# Reading in The Data -----------------------------------------------------
# Static reference tables (players, games, plays, targeted receivers).
players = read.csv("~/Desktop/CoverageNet/inputs/players.csv")
games = read.csv("~/Desktop/CoverageNet/inputs/games.csv")
plays = read.csv("~/Desktop/CoverageNet/inputs/plays.csv")
targeted_receiver = read.csv("~/Desktop/CoverageNet/inputs/targetedReceiver.csv")
# Frame-level EPA values per route, produced by the upstream CoverageNet steps.
epa_tracking_total = read.csv("~/Desktop/CoverageNet/src/03_coverageNet/03_score_tracking/outputs/routes_tracking_epa.csv")
# Play-level EPA at three moments of the pass; rename the probability columns
# so the attempt/arrived versions do not collide on join.
pass_attempt_epa_data = read.csv("~/Desktop/CoverageNet/src/03_coverageNet/02_score_attempt/outputs/pass_attempt_epa_data.csv") %>%
  dplyr::select(gameId, playId, C_prob, IN_prob, epa_pass_attempt) %>%
  rename(c_prob_pass_attempt = C_prob,
         in_prob_pass_attempt = IN_prob)
pass_arrived_epa_data = read.csv("~/Desktop/CoverageNet/src/03_coverageNet/01_score_arrived/outputs/pass_arrived_epa_data.csv") %>%
  dplyr::select(gameId, playId, C_prob,IN_prob,epa_pass_arrived) %>%
  rename(c_prob_pass_arrived = C_prob,
         in_prob_pass_arrived = IN_prob)
pass_caught_epa_data = read.csv("~/Desktop/CoverageNet/src/03_coverageNet/00_score_YAC/outputs/yac_yaint_epa_data.csv")
my_epa = read.csv("~/Desktop/CoverageNet/src/02_yards_to_epa_function/outputs/plays_with_epa.csv")
# Frame at which each pass arrived (used to mark the "pass_arrived" event).
pass_arrived_frames = read.csv("~/Desktop/CoverageNet/src/03_coverageNet/01_score_arrived/outputs/pass_attempts_with_fumbles.csv") %>%
  distinct(gameId, playId, frameId)
# Tracking Examples --------------------------------------------------------
# GOOD
# Week-13 tracking data for the "good coverage" example play.
pbp_data = read.csv("~/Desktop/CoverageNet/src/00_data_wrangle/outputs/week13.csv")
pbp_data_clean = pbp_data %>%
  inner_join(pass_arrived_epa_data %>%
               distinct(gameId, playId))
library(magick)
# Broadcast footage of the same play, rescaled to 400px height so it can be
# stitched next to the 400px animation panels below.
tracking_good_nfl_gif = image_scale(image_read(path = "~/Desktop/CoverageNet/src/08_writeup/NFL_videos/tracking_good.gif"),"x400")
# 2018123000/2528 Jarvis TD week 17
# 2018123000/2239 cool pass to Higgins where the play takes a long time
# Select the single example play (gameId/playId hard-coded below).
example.play = pbp_data %>%
  inner_join(
    pbp_data %>%
      dplyr::select(gameId, playId) %>%
      filter(gameId == 2018120200,
             playId == 749) %>%
      distinct()
    # sample_n(1)
  )
# Frame-level EPA for every receiver on the play, with elapsed time after
# the snap (frame 11 is treated as the snap; 10 frames per second).
example.epa_tracking_point_plot = epa_tracking_total %>%
  inner_join(example.play %>%
               rename(targetNflId = nflId)) %>%
  mutate(time_after_snap = (frameId - 11)*.1)
# Pad each receiver's EPA series backwards: replicate the first scored frame
# onto every earlier frame of the play so all series start together.
example.epa_tracking_point_plot_min_vals = example.epa_tracking_point_plot %>%
  group_by(gameId, playId, targetNflId) %>%
  filter(frameId == min(frameId)) %>%
  inner_join(example.play %>%
               distinct(gameId, playId, frameId) %>%
               rename(frameId2 = frameId)) %>%
  filter(frameId2 < frameId) %>%
  dplyr::select(-frameId) %>%
  rename(frameId = frameId2) %>%
  dplyr::select(names(example.epa_tracking_point_plot))
# ...and forwards: replicate the last scored frame onto every later frame.
example.epa_tracking_point_plot_max_vals = example.epa_tracking_point_plot %>%
  group_by(gameId, playId, targetNflId) %>%
  filter(frameId == max(frameId)) %>%
  inner_join(example.play %>%
               distinct(gameId, playId, frameId) %>%
               rename(frameId2 = frameId)) %>%
  filter(frameId2 > frameId) %>%
  dplyr::select(-frameId) %>%
  rename(frameId = frameId2) %>%
  dplyr::select(names(example.epa_tracking_point_plot))
# Combine observed + padded frames and flag the targeted receiver
# (nflId 2560854 on this play).
example.epa_tracking_point_plot = rbind.data.frame(example.epa_tracking_point_plot,
                                                   example.epa_tracking_point_plot_min_vals,
                                                   example.epa_tracking_point_plot_max_vals) %>%
  arrange(gameId, playId, frameId, targetNflId) %>%
  mutate(targeted = if_else(targetNflId == 2560854, 1, 0))
example.epa_tracking_point_plot$targeted[is.na(example.epa_tracking_point_plot$targeted)] = 0
example.epa_tracking_point_plot = example.epa_tracking_point_plot %>%
  mutate(targeted = as.factor(targeted))
# Cumulative "trail" version of the data: for each frame, all EPA values up
# to that frame, so transition_time() can draw growing lines.
example.epa_tracking_line_plot = example.epa_tracking_point_plot %>%
  rename(frameId_new = frameId) %>%
  full_join(example.epa_tracking_point_plot %>%
              dplyr::select(gameId, playId, targetNflId, frameId)) %>%
  filter(frameId_new <= frameId) %>%
  dplyr::select(gameId, playId, frameId, targetNflId, time_after_snap, everything()) %>%
  arrange(gameId, playId, frameId, targetNflId) %>%
  ungroup() %>%
  rowwise() %>%
  # Tiny jitter so tied EPA values render as distinct line segments.
  # NOTE(review): rnorm() without set.seed() makes the output
  # non-reproducible run to run — confirm this is acceptable.
  mutate(epa_pass_attempt = epa_pass_attempt + rnorm(1)/10000)
# Play metadata joined with every EPA flavour, plus a human-readable
# down-and-distance label for the plot subtitle.
example.play.info = plays %>%
  inner_join(example.play %>%
               dplyr::select(gameId, playId) %>%
               distinct()) %>%
  left_join(targeted_receiver) %>%
  left_join(my_epa) %>%
  left_join(pass_attempt_epa_data %>%
              dplyr::select(gameId, playId, epa_pass_attempt)) %>%
  left_join(pass_arrived_epa_data %>%
              dplyr::select(gameId, playId, epa_pass_arrived)) %>%
  left_join(pass_caught_epa_data %>%
              dplyr::select(gameId, playId, epa_throw, epa_yac, epa_yaint)) %>%
  mutate(DownDesc = case_when(down == 1 ~ paste("1st and",
                                                yardsToGo),
                              down == 2 ~ paste("2nd and",
                                                yardsToGo),
                              down == 3 ~ paste("3rd and",
                                                yardsToGo),
                              TRUE ~ paste("4th and",
                                           yardsToGo)))
game.info = games %>%
  inner_join(example.play %>%
               dplyr::select(gameId, playId) %>%
               distinct())
## General field boundaries
# Field is 160/3 yards (53.33) wide; hash-mark x positions in yards.
xmin <- 0
xmax <- 160/3
hash.right <- 38.35
hash.left <- 12
hash.width <- 3.3
## Specific boundaries for a given play
# Vertical plot window clipped to where the play actually happens.
ymin <- max(round(min(example.play$x, na.rm = TRUE), -1), 0) + 5
ymax <- min(round(max(example.play$x, na.rm = TRUE) + 15, -1), 120)
# Hash marks at every non-multiple-of-5 yard line inside the window.
df.hash <- expand.grid(x = c(0, 23.36667, 29.96667, xmax), y = (10:110))
df.hash <- df.hash %>% filter(!(floor(y %% 5) == 0))
df.hash <- df.hash %>% filter(y < ymax, y > ymin)
yardline = (example.play %>% distinct(YardsFromOwnGoal))$YardsFromOwnGoal
firstDownYardLine = yardline + example.play.info$yardsToGo
ggplot() +
scale_size_manual(values = c(6, 4, 6), guide = FALSE) +
scale_shape_manual(values = c(21, 16, 21), guide = FALSE) +
scale_fill_manual(values = c("#002244", "#654321", "grey"), guide = FALSE) +
scale_colour_manual(values = c("#c60c30", "#654321", "black"), guide = FALSE) +
annotate("polygon", x = c(xmin, xmin, xmax, xmax),
y = c(ymin, yardline + 10, yardline + 10, ymin), colour = "black",
fill = "limegreen",
alpha = .5
) +
annotate("polygon", x = c(xmin, xmin, xmax, xmax),
y = c(yardline + 10, firstDownYardLine + 10, firstDownYardLine + 10, yardline + 10), colour = "black",
fill = "limegreen",
alpha = .85
) +
annotate("polygon", x = c(xmin, xmin, xmax, xmax),
y = c(firstDownYardLine + 10, ymax, ymax, firstDownYardLine + 10), colour = "black",
fill = "limegreen",
alpha = .5
) +
annotate("text", x = df.hash$x[df.hash$x < 55/2],
y = df.hash$y[df.hash$x < 55/2], label = "_", hjust = 0, vjust = -0.2) +
annotate("text", x = df.hash$x[df.hash$x > 55/2],
y = df.hash$y[df.hash$x > 55/2], label = "_", hjust = 1, vjust = -0.2) +
annotate("segment", x = xmin,
y = seq(max(10, ymin), min(ymax, 110), by = 5),
xend = xmax,
yend = seq(max(10, ymin), min(ymax, 110), by = 5)) +
annotate("text", x = rep(hash.left, 11), y = seq(10, 110, by = 10),
label = c("G ", seq(10, 50, by = 10), rev(seq(10, 40, by = 10)), " G"),
angle = 270, size = 4) +
annotate("text", x = rep((xmax - hash.left), 11), y = seq(10, 110, by = 10),
label = c(" G", seq(10, 50, by = 10), rev(seq(10, 40, by = 10)), "G "),
angle = 90, size = 4) +
geom_segment(aes(x = 0, xend = xmax,
y = yardline + 10, yend = yardline + 10),
color = "blue",
size = 1,
alpha = .7) +
geom_segment(aes(x = 0, xend = xmax,
y = firstDownYardLine + 10, yend = firstDownYardLine + 10),
color = "yellow",
size = 1) +
geom_point(data = example.play, aes(x = (xmax-y), y = x + 10, shape = team,
fill = team, group = nflId, size = team, colour = team), alpha = 0.9) +
geom_point(data = example.play %>%
filter(nflId == 2560854), aes(x = (xmax-y), y = x + 10),
size = 6, color = "black", shape = 21, fill = "#e31837", alpha = 0.9) +
geom_point(data = example.play %>%
filter(is.na(nflId)), aes(x = (xmax-y), y = x + 10),
fill = "brown", color = "#654321", shape = 16, size = 4,
alpha = 1) +
geom_text(data = example.play, aes(x = (xmax-y), y = x + 10, label = jerseyNumber, group = nflId), colour = "white",
vjust = 0.36, size = 3.5) +
geom_line(data = example.play %>%
filter(IsOnOffense,
!is.na(nflId),
position != "QB"),
aes(x = (xmax-y), y = x + 10, group = nflId),
alpha = 0.5, size = .5) +
ylim(ymin, ymax) +
coord_fixed() +
theme(axis.line=element_blank(),
axis.text.x=element_blank(),
axis.text.y=element_blank(),
axis.ticks=element_blank(),
axis.title.x=element_blank(),
axis.title.y=element_blank(),
legend.position="none",
panel.background=element_blank(),
panel.border=element_blank(),
panel.grid.major=element_blank(),
panel.grid.minor=element_blank(),
plot.background=element_blank()) +
labs(title = paste0(game.info$visitorTeamAbbr, " @ ", game.info$homeTeamAbbr,
" (", game.info$gameDate, ")"),
subtitle = trimws(paste0("Down and Distance: ", toString(example.play.info$DownDesc), "\n", "\n",
paste(strwrap(paste("Play Description:", toString(example.play.info$playDescription))), collapse="\n")))) +
transition_reveal(frameId) +
ease_aes('linear')
# Companion animation: each receiver's EPA-of-pass-attempt over time; the
# targeted receiver's series is highlighted in red.
animate.epas =
  ggplot() +
  scale_size_manual(values = c(6, 6), guide = FALSE) +
  scale_shape_manual(values = c(21, 21), guide = FALSE) +
  scale_fill_manual(values = c("grey", "red"), guide = FALSE) +
  scale_colour_manual(values = c("grey", "red"), guide = FALSE) +
  scale_alpha_manual(values = c(.5, 1), guide = FALSE) +
  geom_line(data = example.epa_tracking_line_plot %>% dplyr::select(-frameId_new), aes(x = time_after_snap, y = epa_pass_attempt,
                                                                                       group = targetNflId, colour = targeted, alpha = targeted)) +
  geom_point(data = example.epa_tracking_point_plot, aes(x = time_after_snap, y = epa_pass_attempt,
                                                         fill = targeted, group = targetNflId, size = targeted, colour = targeted)) +
  geom_text(data = example.epa_tracking_point_plot, aes(x = time_after_snap, y = epa_pass_attempt, label = jerseyNumber), colour = "white",
            vjust = 0.36, size = 3.5) +
  geom_hline(yintercept=0, color = "black") +
  ylim(min(example.epa_tracking_point_plot$epa_pass_attempt) - .25,
       max(example.epa_tracking_point_plot$epa_pass_attempt) + .25) +
  # "\u2013" replaces hyphens with en-dashes for nicer minus signs.
  labs(x = "Time after Snap (s)",
       y = "Expected Points Added of Pass Attempt",
       title = "Expected Points Added of Target over Time",
       subtitle = paste0(
         "\n", "EPA Pass Attempt = ", gsub("-", "\u2013", toString(replace_na(round(example.play.info$epa_pass_attempt, 2), "N/A"))),
         "\n", "EPA Pass Arrived = ", gsub("-", "\u2013", toString(replace_na(round(example.play.info$epa_pass_arrived, 2), "N/A"))),
         "\n", "EPA Pass Caught = ", gsub("-", "\u2013", toString(replace_na(round(example.play.info$epa_throw, 2), "N/A"))),
         "\n", "EPA (nflWAR) = ", gsub("-", "\u2013", toString(round(example.play.info$my_epa, 2))),
         "\n")) +
  theme_minimal() +
  transition_time(frameId) +
  ease_aes('linear')
## Ensure timing of play matches 10 frames-per-second
play.length.ex <- length(unique(example.epa_tracking_line_plot$frameId))
# Render both panels with identical frame counts and sizes so they can be
# stitched side by side frame-for-frame.
b_gif <- animate(animate.epas, fps = 5, nframe = play.length.ex,
                 height = 400, width = 400)
a_gif <- animate(animate.play, fps = 5, nframe = play.length.ex,
                 height = 400, width = 400)
a_mgif <- image_read(a_gif)
b_mgif <- image_read(b_gif)
# Stitch field + EPA panels frame by frame.
new_gif <- image_append(c(a_mgif[1], b_mgif[1]))
for(i in 2:play.length.ex){
  combined <- image_append(c(a_mgif[i], b_mgif[i]))
  new_gif <- c(new_gif, combined)
}
new_gif
# Prepend the (cropped) broadcast footage; it is shorter than the
# animation, so its first frame is held until the remaining frames align
# with the end of the play.
final_gif = image_append(c(image_crop(tracking_good_nfl_gif[1], "800x400+200"), new_gif[1]))
for(i in 2:play.length.ex){
  if(i <= (play.length.ex - length(tracking_good_nfl_gif)) + 1){
    combined <- image_append(c(image_crop(tracking_good_nfl_gif[1], "800x400+200"), new_gif[i]))
  }else{
    combined <- image_append(c(image_crop(tracking_good_nfl_gif[length(tracking_good_nfl_gif) - (play.length.ex - i)], "800x400+200"), new_gif[i]))
  }
  final_gif <- c(final_gif, combined)
}
final_gif
library(gifski)
anim_save("~/Desktop/CoverageNet/src/08_writeup/images/TrackingGoodEx.gif",
          final_gif,
          fps = 5,
          nframe = play.length.ex,
          height = 500,
          width = 1000,
          res = 120)
# Interactive leftovers: preview the first broadcast frame and its crop.
tracking_good_nfl_gif[1]
image_crop(tracking_good_nfl_gif[1], "800x400+200")
# BAD
# Same pipeline as the GOOD example above, for a "bad coverage" play from
# week 11 (gameId 2018111800 / playId 2568).
pbp_data = read.csv("~/Desktop/CoverageNet/src/00_data_wrangle/outputs/week11.csv")
pbp_data_clean = pbp_data %>%
  inner_join(pass_arrived_epa_data %>%
               distinct(gameId, playId))
tracking_bad_nfl_gif = image_scale(image_read(path = "~/Desktop/CoverageNet/src/08_writeup/NFL_videos/tracking_bad.gif"),"x400")
tracking_bad_nfl_gif
# 2018123000/2528 Jarvis TD week 17
# 2018123000/2239 cool pass to Higgins where the play takes a long time
example.play = pbp_data %>%
  inner_join(
    pbp_data %>%
      dplyr::select(gameId, playId) %>%
      filter(gameId == 2018111800,
             playId == 2568) %>%
      distinct()
    # sample_n(1)
  )
# Frame-level EPA per receiver with elapsed time after the snap (frame 11
# treated as the snap; 10 frames per second).
example.epa_tracking_point_plot = epa_tracking_total %>%
  inner_join(example.play %>%
               rename(targetNflId = nflId)) %>%
  mutate(time_after_snap = (frameId - 11)*.1)
# Pad each receiver's series backwards (replicate first scored frame)...
example.epa_tracking_point_plot_min_vals = example.epa_tracking_point_plot %>%
  group_by(gameId, playId, targetNflId) %>%
  filter(frameId == min(frameId)) %>%
  inner_join(example.play %>%
               distinct(gameId, playId, frameId) %>%
               rename(frameId2 = frameId)) %>%
  filter(frameId2 < frameId) %>%
  dplyr::select(-frameId) %>%
  rename(frameId = frameId2) %>%
  dplyr::select(names(example.epa_tracking_point_plot))
# ...and forwards (replicate last scored frame).
example.epa_tracking_point_plot_max_vals = example.epa_tracking_point_plot %>%
  group_by(gameId, playId, targetNflId) %>%
  filter(frameId == max(frameId)) %>%
  inner_join(example.play %>%
               distinct(gameId, playId, frameId) %>%
               rename(frameId2 = frameId)) %>%
  filter(frameId2 > frameId) %>%
  dplyr::select(-frameId) %>%
  rename(frameId = frameId2) %>%
  dplyr::select(names(example.epa_tracking_point_plot))
# Combine and flag the targeted receiver (nflId 2535698 on this play).
example.epa_tracking_point_plot = rbind.data.frame(example.epa_tracking_point_plot,
                                                   example.epa_tracking_point_plot_min_vals,
                                                   example.epa_tracking_point_plot_max_vals) %>%
  arrange(gameId, playId, frameId, targetNflId) %>%
  mutate(targeted = if_else(targetNflId == 2535698, 1, 0))
example.epa_tracking_point_plot$targeted[is.na(example.epa_tracking_point_plot$targeted)] = 0
example.epa_tracking_point_plot = example.epa_tracking_point_plot %>%
  mutate(targeted = as.factor(targeted))
# Cumulative "trail" version for the growing-line animation.
example.epa_tracking_line_plot = example.epa_tracking_point_plot %>%
  rename(frameId_new = frameId) %>%
  full_join(example.epa_tracking_point_plot %>%
              dplyr::select(gameId, playId, targetNflId, frameId)) %>%
  filter(frameId_new <= frameId) %>%
  dplyr::select(gameId, playId, frameId, targetNflId, time_after_snap, everything()) %>%
  arrange(gameId, playId, frameId, targetNflId) %>%
  ungroup() %>%
  rowwise() %>%
  # Tiny jitter so tied EPA values render distinctly (non-reproducible
  # without set.seed() — NOTE(review)).
  mutate(epa_pass_attempt = epa_pass_attempt + rnorm(1)/10000)
# Play metadata joined with every EPA flavour plus a down-and-distance label.
example.play.info = plays %>%
  inner_join(example.play %>%
               dplyr::select(gameId, playId) %>%
               distinct()) %>%
  left_join(targeted_receiver) %>%
  left_join(my_epa) %>%
  left_join(pass_attempt_epa_data %>%
              dplyr::select(gameId, playId, epa_pass_attempt)) %>%
  left_join(pass_arrived_epa_data %>%
              dplyr::select(gameId, playId, epa_pass_arrived)) %>%
  left_join(pass_caught_epa_data %>%
              dplyr::select(gameId, playId, epa_throw, epa_yac, epa_yaint)) %>%
  mutate(DownDesc = case_when(down == 1 ~ paste("1st and",
                                                yardsToGo),
                              down == 2 ~ paste("2nd and",
                                                yardsToGo),
                              down == 3 ~ paste("3rd and",
                                                yardsToGo),
                              TRUE ~ paste("4th and",
                                           yardsToGo)))
game.info = games %>%
  inner_join(example.play %>%
               dplyr::select(gameId, playId) %>%
               distinct())
## General field boundaries
xmin <- 0
xmax <- 160/3
hash.right <- 38.35
hash.left <- 12
hash.width <- 3.3
## Specific boundaries for a given play
ymin <- max(round(min(example.play$x, na.rm = TRUE), -1), 0) + 5
ymax <- min(round(max(example.play$x, na.rm = TRUE) + 15, -1), 120)
df.hash <- expand.grid(x = c(0, 23.36667, 29.96667, xmax), y = (10:110))
df.hash <- df.hash %>% filter(!(floor(y %% 5) == 0))
df.hash <- df.hash %>% filter(y < ymax, y > ymin)
yardline = (example.play %>% distinct(YardsFromOwnGoal))$YardsFromOwnGoal
firstDownYardLine = yardline + example.play.info$yardsToGo
# Animated bird's-eye view (BAD example). Note the manual scale value
# orderings differ from the GOOD example because this play's factor levels
# for 'team' sort differently.
animate.play =
  ggplot() +
  scale_size_manual(values = c(6, 4, 6), guide = FALSE) +
  scale_shape_manual(values = c(21, 16, 21), guide = FALSE) +
  scale_fill_manual(values = c("grey", "#654321", "#002244"), guide = FALSE) +
  scale_colour_manual(values = c("black", "#654321", "#c60c30"), guide = FALSE) +
  # Field shading around the line of scrimmage and first-down marker.
  annotate("polygon", x = c(xmin, xmin, xmax, xmax),
           y = c(ymin, yardline + 10, yardline + 10, ymin), colour = "black",
           fill = "limegreen",
           alpha = .5
  ) +
  annotate("polygon", x = c(xmin, xmin, xmax, xmax),
           y = c(yardline + 10, firstDownYardLine + 10, firstDownYardLine + 10, yardline + 10), colour = "black",
           fill = "limegreen",
           alpha = .85
  ) +
  annotate("polygon", x = c(xmin, xmin, xmax, xmax),
           y = c(firstDownYardLine + 10, ymax, ymax, firstDownYardLine + 10), colour = "black",
           fill = "limegreen",
           alpha = .5
  ) +
  annotate("text", x = df.hash$x[df.hash$x < 55/2],
           y = df.hash$y[df.hash$x < 55/2], label = "_", hjust = 0, vjust = -0.2) +
  annotate("text", x = df.hash$x[df.hash$x > 55/2],
           y = df.hash$y[df.hash$x > 55/2], label = "_", hjust = 1, vjust = -0.2) +
  annotate("segment", x = xmin,
           y = seq(max(10, ymin), min(ymax, 110), by = 5),
           xend = xmax,
           yend = seq(max(10, ymin), min(ymax, 110), by = 5)) +
  annotate("text", x = rep(hash.left, 11), y = seq(10, 110, by = 10),
           label = c("G ", seq(10, 50, by = 10), rev(seq(10, 40, by = 10)), " G"),
           angle = 270, size = 4) +
  annotate("text", x = rep((xmax - hash.left), 11), y = seq(10, 110, by = 10),
           label = c(" G", seq(10, 50, by = 10), rev(seq(10, 40, by = 10)), "G "),
           angle = 90, size = 4) +
  geom_segment(aes(x = 0, xend = xmax,
                   y = yardline + 10, yend = yardline + 10),
               color = "blue",
               size = 1,
               alpha = .7) +
  geom_segment(aes(x = 0, xend = xmax,
                   y = firstDownYardLine + 10, yend = firstDownYardLine + 10),
               color = "yellow",
               size = 1) +
  # Players; targeted receiver (2535698) re-drawn in red on top; ball brown.
  geom_point(data = example.play, aes(x = (xmax-y), y = x + 10, shape = team,
                                      fill = team, group = nflId, size = team, colour = team), alpha = 0.9) +
  geom_point(data = example.play %>%
               filter(nflId == 2535698), aes(x = (xmax-y), y = x + 10),
             size = 6, color = "black", shape = 21, fill = "#e31837", alpha = 0.9) +
  geom_point(data = example.play %>%
               filter(is.na(nflId)), aes(x = (xmax-y), y = x + 10),
             fill = "brown", color = "#654321", shape = 16, size = 4,
             alpha = 1) +
  geom_text(data = example.play, aes(x = (xmax-y), y = x + 10, label = jerseyNumber, group = nflId), colour = "white",
            vjust = 0.36, size = 3.5) +
  geom_line(data = example.play %>%
              filter(IsOnOffense,
                     !is.na(nflId),
                     position != "QB"),
            aes(x = (xmax-y), y = x + 10, group = nflId),
            alpha = 0.5, size = .5) +
  ylim(ymin, ymax) +
  coord_fixed() +
  theme(axis.line=element_blank(),
        axis.text.x=element_blank(),
        axis.text.y=element_blank(),
        axis.ticks=element_blank(),
        axis.title.x=element_blank(),
        axis.title.y=element_blank(),
        legend.position="none",
        panel.background=element_blank(),
        panel.border=element_blank(),
        panel.grid.major=element_blank(),
        panel.grid.minor=element_blank(),
        plot.background=element_blank()) +
  labs(title = paste0(game.info$visitorTeamAbbr, " @ ", game.info$homeTeamAbbr,
                      " (", game.info$gameDate, ")"),
       subtitle = trimws(paste0("Down and Distance: ", toString(example.play.info$DownDesc), "\n", "\n",
                                paste(strwrap(paste("Play Description:", toString(example.play.info$playDescription))), collapse="\n")))) +
  transition_reveal(frameId) +
  ease_aes('linear')
# Companion EPA-over-time animation (targeted receiver highlighted in red).
animate.epas =
  ggplot() +
  scale_size_manual(values = c(6, 6), guide = FALSE) +
  scale_shape_manual(values = c(21, 21), guide = FALSE) +
  scale_fill_manual(values = c("grey", "red"), guide = FALSE) +
  scale_colour_manual(values = c("grey", "red"), guide = FALSE) +
  scale_alpha_manual(values = c(.5, 1), guide = FALSE) +
  geom_line(data = example.epa_tracking_line_plot %>% dplyr::select(-frameId_new), aes(x = time_after_snap, y = epa_pass_attempt,
                                                                                       group = targetNflId, colour = targeted, alpha = targeted)) +
  geom_point(data = example.epa_tracking_point_plot, aes(x = time_after_snap, y = epa_pass_attempt,
                                                         fill = targeted, group = targetNflId, size = targeted, colour = targeted)) +
  geom_text(data = example.epa_tracking_point_plot, aes(x = time_after_snap, y = epa_pass_attempt, label = jerseyNumber), colour = "white",
            vjust = 0.36, size = 3.5) +
  geom_hline(yintercept=0, color = "black") +
  ylim(min(example.epa_tracking_point_plot$epa_pass_attempt) - .25,
       max(example.epa_tracking_point_plot$epa_pass_attempt) + .25) +
  # "\u2013" replaces hyphens with en-dashes for nicer minus signs.
  labs(x = "Time after Snap (s)",
       y = "Expected Points Added of Pass Attempt",
       title = "Expected Points Added of Target over Time",
       subtitle = paste0(
         "\n", "EPA Pass Attempt = ", gsub("-", "\u2013", toString(replace_na(round(example.play.info$epa_pass_attempt, 2), "N/A"))),
         "\n", "EPA Pass Arrived = ", gsub("-", "\u2013", toString(replace_na(round(example.play.info$epa_pass_arrived, 2), "N/A"))),
         "\n", "EPA Pass Caught = ", gsub("-", "\u2013", toString(replace_na(round(example.play.info$epa_throw, 2), "N/A"))),
         "\n", "EPA (nflWAR) = ", gsub("-", "\u2013", toString(round(example.play.info$my_epa, 2))),
         "\n")) +
  theme_minimal() +
  transition_time(frameId) +
  ease_aes('linear')
## Ensure timing of play matches 10 frames-per-second
# One animation frame per tracking frame of the example play.
play.length.ex <- length(unique(example.epa_tracking_line_plot$frameId))
# NOTE(review): `nframe` relies on R's partial argument matching against
# gganimate's `nframes` parameter — consider spelling it out as `nframes`.
b_gif <- animate(animate.epas, fps = 5, nframe = play.length.ex,
                 height = 400, width = 400)
a_gif <- animate(animate.play, fps = 5, nframe = play.length.ex,
                 height = 400, width = 400)
# Convert the rendered gifs to magick image vectors (one element per frame)
# so individual frames can be appended side by side below.
a_mgif <- image_read(a_gif)
b_mgif <- image_read(b_gif)
# Stitch the field animation (a_mgif) and the EPA-curve animation (b_mgif)
# side by side, frame by frame, into a single magick image vector.
new_gif <- image_append(c(a_mgif[1], b_mgif[1]))
# seq_len(...)[-1] instead of 2:play.length.ex — the latter iterates
# backwards (2, 1) and duplicates frame 1 when the play has a single frame.
for (i in seq_len(play.length.ex)[-1]) {
  combined <- image_append(c(a_mgif[i], b_mgif[i]))
  new_gif <- c(new_gif, combined)
}
new_gif  # print to preview the combined animation interactively
# Prepend the cropped broadcast footage to each combined tracking frame.
# The broadcast gif is shorter than the tracking animation, so its first
# frame is held frozen until the remaining broadcast frames line up with
# the end of the play, after which the two play out in sync.
frozen_frame <- image_crop(tracking_bad_nfl_gif[1], "500x400+100")  # hoisted: loop-invariant
final_gif <- image_append(c(frozen_frame, new_gif[1]))
# seq-safe iteration: empty when play.length.ex == 1 (2:1 would run backwards).
for (i in seq_len(play.length.ex)[-1]) {
  if (i <= (play.length.ex - length(tracking_bad_nfl_gif)) + 1) {
    # Still in the freeze window: repeat the broadcast's first frame.
    combined <- image_append(c(frozen_frame, new_gif[i]))
  } else {
    # Walk the broadcast gif so its last frame aligns with the final tracking frame.
    combined <- image_append(c(image_crop(tracking_bad_nfl_gif[length(tracking_bad_nfl_gif) - (play.length.ex - i)], "500x400+100"), new_gif[i]))
  }
  final_gif <- c(final_gif, combined)
}
final_gif
# Interactive check of the broadcast-footage crop geometry.
image_crop(tracking_bad_nfl_gif[1], "500x400+100")
library(gifski)
# NOTE(review): final_gif is a magick image vector, not a gganimate object;
# the fps/nframe/height/width/res arguments are presumably ignored by
# anim_save in that case — confirm against the gganimate docs.
anim_save("~/Desktop/CoverageNet/src/08_writeup/images/TrackingBadEx.gif",
          final_gif,
          fps = 5,
          nframe = play.length.ex,
          height = 500,
          width = 1000,
          res = 120)
# Interactive inspection of the "good" tracking example's first frame/crop.
tracking_good_nfl_gif[1]
image_crop(tracking_good_nfl_gif[1], "800x400+200")
# Closing Examples --------------------------------------------------------
# "Good closing" example play: load the week-13 tracking data and pull a
# single play, its play-level description, and the EPA model outputs.
pbp_data = read.csv("~/Desktop/CoverageNet/src/00_data_wrangle/outputs/week13.csv")
play3 = pbp_data %>%
  filter(gameId == 2018120205,
         playId == 1415)
# Play metadata joined with the three EPA model output tables; joins are
# natural joins on the shared key columns (gameId/playId).
play3_desc = plays %>%
  inner_join(play3 %>%
               distinct(gameId, playId)) %>%
  dplyr::select(-epa) %>%
  mutate(DownDesc = case_when(down == 1 ~ paste("1st and",
                                                yardsToGo),
                              down == 2 ~ paste("2nd and",
                                                yardsToGo),
                              down == 3 ~ paste("3rd and",
                                                yardsToGo),
                              TRUE ~ paste("4th and",
                                           yardsToGo))) %>%
  inner_join(pass_attempt_epa_data) %>%
  inner_join(pass_arrived_epa_data) %>%
  inner_join(pass_caught_epa_data)
play3 %>% distinct(event)  # interactive: which events exist for this play
# Keep only the snapshot frames to plot (throw, arrival, outcome events);
# the pass-arrived frame comes from pass_arrived_frames since the raw event
# tag is not always present. x_end/y_end extend each player's position by
# one second of velocity (speed s decomposed via direction dir) for arrows.
play3_clipped = rbind(play3 %>%
                        filter(event %in% c('pass_forward',
                                            'pass_outcome_interception',
                                            'out_of_bounds')),
                      play3 %>%
                        inner_join(pass_arrived_frames) %>%
                        mutate(event = "pass_arrived")) %>%
  arrange(gameId, playId, frameId, nflId) %>%
  mutate(x_end = s*cos((90-dir)*pi/180) + x,
         y_end = s*sin((90-dir)*pi/180) + y) %>%
  mutate(event = as.factor(event)) %>%
  # Fix the facet order to the chronological order of events.
  mutate(event = factor(event, levels = c("pass_forward",
                                          "pass_arrived",
                                          "pass_outcome_interception",
                                          "out_of_bounds")))
game.info = games %>%
  inner_join(play3_clipped %>%
               dplyr::select(gameId, playId) %>%
               distinct())
## General field boundaries
xmin <- 0
xmax <- 160/3  # field width in yards
hash.right <- 38.35
hash.left <- 12
hash.width <- 3.3
## Specific boundaries for a given play
# Clamp the plotted field length to the play's action (plus padding).
ymin <- max(round(min(play3_clipped$x, na.rm = TRUE), -1), 0) + 5
ymax <- min(round(max(play3_clipped$x, na.rm = TRUE) + 20, -1), 120)
# Hash-mark grid, excluding the 5-yard lines (drawn as full segments).
df.hash <- expand.grid(x = c(0, 23.36667, 29.96667, xmax), y = (10:110))
df.hash <- df.hash %>% filter(!(floor(y %% 5) == 0))
df.hash <- df.hash %>% filter(y < ymax, y > ymin)
# Line of scrimmage and first-down marker in field coordinates.
yardline = (play3_clipped %>% distinct(YardsFromOwnGoal))$YardsFromOwnGoal
firstDownYardLine = yardline + play3_desc$yardsToGo
# Field snapshot at the moment of the throw ("pass_forward") for the good
# closing example, with model outputs (EPA, C%, INT%) in the title.
pass_forward3 =
ggplot() +
# Three-level manual scales keyed by `team`: away, ball (football), home.
scale_size_manual(values = c(4, 2.5, 4), guide = FALSE) +
scale_shape_manual(values = c(21, 16, 21), guide = FALSE) +
scale_fill_manual(values = c("grey", "#654321", "#002244"), guide = FALSE) +
scale_colour_manual(values = c("black", "#654321", "black"), guide = FALSE) +
scale_colour_manual(values = c("black", "#654321", "#c60c30"), guide = FALSE) +
annotate("polygon", x = c(xmin, xmin, xmax, xmax),
         y = c(ymin, yardline + 10, yardline + 10, ymin), colour = "black",
         fill = "limegreen",
         alpha = .5
) +
annotate("polygon", x = c(xmin, xmin, xmax, xmax),
         y = c(yardline + 10, firstDownYardLine + 10, firstDownYardLine + 10, yardline + 10), colour = "black",
         fill = "limegreen",
         alpha = .85
) +
annotate("polygon", x = c(xmin, xmin, xmax, xmax),
         y = c(firstDownYardLine + 10, ymax, ymax, firstDownYardLine + 10), colour = "black",
         fill = "limegreen",
         alpha = .5
) +
annotate("text", x = df.hash$x[df.hash$x < 55/2],
         y = df.hash$y[df.hash$x < 55/2], label = "_", hjust = 0, vjust = -0.2) +
annotate("text", x = df.hash$x[df.hash$x > 55/2],
         y = df.hash$y[df.hash$x > 55/2], label = "_", hjust = 1, vjust = -0.2) +
annotate("segment", x = xmin,
         y = seq(max(10, ymin), min(ymax, 110), by = 5),
         xend = xmax,
         yend = seq(max(10, ymin), min(ymax, 110), by = 5)) +
annotate("text", x = rep(hash.left, 11), y = seq(10, 110, by = 10),
         label = c("G ", seq(10, 50, by = 10), rev(seq(10, 40, by = 10)), " G"),
         angle = 270, size = 4) +
annotate("text", x = rep((xmax - hash.left), 11), y = seq(10, 110, by = 10),
         label = c(" G", seq(10, 50, by = 10), rev(seq(10, 40, by = 10)), "G "),
         angle = 90, size = 4) +
annotate("segment", x = 0, xend = xmax,
         y = yardline + 10, yend = yardline + 10,
         color = "blue",
         size = 1,
         alpha = .7) +
annotate("segment", x = 0, xend = xmax,
         y = firstDownYardLine + 10, yend = firstDownYardLine + 10,
         color = "yellow",
         size = 1,
         alpha = .7) +
geom_segment(data = play3_clipped %>%
               filter(event == "pass_forward") %>%
               filter(!is.na(nflId)),
             aes(x = (xmax - y), y = x + 10, xend = (xmax - y_end),
                 yend = x_end + 10, group = nflId),
             color = "black",
             arrow = arrow(length = unit(.25,"cm"))) +
geom_point(data = play3_clipped %>%
             filter(event == "pass_forward"), aes(x = (xmax-y), y = x + 10, shape = team,
                                                  fill = team, group = nflId, size = team, colour = team), alpha = 0.9) +
geom_point(data = play3_clipped %>%
             inner_join(targeted_receiver %>% rename(nflId = targetNflId)) %>%
             filter(event == "pass_forward"), aes(x = (xmax-y), y = x + 10),
           size = 4, color = "black", shape = 21, fill = "#e31837", alpha = 0.9) +
geom_text(data = play3_clipped %>%
            filter(event == "pass_forward") %>%
            filter(!is.na(nflId)), aes(x = (xmax-y), y = x + 10, label = jerseyNumber, group = nflId), colour = "white",
          vjust = 0.36, size = 2) +
geom_point(data = play3_clipped %>%
             filter(event == "pass_forward") %>%
             filter(is.na(nflId)), aes(x = (xmax-y), y = x + 10),
           fill = "brown", color = "#654321", shape = 16, size = 2.5,
           alpha = 1) +
ylim(ymin, ymax) +
coord_fixed() +
theme(axis.line=element_blank(),
      axis.text.x=element_blank(),
      axis.text.y=element_blank(),
      axis.ticks=element_blank(),
      axis.title.x=element_blank(),
      axis.title.y=element_blank(),
      legend.position="none",
      panel.background=element_blank(),
      panel.border=element_blank(),
      panel.grid.major=element_blank(),
      panel.grid.minor=element_blank(),
      plot.background=element_blank(),
      plot.title = element_text(size=10),
      plot.subtitle = element_text(size=8)) +
facet_wrap(~event) +
labs(title = paste0("EPA Prediction = ", gsub("-", "\u2013", toString(replace_na(round(play3_desc$epa_pass_attempt, 3), "N/A")))),
     subtitle = paste0("C% = ", gsub("-", "\u2013", toString(replace_na(scales::percent(round(play3_desc$c_prob_pass_attempt, 4), .1), "N/A"))),
                       ", INT% = ", gsub("-", "\u2013", toString(replace_na(scales::percent(round(play3_desc$in_prob_pass_attempt, 4), .1), "N/A")))
     ))
# Companion snapshot at the moment the pass arrives ("pass_arrived") for the
# good closing example; identical field layout to pass_forward3 above, only
# the plotted event frame and the model outputs in the title differ.
pass_arrived3 =
ggplot() +
# Three-level manual scales keyed by `team`: away, ball (football), home.
scale_size_manual(values = c(4, 2.5, 4), guide = FALSE) +
scale_shape_manual(values = c(21, 16, 21), guide = FALSE) +
scale_fill_manual(values = c("grey", "#654321", "#002244"), guide = FALSE) +
scale_colour_manual(values = c("black", "#654321", "#c60c30"), guide = FALSE) +
# Field background: before the LOS / between LOS and first-down line
# (brighter) / beyond the first-down line.
annotate("polygon", x = c(xmin, xmin, xmax, xmax),
         y = c(ymin, yardline + 10, yardline + 10, ymin), colour = "black",
         fill = "limegreen",
         alpha = .5
) +
annotate("polygon", x = c(xmin, xmin, xmax, xmax),
         y = c(yardline + 10, firstDownYardLine + 10, firstDownYardLine + 10, yardline + 10), colour = "black",
         fill = "limegreen",
         alpha = .85
) +
annotate("polygon", x = c(xmin, xmin, xmax, xmax),
         y = c(firstDownYardLine + 10, ymax, ymax, firstDownYardLine + 10), colour = "black",
         fill = "limegreen",
         alpha = .5
) +
# Hash marks, 5-yard lines, and yard-number labels on both sidelines.
annotate("text", x = df.hash$x[df.hash$x < 55/2],
         y = df.hash$y[df.hash$x < 55/2], label = "_", hjust = 0, vjust = -0.2) +
annotate("text", x = df.hash$x[df.hash$x > 55/2],
         y = df.hash$y[df.hash$x > 55/2], label = "_", hjust = 1, vjust = -0.2) +
annotate("segment", x = xmin,
         y = seq(max(10, ymin), min(ymax, 110), by = 5),
         xend = xmax,
         yend = seq(max(10, ymin), min(ymax, 110), by = 5)) +
annotate("text", x = rep(hash.left, 11), y = seq(10, 110, by = 10),
         label = c("G ", seq(10, 50, by = 10), rev(seq(10, 40, by = 10)), " G"),
         angle = 270, size = 4) +
annotate("text", x = rep((xmax - hash.left), 11), y = seq(10, 110, by = 10),
         label = c(" G", seq(10, 50, by = 10), rev(seq(10, 40, by = 10)), "G "),
         angle = 90, size = 4) +
# Line of scrimmage (blue) and first-down line (yellow).
annotate("segment", x = 0, xend = xmax,
         y = yardline + 10, yend = yardline + 10,
         color = "blue",
         size = 1,
         alpha = .7) +
annotate("segment", x = 0, xend = xmax,
         y = firstDownYardLine + 10, yend = firstDownYardLine + 10,
         color = "yellow",
         size = 1,
         alpha = .7) +
# Velocity arrows for every player at the plotted frame.
geom_segment(data = play3_clipped %>%
               filter(event == "pass_arrived") %>%
               filter(!is.na(nflId)),
             aes(x = (xmax - y), y = x + 10, xend = (xmax - y_end),
                 yend = x_end + 10, group = nflId),
             color = "black",
             arrow = arrow(length = unit(.25,"cm"))) +
# Player dots, targeted-receiver highlight, jersey numbers, and the ball
# (rows with NA nflId are the football).
geom_point(data = play3_clipped %>%
             filter(event == "pass_arrived"), aes(x = (xmax-y), y = x + 10, shape = team,
                                                  fill = team, group = nflId, size = team, colour = team), alpha = 0.9) +
geom_point(data = play3_clipped %>%
             inner_join(targeted_receiver %>% rename(nflId = targetNflId)) %>%
             filter(event == "pass_arrived"), aes(x = (xmax-y), y = x + 10),
           size = 4, color = "black", shape = 21, fill = "#e31837", alpha = 0.9) +
geom_text(data = play3_clipped %>%
            filter(event == "pass_arrived") %>%
            filter(!is.na(nflId)), aes(x = (xmax-y), y = x + 10, label = jerseyNumber, group = nflId), colour = "white",
          vjust = 0.36, size = 2) +
geom_point(data = play3_clipped %>%
             filter(event == "pass_arrived") %>%
             filter(is.na(nflId)), aes(x = (xmax-y), y = x + 10),
           fill = "brown", color = "#654321", shape = 16, size = 2.5,
           alpha = 1) +
ylim(ymin, ymax) +
coord_fixed() +
theme(axis.line=element_blank(),
      axis.text.x=element_blank(),
      axis.text.y=element_blank(),
      axis.ticks=element_blank(),
      axis.title.x=element_blank(),
      axis.title.y=element_blank(),
      legend.position="none",
      panel.background=element_blank(),
      panel.border=element_blank(),
      panel.grid.major=element_blank(),
      panel.grid.minor=element_blank(),
      plot.background=element_blank(),
      plot.title = element_text(size=10),
      plot.subtitle = element_text(size=8)) +
facet_wrap(~event) +
labs(title = paste0("EPA Prediction = ", gsub("-", "\u2013", toString(replace_na(round(play3_desc$epa_pass_arrived, 3), "N/A")))),
     subtitle = paste0("C% = ", gsub("-", "\u2013", toString(replace_na(scales::percent(round(play3_desc$c_prob_pass_arrived, 4), accuracy = 0.1), "N/A"))),
                       ", INT% = ", gsub("-", "\u2013", toString(replace_na(scales::percent(round(play3_desc$in_prob_pass_arrived, 4), accuracy = 0.1), "N/A")))
     ))
# Interactive preview: the throw and arrival snapshots side by side, headed
# by game/date, down-and-distance, and the wrapped play description.
gridExtra::grid.arrange(pass_forward3,
                        pass_arrived3,
                        ncol = 2,
                        top = paste0(game.info$visitorTeamAbbr, " @ ", game.info$homeTeamAbbr,
                                     " (", game.info$gameDate, ")",
                                     "\n","Down and Distance: ", toString(play3_desc$DownDesc), "\n",
                                     paste(strwrap(paste("Play Description:", toString(play3_desc$playDescription))), collapse="\n")))
library(grid)
# Same layout as a grob (textGrob header allows a smaller font) for saving.
g3 <- gridExtra::arrangeGrob(pass_forward3,
                             pass_arrived3,
                             ncol = 2,
                             top = textGrob(paste0(game.info$visitorTeamAbbr, " @ ", game.info$homeTeamAbbr,
                                                   " (", game.info$gameDate, ")",
                                                   "\n","Down and Distance: ", toString(play3_desc$DownDesc), "\n",
                                                   paste(strwrap(paste("Play Description:", toString(play3_desc$playDescription))), collapse="\n")),
                                            gp=gpar(fontsize=8,font=8)))
# Static intermediate image, later appended next to the broadcast gif.
ggsave(plot = g3,
       filename = "~/Desktop/CoverageNet/src/08_writeup/intermediates/closing_good_intermediate.png",
       height = 4,
       width = 6)
# Load the broadcast gif and the static intermediate image, both scaled to
# 400px tall, then append the static plot to the right of every gif frame.
closing_good_nfl_gif <- image_scale(image_read(path = "~/Desktop/CoverageNet/src/08_writeup/NFL_videos/closing_good.gif"),"x400")
closing_good_nfl_gif  # interactive preview
closing_good_image <- image_scale(image_read("~/Desktop/CoverageNet/src/08_writeup/intermediates/closing_good_intermediate.png"),"x400")
closing_good_image  # interactive preview
final_gif <- image_append(c(image_crop(closing_good_nfl_gif[1], "450x400"), closing_good_image))
# seq_along(...)[-1] instead of 2:length(...) — the latter iterates
# backwards (2, 1) and duplicates frame 1 for a single-frame gif.
for (i in seq_along(closing_good_nfl_gif)[-1]) {
  combined <- image_append(c(image_crop(closing_good_nfl_gif[i], "450x400"), closing_good_image))
  final_gif <- c(final_gif, combined)
}
final_gif
library(gifski)
# Write the stitched broadcast + model-snapshot animation to disk.
# NOTE(review): final_gif is a magick image vector, not a gganimate object;
# the fps/nframe/height/width/res arguments are presumably ignored here —
# confirm against the gganimate anim_save docs.
anim_save("~/Desktop/CoverageNet/src/08_writeup/images/ClosingGoodEx.gif",
          final_gif,
          fps = 10,
          nframe = length(closing_good_nfl_gif),
          height = 500,
          width = 1000,
          res = 120)
# closing bad
# "Bad closing" example play: same pipeline as the good example, but a
# week-2 play that ends in an interception.
pbp_data = read.csv("~/Desktop/CoverageNet/src/00_data_wrangle/outputs/week2.csv")
play3 = pbp_data %>%
  filter(gameId == 2018091602,
         playId == 141)
# Play metadata plus the three EPA model output tables (natural joins).
play3_desc = plays %>%
  inner_join(play3 %>%
               distinct(gameId, playId)) %>%
  dplyr::select(-epa) %>%
  mutate(DownDesc = case_when(down == 1 ~ paste("1st and",
                                                yardsToGo),
                              down == 2 ~ paste("2nd and",
                                                yardsToGo),
                              down == 3 ~ paste("3rd and",
                                                yardsToGo),
                              TRUE ~ paste("4th and",
                                           yardsToGo))) %>%
  inner_join(pass_attempt_epa_data) %>%
  inner_join(pass_arrived_epa_data) %>%
  inner_join(pass_caught_epa_data)
play3 %>% distinct(event)  # interactive: which events exist for this play
# Snapshot frames to plot; pass-arrived frame comes from pass_arrived_frames.
# x_end/y_end extend each player's position by one second of velocity.
play3_clipped = rbind(play3 %>%
                        filter(event %in% c('pass_forward',
                                            'pass_outcome_interception',
                                            'out_of_bounds')),
                      play3 %>%
                        inner_join(pass_arrived_frames) %>%
                        mutate(event = "pass_arrived")) %>%
  arrange(gameId, playId, frameId, nflId) %>%
  mutate(x_end = s*cos((90-dir)*pi/180) + x,
         y_end = s*sin((90-dir)*pi/180) + y) %>%
  mutate(event = as.factor(event)) %>%
  mutate(event = factor(event, levels = c("pass_forward",
                                          "pass_arrived",
                                          "pass_outcome_interception",
                                          "out_of_bounds")))
game.info = games %>%
  inner_join(play3_clipped %>%
               dplyr::select(gameId, playId) %>%
               distinct())
## General field boundaries
xmin <- 0
xmax <- 160/3  # field width in yards
hash.right <- 38.35
hash.left <- 12
hash.width <- 3.3
## Specific boundaries for a given play
ymin <- max(round(min(play3_clipped$x, na.rm = TRUE), -1), 0) + 5
ymax <- min(round(max(play3_clipped$x, na.rm = TRUE) + 20, -1), 120)
# Hash-mark grid, excluding the 5-yard lines (drawn as full segments).
df.hash <- expand.grid(x = c(0, 23.36667, 29.96667, xmax), y = (10:110))
df.hash <- df.hash %>% filter(!(floor(y %% 5) == 0))
df.hash <- df.hash %>% filter(y < ymax, y > ymin)
# Line of scrimmage and first-down marker in field coordinates.
yardline = (play3_clipped %>% distinct(YardsFromOwnGoal))$YardsFromOwnGoal
firstDownYardLine = yardline + play3_desc$yardsToGo
# Throw-moment snapshot for the bad closing example. Identical structure to
# the good example's pass_forward3; only the team colour ordering differs
# (home/away factor order flips the scale value order).
pass_forward3 =
ggplot() +
scale_size_manual(values = c(4, 2.5, 4), guide = FALSE) +
scale_shape_manual(values = c(21, 16, 21), guide = FALSE) +
scale_fill_manual(values = c("#002244", "#654321", "grey"), guide = FALSE) +
scale_colour_manual(values = c("#c60c30", "#654321", "black"), guide = FALSE) +
# Field background polygons: behind LOS / LOS-to-first-down / beyond.
annotate("polygon", x = c(xmin, xmin, xmax, xmax),
         y = c(ymin, yardline + 10, yardline + 10, ymin), colour = "black",
         fill = "limegreen",
         alpha = .5
) +
annotate("polygon", x = c(xmin, xmin, xmax, xmax),
         y = c(yardline + 10, firstDownYardLine + 10, firstDownYardLine + 10, yardline + 10), colour = "black",
         fill = "limegreen",
         alpha = .85
) +
annotate("polygon", x = c(xmin, xmin, xmax, xmax),
         y = c(firstDownYardLine + 10, ymax, ymax, firstDownYardLine + 10), colour = "black",
         fill = "limegreen",
         alpha = .5
) +
# Hash marks, 5-yard lines, and yard-number labels.
annotate("text", x = df.hash$x[df.hash$x < 55/2],
         y = df.hash$y[df.hash$x < 55/2], label = "_", hjust = 0, vjust = -0.2) +
annotate("text", x = df.hash$x[df.hash$x > 55/2],
         y = df.hash$y[df.hash$x > 55/2], label = "_", hjust = 1, vjust = -0.2) +
annotate("segment", x = xmin,
         y = seq(max(10, ymin), min(ymax, 110), by = 5),
         xend = xmax,
         yend = seq(max(10, ymin), min(ymax, 110), by = 5)) +
annotate("text", x = rep(hash.left, 11), y = seq(10, 110, by = 10),
         label = c("G ", seq(10, 50, by = 10), rev(seq(10, 40, by = 10)), " G"),
         angle = 270, size = 4) +
annotate("text", x = rep((xmax - hash.left), 11), y = seq(10, 110, by = 10),
         label = c(" G", seq(10, 50, by = 10), rev(seq(10, 40, by = 10)), "G "),
         angle = 90, size = 4) +
# Line of scrimmage (blue) and first-down line (yellow).
annotate("segment", x = 0, xend = xmax,
         y = yardline + 10, yend = yardline + 10,
         color = "blue",
         size = 1,
         alpha = .7) +
annotate("segment", x = 0, xend = xmax,
         y = firstDownYardLine + 10, yend = firstDownYardLine + 10,
         color = "yellow",
         size = 1,
         alpha = .7) +
# Velocity arrows, player dots, target highlight, jerseys, and the ball.
geom_segment(data = play3_clipped %>%
               filter(event == "pass_forward") %>%
               filter(!is.na(nflId)),
             aes(x = (xmax - y), y = x + 10, xend = (xmax - y_end),
                 yend = x_end + 10, group = nflId),
             color = "black",
             arrow = arrow(length = unit(.25,"cm"))) +
geom_point(data = play3_clipped %>%
             filter(event == "pass_forward"), aes(x = (xmax-y), y = x + 10, shape = team,
                                                  fill = team, group = nflId, size = team, colour = team), alpha = 0.9) +
geom_point(data = play3_clipped %>%
             inner_join(targeted_receiver %>% rename(nflId = targetNflId)) %>%
             filter(event == "pass_forward"), aes(x = (xmax-y), y = x + 10),
           size = 4, color = "black", shape = 21, fill = "#e31837", alpha = 0.9) +
geom_text(data = play3_clipped %>%
            filter(event == "pass_forward") %>%
            filter(!is.na(nflId)), aes(x = (xmax-y), y = x + 10, label = jerseyNumber, group = nflId), colour = "white",
          vjust = 0.36, size = 2) +
geom_point(data = play3_clipped %>%
             filter(event == "pass_forward") %>%
             filter(is.na(nflId)), aes(x = (xmax-y), y = x + 10),
           fill = "brown", color = "#654321", shape = 16, size = 2.5,
           alpha = 1) +
ylim(ymin, ymax) +
coord_fixed() +
theme(axis.line=element_blank(),
      axis.text.x=element_blank(),
      axis.text.y=element_blank(),
      axis.ticks=element_blank(),
      axis.title.x=element_blank(),
      axis.title.y=element_blank(),
      legend.position="none",
      panel.background=element_blank(),
      panel.border=element_blank(),
      panel.grid.major=element_blank(),
      panel.grid.minor=element_blank(),
      plot.background=element_blank(),
      plot.title = element_text(size=10),
      plot.subtitle = element_text(size=8)) +
facet_wrap(~event) +
labs(title = paste0("EPA Prediction = ", gsub("-", "\u2013", toString(replace_na(round(play3_desc$epa_pass_attempt, 3), "N/A")))),
     subtitle = paste0("C% = ", gsub("-", "\u2013", toString(replace_na(scales::percent(round(play3_desc$c_prob_pass_attempt, 4), .1), "N/A"))),
                       ", INT% = ", gsub("-", "\u2013", toString(replace_na(scales::percent(round(play3_desc$in_prob_pass_attempt, 4), .1), "N/A")))
     ))
# Arrival-moment snapshot for the bad closing example; identical field
# layout to the bad pass_forward3, plotting the "pass_arrived" frame.
pass_arrived3 =
ggplot() +
scale_size_manual(values = c(4, 2.5, 4), guide = FALSE) +
scale_shape_manual(values = c(21, 16, 21), guide = FALSE) +
scale_fill_manual(values = c("#002244", "#654321", "grey"), guide = FALSE) +
scale_colour_manual(values = c("#c60c30", "#654321", "black"), guide = FALSE) +
annotate("polygon", x = c(xmin, xmin, xmax, xmax),
         y = c(ymin, yardline + 10, yardline + 10, ymin), colour = "black",
         fill = "limegreen",
         alpha = .5
) +
annotate("polygon", x = c(xmin, xmin, xmax, xmax),
         y = c(yardline + 10, firstDownYardLine + 10, firstDownYardLine + 10, yardline + 10), colour = "black",
         fill = "limegreen",
         alpha = .85
) +
annotate("polygon", x = c(xmin, xmin, xmax, xmax),
         y = c(firstDownYardLine + 10, ymax, ymax, firstDownYardLine + 10), colour = "black",
         fill = "limegreen",
         alpha = .5
) +
annotate("text", x = df.hash$x[df.hash$x < 55/2],
         y = df.hash$y[df.hash$x < 55/2], label = "_", hjust = 0, vjust = -0.2) +
annotate("text", x = df.hash$x[df.hash$x > 55/2],
         y = df.hash$y[df.hash$x > 55/2], label = "_", hjust = 1, vjust = -0.2) +
annotate("segment", x = xmin,
         y = seq(max(10, ymin), min(ymax, 110), by = 5),
         xend = xmax,
         yend = seq(max(10, ymin), min(ymax, 110), by = 5)) +
annotate("text", x = rep(hash.left, 11), y = seq(10, 110, by = 10),
         label = c("G ", seq(10, 50, by = 10), rev(seq(10, 40, by = 10)), " G"),
         angle = 270, size = 4) +
annotate("text", x = rep((xmax - hash.left), 11), y = seq(10, 110, by = 10),
         label = c(" G", seq(10, 50, by = 10), rev(seq(10, 40, by = 10)), "G "),
         angle = 90, size = 4) +
annotate("segment", x = 0, xend = xmax,
         y = yardline + 10, yend = yardline + 10,
         color = "blue",
         size = 1,
         alpha = .7) +
annotate("segment", x = 0, xend = xmax,
         y = firstDownYardLine + 10, yend = firstDownYardLine + 10,
         color = "yellow",
         size = 1,
         alpha = .7) +
geom_segment(data = play3_clipped %>%
               filter(event == "pass_arrived") %>%
               filter(!is.na(nflId)),
             aes(x = (xmax - y), y = x + 10, xend = (xmax - y_end),
                 yend = x_end + 10, group = nflId),
             color = "black",
             arrow = arrow(length = unit(.25,"cm"))) +
geom_point(data = play3_clipped %>%
             filter(event == "pass_arrived"), aes(x = (xmax-y), y = x + 10, shape = team,
                                                  fill = team, group = nflId, size = team, colour = team), alpha = 0.9) +
geom_point(data = play3_clipped %>%
             inner_join(targeted_receiver %>% rename(nflId = targetNflId)) %>%
             filter(event == "pass_arrived"), aes(x = (xmax-y), y = x + 10),
           size = 4, color = "black", shape = 21, fill = "#e31837", alpha = 0.9) +
geom_text(data = play3_clipped %>%
            filter(event == "pass_arrived") %>%
            filter(!is.na(nflId)), aes(x = (xmax-y), y = x + 10, label = jerseyNumber, group = nflId), colour = "white",
          vjust = 0.36, size = 2) +
geom_point(data = play3_clipped %>%
             filter(event == "pass_arrived") %>%
             filter(is.na(nflId)), aes(x = (xmax-y), y = x + 10),
           fill = "brown", color = "#654321", shape = 16, size = 2.5,
           alpha = 1) +
ylim(ymin, ymax) +
coord_fixed() +
theme(axis.line=element_blank(),
      axis.text.x=element_blank(),
      axis.text.y=element_blank(),
      axis.ticks=element_blank(),
      axis.title.x=element_blank(),
      axis.title.y=element_blank(),
      legend.position="none",
      panel.background=element_blank(),
      panel.border=element_blank(),
      panel.grid.major=element_blank(),
      panel.grid.minor=element_blank(),
      plot.background=element_blank(),
      plot.title = element_text(size=10),
      plot.subtitle = element_text(size=8)) +
facet_wrap(~event) +
labs(title = paste0("EPA Prediction = ", gsub("-", "\u2013", toString(replace_na(round(play3_desc$epa_pass_arrived, 3), "N/A")))),
     subtitle = paste0("C% = ", gsub("-", "\u2013", toString(replace_na(scales::percent(round(play3_desc$c_prob_pass_arrived, 4), accuracy = 0.1), "N/A"))),
                       ", INT% = ", gsub("-", "\u2013", toString(replace_na(scales::percent(round(play3_desc$in_prob_pass_arrived, 4), accuracy = 0.1), "N/A")))
     ))
# Interactive preview of the two snapshots side by side with a play header.
gridExtra::grid.arrange(pass_forward3,
                        pass_arrived3,
                        ncol = 2,
                        top = paste0(game.info$visitorTeamAbbr, " @ ", game.info$homeTeamAbbr,
                                     " (", game.info$gameDate, ")",
                                     "\n","Down and Distance: ", toString(play3_desc$DownDesc), "\n",
                                     paste(strwrap(paste("Play Description:", toString(play3_desc$playDescription))), collapse="\n")))
library(grid)
# Grob version with a smaller header font, for saving to disk.
g3 <- gridExtra::arrangeGrob(pass_forward3,
                             pass_arrived3,
                             ncol = 2,
                             top = textGrob(paste0(game.info$visitorTeamAbbr, " @ ", game.info$homeTeamAbbr,
                                                   " (", game.info$gameDate, ")",
                                                   "\n","Down and Distance: ", toString(play3_desc$DownDesc), "\n",
                                                   paste(strwrap(paste("Play Description:", toString(play3_desc$playDescription))), collapse="\n")),
                                            gp=gpar(fontsize=8,font=8)))
# Static intermediate image, later appended next to the broadcast gif.
ggsave(plot = g3,
       filename = "~/Desktop/CoverageNet/src/08_writeup/intermediates/closing_bad_intermediate.png",
       height = 4,
       width = 6)
# Load the broadcast gif and the static intermediate image, both scaled to
# 400px tall, then append the static plot to the right of every gif frame.
closing_bad_nfl_gif <- image_scale(image_read(path = "~/Desktop/CoverageNet/src/08_writeup/NFL_videos/closing_bad.gif"),"x400")
closing_bad_nfl_gif  # interactive preview
closing_bad_image <- image_scale(image_read("~/Desktop/CoverageNet/src/08_writeup/intermediates/closing_bad_intermediate.png"),"x400")
closing_bad_image  # interactive preview
final_gif <- image_append(c(image_crop(closing_bad_nfl_gif[1], "450x400"), closing_bad_image))
# seq_along(...)[-1] instead of 2:length(...) — the latter iterates
# backwards (2, 1) and duplicates frame 1 for a single-frame gif.
for (i in seq_along(closing_bad_nfl_gif)[-1]) {
  combined <- image_append(c(image_crop(closing_bad_nfl_gif[i], "450x400"), closing_bad_image))
  final_gif <- c(final_gif, combined)
}
final_gif
# Write the stitched broadcast + model-snapshot animation to disk.
# NOTE(review): final_gif is a magick image vector, not a gganimate object;
# the fps/nframe/height/width/res arguments are presumably ignored here —
# confirm against the gganimate anim_save docs.
anim_save("~/Desktop/CoverageNet/src/08_writeup/images/ClosingBadEx.gif",
          final_gif,
          fps = 10,
          nframe = length(closing_bad_nfl_gif),
          height = 500,
          width = 1000,
          res = 120)
# Ball Skills Examples ----------------------------------------------------
# GOOD
# "Good ball skills" example play (week 5): defender breaks up the pass.
pbp_data = read.csv("~/Desktop/CoverageNet/src/00_data_wrangle/outputs/week5.csv")
play1 = pbp_data %>%
  filter(gameId == 2018100702,
         playId == 3173)
# Play metadata plus the three EPA model output tables (natural joins).
play1_desc = plays %>%
  inner_join(play1 %>%
               distinct(gameId, playId)) %>%
  dplyr::select(-epa) %>%
  mutate(DownDesc = case_when(down == 1 ~ paste("1st and",
                                                yardsToGo),
                              down == 2 ~ paste("2nd and",
                                                yardsToGo),
                              down == 3 ~ paste("3rd and",
                                                yardsToGo),
                              TRUE ~ paste("4th and",
                                           yardsToGo))) %>%
  inner_join(pass_attempt_epa_data) %>%
  inner_join(pass_arrived_epa_data) %>%
  inner_join(pass_caught_epa_data)
play1 %>% distinct(event)  # interactive: which events exist for this play
# Snapshot frames to plot. Unlike the closing examples, the velocity arrows
# here are scaled by 0.75 (shorter arrows) — note the .75 factor below.
play1_clipped = rbind(play1 %>%
                        filter(event %in% c('pass_forward',
                                            'pass_outcome_incomplete',
                                            'tackle')),
                      play1 %>%
                        inner_join(pass_arrived_frames) %>%
                        mutate(event = "pass_arrived")) %>%
  arrange(gameId, playId, frameId, nflId) %>%
  mutate(x_end = .75*s*cos((90-dir)*pi/180) + x,
         y_end = .75*s*sin((90-dir)*pi/180) + y) %>%
  mutate(event = as.factor(event)) %>%
  mutate(event = factor(event, levels = c("pass_forward",
                                          "pass_arrived",
                                          "pass_outcome_incomplete",
                                          "tackle")))
game.info = games %>%
  inner_join(play1_clipped %>%
               dplyr::select(gameId, playId) %>%
               distinct())
## General field boundaries
xmin <- 0
xmax <- 160/3  # field width in yards
hash.right <- 38.35
hash.left <- 12
hash.width <- 3.3
## Specific boundaries for a given play
ymin <- max(round(min(play1_clipped$x, na.rm = TRUE), -1), 0) + 5
ymax <- min(round(max(play1_clipped$x, na.rm = TRUE) + 20, -1), 120)
# Hash-mark grid, excluding the 5-yard lines (drawn as full segments).
df.hash <- expand.grid(x = c(0, 23.36667, 29.96667, xmax), y = (10:110))
df.hash <- df.hash %>% filter(!(floor(y %% 5) == 0))
df.hash <- df.hash %>% filter(y < ymax, y > ymin)
# Line of scrimmage and first-down marker in field coordinates.
yardline = (play1_clipped %>% distinct(YardsFromOwnGoal))$YardsFromOwnGoal
firstDownYardLine = yardline + play1_desc$yardsToGo
# Arrival-moment snapshot for the good ball-skills example; same field
# template as the closing examples, plotting play1's "pass_arrived" frame.
pass_arrived1 =
ggplot() +
scale_size_manual(values = c(4, 2.5, 4), guide = FALSE) +
scale_shape_manual(values = c(21, 16, 21), guide = FALSE) +
scale_fill_manual(values = c("#002244", "#654321", "grey"), guide = FALSE) +
scale_colour_manual(values = c("#c60c30", "#654321", "black"), guide = FALSE) +
# Field background polygons: behind LOS / LOS-to-first-down / beyond.
annotate("polygon", x = c(xmin, xmin, xmax, xmax),
         y = c(ymin, yardline + 10, yardline + 10, ymin), colour = "black",
         fill = "limegreen",
         alpha = .5
) +
annotate("polygon", x = c(xmin, xmin, xmax, xmax),
         y = c(yardline + 10, firstDownYardLine + 10, firstDownYardLine + 10, yardline + 10), colour = "black",
         fill = "limegreen",
         alpha = .85
) +
annotate("polygon", x = c(xmin, xmin, xmax, xmax),
         y = c(firstDownYardLine + 10, ymax, ymax, firstDownYardLine + 10), colour = "black",
         fill = "limegreen",
         alpha = .5
) +
# Hash marks, 5-yard lines, and yard-number labels.
annotate("text", x = df.hash$x[df.hash$x < 55/2],
         y = df.hash$y[df.hash$x < 55/2], label = "_", hjust = 0, vjust = -0.2) +
annotate("text", x = df.hash$x[df.hash$x > 55/2],
         y = df.hash$y[df.hash$x > 55/2], label = "_", hjust = 1, vjust = -0.2) +
annotate("segment", x = xmin,
         y = seq(max(10, ymin), min(ymax, 110), by = 5),
         xend = xmax,
         yend = seq(max(10, ymin), min(ymax, 110), by = 5)) +
annotate("text", x = rep(hash.left, 11), y = seq(10, 110, by = 10),
         label = c("G ", seq(10, 50, by = 10), rev(seq(10, 40, by = 10)), " G"),
         angle = 270, size = 4) +
annotate("text", x = rep((xmax - hash.left), 11), y = seq(10, 110, by = 10),
         label = c(" G", seq(10, 50, by = 10), rev(seq(10, 40, by = 10)), "G "),
         angle = 90, size = 4) +
# Line of scrimmage (blue) and first-down line (yellow).
annotate("segment", x = 0, xend = xmax,
         y = yardline + 10, yend = yardline + 10,
         color = "blue",
         size = 1,
         alpha = .7) +
annotate("segment", x = 0, xend = xmax,
         y = firstDownYardLine + 10, yend = firstDownYardLine + 10,
         color = "yellow",
         size = 1,
         alpha = .7) +
# Velocity arrows, player dots, target highlight, jerseys, and the ball.
geom_segment(data = play1_clipped %>%
               filter(event == "pass_arrived") %>%
               filter(!is.na(nflId)),
             aes(x = (xmax - y), y = x + 10, xend = (xmax - y_end),
                 yend = x_end + 10, group = nflId),
             color = "black",
             arrow = arrow(length = unit(.25,"cm"))) +
geom_point(data = play1_clipped %>%
             filter(event == "pass_arrived"), aes(x = (xmax-y), y = x + 10, shape = team,
                                                  fill = team, group = nflId, size = team, colour = team), alpha = 0.9) +
geom_point(data = play1_clipped %>%
             inner_join(targeted_receiver %>% rename(nflId = targetNflId)) %>%
             filter(event == "pass_arrived"), aes(x = (xmax-y), y = x + 10),
           size = 4, color = "black", shape = 21, fill = "#e31837", alpha = 0.9) +
geom_text(data = play1_clipped %>%
            filter(event == "pass_arrived") %>%
            filter(!is.na(nflId)), aes(x = (xmax-y), y = x + 10, label = jerseyNumber, group = nflId), colour = "white",
          vjust = 0.36, size = 2) +
geom_point(data = play1_clipped %>%
             filter(event == "pass_arrived") %>%
             filter(is.na(nflId)), aes(x = (xmax-y), y = x + 10),
           fill = "brown", color = "#654321", shape = 16, size = 2.5,
           alpha = 1) +
ylim(ymin, ymax) +
coord_fixed() +
theme(axis.line=element_blank(),
      axis.text.x=element_blank(),
      axis.text.y=element_blank(),
      axis.ticks=element_blank(),
      axis.title.x=element_blank(),
      axis.title.y=element_blank(),
      legend.position="none",
      panel.background=element_blank(),
      panel.border=element_blank(),
      panel.grid.major=element_blank(),
      panel.grid.minor=element_blank(),
      plot.background=element_blank(),
      plot.title = element_text(size=10),
      plot.subtitle = element_text(size=8)) +
facet_wrap(~event) +
labs(title = paste0("EPA Prediction = ", gsub("-", "\u2013", toString(replace_na(round(play1_desc$epa_pass_arrived, 3), "N/A")))),
     subtitle = paste0("C% = ", gsub("-", "\u2013", toString(replace_na(scales::percent(round(play1_desc$c_prob_pass_arrived, 4), accuracy = 0.1), "N/A"))),
                       ", INT% = ", gsub("-", "\u2013", toString(replace_na(scales::percent(round(play1_desc$in_prob_pass_arrived, 4), accuracy = 0.1), "N/A")))
     ))
# Outcome snapshot ("pass_outcome_incomplete") for the good ball-skills
# example; title shows the realized EPA of the throw rather than a model
# prediction, so the subtitle is left blank.
pass_caught1 =
ggplot() +
scale_size_manual(values = c(4, 2.5, 4), guide = FALSE) +
scale_shape_manual(values = c(21, 16, 21), guide = FALSE) +
scale_fill_manual(values = c("#002244", "#654321", "grey"), guide = FALSE) +
scale_colour_manual(values = c("#c60c30", "#654321", "black"), guide = FALSE) +
annotate("polygon", x = c(xmin, xmin, xmax, xmax),
         y = c(ymin, yardline + 10, yardline + 10, ymin), colour = "black",
         fill = "limegreen",
         alpha = .5
) +
annotate("polygon", x = c(xmin, xmin, xmax, xmax),
         y = c(yardline + 10, firstDownYardLine + 10, firstDownYardLine + 10, yardline + 10), colour = "black",
         fill = "limegreen",
         alpha = .85
) +
annotate("polygon", x = c(xmin, xmin, xmax, xmax),
         y = c(firstDownYardLine + 10, ymax, ymax, firstDownYardLine + 10), colour = "black",
         fill = "limegreen",
         alpha = .5
) +
annotate("text", x = df.hash$x[df.hash$x < 55/2],
         y = df.hash$y[df.hash$x < 55/2], label = "_", hjust = 0, vjust = -0.2) +
annotate("text", x = df.hash$x[df.hash$x > 55/2],
         y = df.hash$y[df.hash$x > 55/2], label = "_", hjust = 1, vjust = -0.2) +
annotate("segment", x = xmin,
         y = seq(max(10, ymin), min(ymax, 110), by = 5),
         xend = xmax,
         yend = seq(max(10, ymin), min(ymax, 110), by = 5)) +
annotate("text", x = rep(hash.left, 11), y = seq(10, 110, by = 10),
         label = c("G ", seq(10, 50, by = 10), rev(seq(10, 40, by = 10)), " G"),
         angle = 270, size = 4) +
annotate("text", x = rep((xmax - hash.left), 11), y = seq(10, 110, by = 10),
         label = c(" G", seq(10, 50, by = 10), rev(seq(10, 40, by = 10)), "G "),
         angle = 90, size = 4) +
annotate("segment", x = 0, xend = xmax,
         y = yardline + 10, yend = yardline + 10,
         color = "blue",
         size = 1,
         alpha = .7) +
annotate("segment", x = 0, xend = xmax,
         y = firstDownYardLine + 10, yend = firstDownYardLine + 10,
         color = "yellow",
         size = 1,
         alpha = .7) +
geom_segment(data = play1_clipped %>%
               filter(event == "pass_outcome_incomplete") %>%
               filter(!is.na(nflId)),
             aes(x = (xmax - y), y = x + 10, xend = (xmax - y_end),
                 yend = x_end + 10, group = nflId),
             color = "black",
             arrow = arrow(length = unit(.25,"cm"))) +
geom_point(data = play1_clipped %>%
             filter(event == "pass_outcome_incomplete"), aes(x = (xmax-y), y = x + 10, shape = team,
                                                             fill = team, group = nflId, size = team, colour = team), alpha = 0.9) +
geom_point(data = play1_clipped %>%
             inner_join(targeted_receiver %>% rename(nflId = targetNflId)) %>%
             filter(event == "pass_outcome_incomplete"), aes(x = (xmax-y), y = x + 10),
           size = 4, color = "black", shape = 21, fill = "#e31837", alpha = 0.9) +
geom_text(data = play1_clipped %>%
            filter(event == "pass_outcome_incomplete") %>%
            filter(!is.na(nflId)), aes(x = (xmax-y), y = x + 10, label = jerseyNumber, group = nflId), colour = "white",
          vjust = 0.36, size = 2) +
geom_point(data = play1_clipped %>%
             filter(event == "pass_outcome_incomplete") %>%
             filter(is.na(nflId)), aes(x = (xmax-y), y = x + 10),
           fill = "brown", color = "#654321", shape = 16, size = 2.5,
           alpha = 1) +
ylim(ymin, ymax) +
coord_fixed() +
theme(axis.line=element_blank(),
      axis.text.x=element_blank(),
      axis.text.y=element_blank(),
      axis.ticks=element_blank(),
      axis.title.x=element_blank(),
      axis.title.y=element_blank(),
      legend.position="none",
      panel.background=element_blank(),
      panel.border=element_blank(),
      panel.grid.major=element_blank(),
      panel.grid.minor=element_blank(),
      plot.background=element_blank(),
      plot.title = element_text(size=10),
      plot.subtitle = element_text(size=8)) +
facet_wrap(~event) +
labs(title = paste0("EPA Result = ", gsub("-", "\u2013", toString(replace_na(round(play1_desc$epa_throw, 3), "N/A")))),
     subtitle = "")
# Render the two-panel figure (pass arrival + catch) to the active device,
# titled with the matchup/date, down-and-distance, and wrapped play description.
gridExtra::grid.arrange(pass_arrived1,
                        pass_caught1,
                        ncol = 2,
                        top = paste0(game.info$visitorTeamAbbr, " @ ", game.info$homeTeamAbbr,
                                     " (", game.info$gameDate, ")",
                                     "\n","Down and Distance: ", toString(play1_desc$DownDesc), "\n",
                                     paste(strwrap(paste("Play Description:", toString(play1_desc$playDescription))), collapse="\n")))
library(grid)
# Same layout captured as a grob (textGrob/gpar shrink the title font to 8pt)
# so it can be written to disk with ggsave() rather than drawn on a device.
g1 <- gridExtra::arrangeGrob(pass_arrived1,
                             pass_caught1,
                             ncol = 2,
                             top = textGrob(paste0(game.info$visitorTeamAbbr, " @ ", game.info$homeTeamAbbr,
                                                   " (", game.info$gameDate, ")",
                                                   "\n","Down and Distance: ", toString(play1_desc$DownDesc), "\n",
                                                   paste(strwrap(paste("Play Description:", toString(play1_desc$playDescription))), collapse="\n")),
                                            gp=gpar(fontsize=8,font=8)))
# Static intermediate image that gets appended next to the broadcast GIF below.
ggsave(plot = g1,
       filename = "~/Desktop/CoverageNet/src/08_writeup/intermediates/ball_skills_good_intermediate.png",
       height = 4,
       width = 6)
# Read the broadcast clip and the static summary panel, both scaled to 400px tall.
ball_skills_good_nfl_gif = image_scale(image_read(path = "~/Desktop/CoverageNet/src/08_writeup/NFL_videos/ball_skills_good.gif"),"x400")
ball_skills_good_nfl_gif
ball_skills_good_image = image_scale(image_read("~/Desktop/CoverageNet/src/08_writeup/intermediates/ball_skills_good_intermediate.png"),"x400")
ball_skills_good_image
# Append the static panel to every frame of the (cropped) broadcast clip.
# seq_along()[-1] replaces the original 2:length() pattern, which counts DOWN
# (c(2, 1)) and misbehaves when the GIF has a single frame.
final_gif = image_append(c(image_crop(ball_skills_good_nfl_gif[1], "450x400"), ball_skills_good_image))
for (i in seq_along(ball_skills_good_nfl_gif)[-1]) {
  combined <- image_append(c(image_crop(ball_skills_good_nfl_gif[i], "450x400"), ball_skills_good_image))
  final_gif <- c(final_gif, combined)
}
final_gif
# Write the combined animation to disk at the clip's native frame count.
anim_save("~/Desktop/CoverageNet/src/08_writeup/images/BallSkillsGoodEx.gif",
          final_gif,
          fps = 10,
          nframe = length(ball_skills_good_nfl_gif),
          height = 500,
          width = 1000,
          res = 120)
# BAD
# Example of a poor ball-skills rep: load week-10 tracking data and pull one
# hard-coded play.
pbp_data = read.csv("~/Desktop/CoverageNet/src/00_data_wrangle/outputs/week10.csv")
play1 = pbp_data %>%
  filter(gameId == 2018110800,
         playId == 1602)
# Play-level metadata: a human-readable down-and-distance label plus the EPA
# model outputs joined in from the attempt/arrived/caught stage tables.
play1_desc = plays %>%
  inner_join(play1 %>%
               distinct(gameId, playId)) %>%
  dplyr::select(-epa) %>%
  mutate(DownDesc = case_when(down == 1 ~ paste("1st and",
                                                yardsToGo),
                              down == 2 ~ paste("2nd and",
                                                yardsToGo),
                              down == 3 ~ paste("3rd and",
                                                yardsToGo),
                              TRUE ~ paste("4th and",
                                           yardsToGo))) %>%
  inner_join(pass_attempt_epa_data) %>%
  inner_join(pass_arrived_epa_data) %>%
  inner_join(pass_caught_epa_data)
# Interactive check of which tracking events this play contains.
play1 %>% distinct(event)
# Keep only the frames of interest (the "pass_arrived" frame comes from the
# pre-computed pass_arrived_frames lookup), derive velocity-arrow endpoints
# from speed `s` and direction `dir` (scaled by .75 here -- the other example
# sections use the unscaled arrow), and order the event factor for facetting.
play1_clipped = rbind(play1 %>%
                        filter(event %in% c('pass_forward',
                                            'pass_outcome_incomplete',
                                            'tackle')),
                      play1 %>%
                        inner_join(pass_arrived_frames) %>%
                        mutate(event = "pass_arrived")) %>%
  arrange(gameId, playId, frameId, nflId) %>%
  mutate(x_end = .75*s*cos((90-dir)*pi/180) + x,
         y_end = .75*s*sin((90-dir)*pi/180) + y) %>%
  mutate(event = as.factor(event)) %>%
  mutate(event = factor(event, levels = c("pass_forward",
                                          "pass_arrived",
                                          "pass_outcome_incomplete",
                                          "tackle")))
# Game metadata (team abbreviations, date) for the figure title.
game.info = games %>%
  inner_join(play1_clipped %>%
               dplyr::select(gameId, playId) %>%
               distinct())
## General field boundaries (constant for every play).
xmin <- 0
xmax <- 160/3            # field width in yards
hash.right <- 38.35
hash.left <- 12
hash.width <- 3.3
## Play-specific view window, clipped to where the action happened.
ymin <- max(round(min(play1_clipped$x, na.rm = TRUE), -1), 0) + 5
ymax <- min(round(max(play1_clipped$x, na.rm = TRUE) + 20, -1), 120)
## Hash-mark tick positions: drop the 5-yard lines, keep only the window.
df.hash <- expand.grid(x = c(0, 23.36667, 29.96667, xmax), y = (10:110)) %>%
  filter(!(floor(y %% 5) == 0),
         y < ymax,
         y > ymin)
yardline <- (play1_clipped %>% distinct(YardsFromOwnGoal))$YardsFromOwnGoal
firstDownYardLine <- yardline + play1_desc$yardsToGo
# Panel at the "pass_arrived" frame: portrait-oriented field with player dots,
# velocity arrows, the targeted receiver highlighted in red, and the ball.
# Plot coords rotate the tracking data: horizontal = (xmax - y) mirrors the
# tracking y across the field; vertical = x + 10 (presumably the endzone
# offset -- confirm against the tracking coordinate convention).
pass_arrived1 =
  ggplot() +
  # Manual scales style the three `team` levels; the brown middle entry is
  # presumably the football rows -- legends suppressed.
  scale_size_manual(values = c(4, 2.5, 4), guide = FALSE) +
  scale_shape_manual(values = c(21, 16, 21), guide = FALSE) +
  scale_fill_manual(values = c("#002244", "#654321", "grey"), guide = FALSE) +
  scale_colour_manual(values = c("#c60c30", "#654321", "black"), guide = FALSE) +
  # Field backdrop: three green bands -- behind the LOS, LOS-to-first-down
  # (brighter), and beyond the first-down line.
  annotate("polygon", x = c(xmin, xmin, xmax, xmax),
           y = c(ymin, yardline + 10, yardline + 10, ymin), colour = "black",
           fill = "limegreen",
           alpha = .5
  ) +
  annotate("polygon", x = c(xmin, xmin, xmax, xmax),
           y = c(yardline + 10, firstDownYardLine + 10, firstDownYardLine + 10, yardline + 10), colour = "black",
           fill = "limegreen",
           alpha = .85
  ) +
  annotate("polygon", x = c(xmin, xmin, xmax, xmax),
           y = c(firstDownYardLine + 10, ymax, ymax, firstDownYardLine + 10), colour = "black",
           fill = "limegreen",
           alpha = .5
  ) +
  # Hash marks on each side of midfield, then 5-yard lines across the field.
  annotate("text", x = df.hash$x[df.hash$x < 55/2],
           y = df.hash$y[df.hash$x < 55/2], label = "_", hjust = 0, vjust = -0.2) +
  annotate("text", x = df.hash$x[df.hash$x > 55/2],
           y = df.hash$y[df.hash$x > 55/2], label = "_", hjust = 1, vjust = -0.2) +
  annotate("segment", x = xmin,
           y = seq(max(10, ymin), min(ymax, 110), by = 5),
           xend = xmax,
           yend = seq(max(10, ymin), min(ymax, 110), by = 5)) +
  # Yard-line numbers down both sidelines (G, 10..50..10, G).
  annotate("text", x = rep(hash.left, 11), y = seq(10, 110, by = 10),
           label = c("G ", seq(10, 50, by = 10), rev(seq(10, 40, by = 10)), " G"),
           angle = 270, size = 4) +
  annotate("text", x = rep((xmax - hash.left), 11), y = seq(10, 110, by = 10),
           label = c(" G", seq(10, 50, by = 10), rev(seq(10, 40, by = 10)), "G "),
           angle = 90, size = 4) +
  # Line of scrimmage (blue) and first-down line (yellow).
  annotate("segment", x = 0, xend = xmax,
           y = yardline + 10, yend = yardline + 10,
           color = "blue",
           size = 1,
           alpha = .7) +
  annotate("segment", x = 0, xend = xmax,
           y = firstDownYardLine + 10, yend = firstDownYardLine + 10,
           color = "yellow",
           size = 1,
           alpha = .7) +
  # Velocity arrows for each player (x_end/y_end pre-computed upstream).
  geom_segment(data = play1_clipped %>%
                 filter(event == "pass_arrived") %>%
                 filter(!is.na(nflId)),
               aes(x = (xmax - y), y = x + 10, xend = (xmax - y_end),
                   yend = x_end + 10, group = nflId),
               color = "black",
               arrow = arrow(length = unit(.25,"cm"))) +
  # Player dots styled by team via the manual scales above.
  geom_point(data = play1_clipped %>%
               filter(event == "pass_arrived"), aes(x = (xmax-y), y = x + 10, shape = team,
                                                    fill = team, group = nflId, size = team, colour = team), alpha = 0.9) +
  # Highlight the targeted receiver in red.
  geom_point(data = play1_clipped %>%
               inner_join(targeted_receiver %>% rename(nflId = targetNflId)) %>%
               filter(event == "pass_arrived"), aes(x = (xmax-y), y = x + 10),
             size = 4, color = "black", shape = 21, fill = "#e31837", alpha = 0.9) +
  # Jersey numbers on the dots.
  geom_text(data = play1_clipped %>%
              filter(event == "pass_arrived") %>%
              filter(!is.na(nflId)), aes(x = (xmax-y), y = x + 10, label = jerseyNumber, group = nflId), colour = "white",
            vjust = 0.36, size = 2) +
  # The football: tracking rows with no nflId.
  geom_point(data = play1_clipped %>%
               filter(event == "pass_arrived") %>%
               filter(is.na(nflId)), aes(x = (xmax-y), y = x + 10),
             fill = "brown", color = "#654321", shape = 16, size = 2.5,
             alpha = 1) +
  ylim(ymin, ymax) +
  coord_fixed() +
  # Blank theme: no axes/grid, small title/subtitle fonts.
  theme(axis.line=element_blank(),
        axis.text.x=element_blank(),
        axis.text.y=element_blank(),
        axis.ticks=element_blank(),
        axis.title.x=element_blank(),
        axis.title.y=element_blank(),
        legend.position="none",
        panel.background=element_blank(),
        panel.border=element_blank(),
        panel.grid.major=element_blank(),
        panel.grid.minor=element_blank(),
        plot.background=element_blank(),
        plot.title = element_text(size=10),
        plot.subtitle = element_text(size=8)) +
  facet_wrap(~event) +
  # Title/subtitle report the model's pass-arrived EPA prediction and
  # catch/interception probabilities; gsub swaps hyphens for en dashes.
  labs(title = paste0("EPA Prediction = ", gsub("-", "\u2013", toString(replace_na(round(play1_desc$epa_pass_arrived, 3), "N/A")))),
       subtitle = paste0("C% = ", gsub("-", "\u2013", toString(replace_na(scales::percent(round(play1_desc$c_prob_pass_arrived, 4), accuracy = 0.1), "N/A"))),
                         ", INT% = ", gsub("-", "\u2013", toString(replace_na(scales::percent(round(play1_desc$in_prob_pass_arrived, 4), accuracy = 0.1), "N/A")))
       ))
# Companion panel at the "pass_outcome_caught" frame; same field construction
# and coordinate rotation as pass_arrived1 (horizontal = xmax - y,
# vertical = x + 10), differing only in the event filter and the labels.
pass_caught1 =
  ggplot() +
  # Manual per-team styling (legends off); brown middle entry presumably the ball.
  scale_size_manual(values = c(4, 2.5, 4), guide = FALSE) +
  scale_shape_manual(values = c(21, 16, 21), guide = FALSE) +
  scale_fill_manual(values = c("#002244", "#654321", "grey"), guide = FALSE) +
  scale_colour_manual(values = c("#c60c30", "#654321", "black"), guide = FALSE) +
  # Field backdrop bands: behind LOS / LOS-to-first-down / beyond.
  annotate("polygon", x = c(xmin, xmin, xmax, xmax),
           y = c(ymin, yardline + 10, yardline + 10, ymin), colour = "black",
           fill = "limegreen",
           alpha = .5
  ) +
  annotate("polygon", x = c(xmin, xmin, xmax, xmax),
           y = c(yardline + 10, firstDownYardLine + 10, firstDownYardLine + 10, yardline + 10), colour = "black",
           fill = "limegreen",
           alpha = .85
  ) +
  annotate("polygon", x = c(xmin, xmin, xmax, xmax),
           y = c(firstDownYardLine + 10, ymax, ymax, firstDownYardLine + 10), colour = "black",
           fill = "limegreen",
           alpha = .5
  ) +
  # Hash marks and 5-yard lines.
  annotate("text", x = df.hash$x[df.hash$x < 55/2],
           y = df.hash$y[df.hash$x < 55/2], label = "_", hjust = 0, vjust = -0.2) +
  annotate("text", x = df.hash$x[df.hash$x > 55/2],
           y = df.hash$y[df.hash$x > 55/2], label = "_", hjust = 1, vjust = -0.2) +
  annotate("segment", x = xmin,
           y = seq(max(10, ymin), min(ymax, 110), by = 5),
           xend = xmax,
           yend = seq(max(10, ymin), min(ymax, 110), by = 5)) +
  # Yard-line numbers along both sidelines.
  annotate("text", x = rep(hash.left, 11), y = seq(10, 110, by = 10),
           label = c("G ", seq(10, 50, by = 10), rev(seq(10, 40, by = 10)), " G"),
           angle = 270, size = 4) +
  annotate("text", x = rep((xmax - hash.left), 11), y = seq(10, 110, by = 10),
           label = c(" G", seq(10, 50, by = 10), rev(seq(10, 40, by = 10)), "G "),
           angle = 90, size = 4) +
  # LOS (blue) and first-down line (yellow).
  annotate("segment", x = 0, xend = xmax,
           y = yardline + 10, yend = yardline + 10,
           color = "blue",
           size = 1,
           alpha = .7) +
  annotate("segment", x = 0, xend = xmax,
           y = firstDownYardLine + 10, yend = firstDownYardLine + 10,
           color = "yellow",
           size = 1,
           alpha = .7) +
  # Velocity arrows.
  geom_segment(data = play1_clipped %>%
                 filter(event == "pass_outcome_caught") %>%
                 filter(!is.na(nflId)),
               aes(x = (xmax - y), y = x + 10, xend = (xmax - y_end),
                   yend = x_end + 10, group = nflId),
               color = "black",
               arrow = arrow(length = unit(.25,"cm"))) +
  # Player dots.
  geom_point(data = play1_clipped %>%
               filter(event == "pass_outcome_caught"), aes(x = (xmax-y), y = x + 10, shape = team,
                                                           fill = team, group = nflId, size = team, colour = team), alpha = 0.9) +
  # Targeted receiver highlighted in red.
  geom_point(data = play1_clipped %>%
               inner_join(targeted_receiver %>% rename(nflId = targetNflId)) %>%
               filter(event == "pass_outcome_caught"), aes(x = (xmax-y), y = x + 10),
             size = 4, color = "black", shape = 21, fill = "#e31837", alpha = 0.9) +
  # Jersey numbers.
  geom_text(data = play1_clipped %>%
              filter(event == "pass_outcome_caught") %>%
              filter(!is.na(nflId)), aes(x = (xmax-y), y = x + 10, label = jerseyNumber, group = nflId), colour = "white",
            vjust = 0.36, size = 2) +
  # The football (rows with no nflId).
  geom_point(data = play1_clipped %>%
               filter(event == "pass_outcome_caught") %>%
               filter(is.na(nflId)), aes(x = (xmax-y), y = x + 10),
             fill = "brown", color = "#654321", shape = 16, size = 2.5,
             alpha = 1) +
  ylim(ymin, ymax) +
  coord_fixed() +
  # Blank theme.
  theme(axis.line=element_blank(),
        axis.text.x=element_blank(),
        axis.text.y=element_blank(),
        axis.ticks=element_blank(),
        axis.title.x=element_blank(),
        axis.title.y=element_blank(),
        legend.position="none",
        panel.background=element_blank(),
        panel.border=element_blank(),
        panel.grid.major=element_blank(),
        panel.grid.minor=element_blank(),
        plot.background=element_blank(),
        plot.title = element_text(size=10),
        plot.subtitle = element_text(size=8)) +
  facet_wrap(~event) +
  # Throw-time EPA prediction and expected yards after catch.
  labs(title = paste0("EPA Prediction = ", gsub("-", "\u2013", toString(replace_na(round(play1_desc$epa_throw, 3), "N/A")))),
       subtitle = paste0("Expected YAC = ", gsub("-", "\u2013", toString(replace_na(round(play1_desc$eyac, 1), "N/A"))))
  )
# Render the BAD-example two-panel figure to the active device.
gridExtra::grid.arrange(pass_arrived1,
                        pass_caught1,
                        ncol = 2,
                        top = paste0(game.info$visitorTeamAbbr, " @ ", game.info$homeTeamAbbr,
                                     " (", game.info$gameDate, ")",
                                     "\n","Down and Distance: ", toString(play1_desc$DownDesc), "\n",
                                     paste(strwrap(paste("Play Description:", toString(play1_desc$playDescription))), collapse="\n")))
library(grid)
# Grob version (8pt title via textGrob/gpar) for saving to disk.
g1 <- gridExtra::arrangeGrob(pass_arrived1,
                             pass_caught1,
                             ncol = 2,
                             top = textGrob(paste0(game.info$visitorTeamAbbr, " @ ", game.info$homeTeamAbbr,
                                                   " (", game.info$gameDate, ")",
                                                   "\n","Down and Distance: ", toString(play1_desc$DownDesc), "\n",
                                                   paste(strwrap(paste("Play Description:", toString(play1_desc$playDescription))), collapse="\n")),
                                            gp=gpar(fontsize=8,font=8)))
# Static intermediate image appended next to the broadcast GIF below.
ggsave(plot = g1,
       filename = "~/Desktop/CoverageNet/src/08_writeup/intermediates/ball_skills_bad_intermediate.png",
       height = 4,
       width = 6)
# Read the broadcast clip and the static summary panel, both scaled to 400px tall.
ball_skills_bad_nfl_gif = image_scale(image_read(path = "~/Desktop/CoverageNet/src/08_writeup/NFL_videos/ball_skills_bad.gif"),"x400")
ball_skills_bad_nfl_gif
ball_skills_bad_image = image_scale(image_read("~/Desktop/CoverageNet/src/08_writeup/intermediates/ball_skills_bad_intermediate.png"),"x400")
ball_skills_bad_image
# Append the static panel to every frame of the (cropped) broadcast clip.
# seq_along()[-1] replaces the original 2:length() pattern, which counts DOWN
# (c(2, 1)) and misbehaves when the GIF has a single frame.
final_gif = image_append(c(image_crop(ball_skills_bad_nfl_gif[1], "450x400"), ball_skills_bad_image))
for (i in seq_along(ball_skills_bad_nfl_gif)[-1]) {
  combined <- image_append(c(image_crop(ball_skills_bad_nfl_gif[i], "450x400"), ball_skills_bad_image))
  final_gif <- c(final_gif, combined)
}
final_gif
# Write the combined animation to disk at the clip's native frame count.
anim_save("~/Desktop/CoverageNet/src/08_writeup/images/BallSkillsBadEx.gif",
          final_gif,
          fps = 10,
          nframe = length(ball_skills_bad_nfl_gif),
          height = 500,
          width = 1000,
          res = 120)
# Tackling Examples -------------------------------------------------------
# Load week-10 tracking data and pull one hard-coded play for the tackling
# example.
pbp_data = read.csv("~/Desktop/CoverageNet/src/00_data_wrangle/outputs/week10.csv")
play2 = pbp_data %>%
  filter(gameId == 2018111104,
         playId == 481)
# Play metadata: down-and-distance label plus EPA model outputs from the
# attempt/arrived/caught stage tables.
play2_desc = plays %>%
  inner_join(play2 %>%
               distinct(gameId, playId)) %>%
  dplyr::select(-epa) %>%
  mutate(DownDesc = case_when(down == 1 ~ paste("1st and",
                                                yardsToGo),
                              down == 2 ~ paste("2nd and",
                                                yardsToGo),
                              down == 3 ~ paste("3rd and",
                                                yardsToGo),
                              TRUE ~ paste("4th and",
                                           yardsToGo))) %>%
  inner_join(pass_attempt_epa_data) %>%
  inner_join(pass_arrived_epa_data) %>%
  inner_join(pass_caught_epa_data)
# Interactive check of this play's tracking events.
play2 %>% distinct(event)
# Frames of interest (pass_arrived from the pre-computed lookup), velocity
# arrow endpoints from speed/direction (full-length arrows here, unlike the
# .75-scaled ball-skills example), and ordered event factor for facetting.
play2_clipped = rbind(play2 %>%
                        filter(event %in% c('pass_forward',
                                            'pass_outcome_caught',
                                            'tackle')),
                      play2 %>%
                        inner_join(pass_arrived_frames) %>%
                        mutate(event = "pass_arrived")) %>%
  arrange(gameId, playId, frameId, nflId) %>%
  mutate(x_end = s*cos((90-dir)*pi/180) + x,
         y_end = s*sin((90-dir)*pi/180) + y) %>%
  mutate(event = as.factor(event)) %>%
  mutate(event = factor(event, levels = c("pass_forward",
                                          "pass_arrived",
                                          "pass_outcome_caught",
                                          "tackle")))
# Game metadata for the figure title.
game.info = games %>%
  inner_join(play2_clipped %>%
               dplyr::select(gameId, playId) %>%
               distinct())
## General field boundaries (constant for every play).
xmin <- 0
xmax <- 160/3            # field width in yards
hash.right <- 38.35
hash.left <- 12
hash.width <- 3.3
## Play-specific view window, clipped to where the action happened.
ymin <- max(round(min(play2_clipped$x, na.rm = TRUE), -1), 0) + 5
ymax <- min(round(max(play2_clipped$x, na.rm = TRUE) + 20, -1), 120)
## Hash-mark tick positions: drop the 5-yard lines, keep only the window.
df.hash <- expand.grid(x = c(0, 23.36667, 29.96667, xmax), y = (10:110)) %>%
  filter(!(floor(y %% 5) == 0),
         y < ymax,
         y > ymin)
yardline <- (play2_clipped %>% distinct(YardsFromOwnGoal))$YardsFromOwnGoal
firstDownYardLine <- yardline + play2_desc$yardsToGo
# Panel at the "pass_outcome_caught" frame for the tackling example; same
# rotated-field construction as the earlier panels (horizontal = xmax - y,
# vertical = x + 10).
pass_caught2 =
  ggplot() +
  # Manual per-team styling (legends off); brown middle entry presumably the ball.
  scale_size_manual(values = c(4, 2.5, 4), guide = FALSE) +
  scale_shape_manual(values = c(21, 16, 21), guide = FALSE) +
  scale_fill_manual(values = c("#002244", "#654321", "grey"), guide = FALSE) +
  scale_colour_manual(values = c("#c60c30", "#654321", "black"), guide = FALSE) +
  # Field backdrop bands: behind LOS / LOS-to-first-down / beyond.
  annotate("polygon", x = c(xmin, xmin, xmax, xmax),
           y = c(ymin, yardline + 10, yardline + 10, ymin), colour = "black",
           fill = "limegreen",
           alpha = .5
  ) +
  annotate("polygon", x = c(xmin, xmin, xmax, xmax),
           y = c(yardline + 10, firstDownYardLine + 10, firstDownYardLine + 10, yardline + 10), colour = "black",
           fill = "limegreen",
           alpha = .85
  ) +
  annotate("polygon", x = c(xmin, xmin, xmax, xmax),
           y = c(firstDownYardLine + 10, ymax, ymax, firstDownYardLine + 10), colour = "black",
           fill = "limegreen",
           alpha = .5
  ) +
  # Hash marks and 5-yard lines.
  annotate("text", x = df.hash$x[df.hash$x < 55/2],
           y = df.hash$y[df.hash$x < 55/2], label = "_", hjust = 0, vjust = -0.2) +
  annotate("text", x = df.hash$x[df.hash$x > 55/2],
           y = df.hash$y[df.hash$x > 55/2], label = "_", hjust = 1, vjust = -0.2) +
  annotate("segment", x = xmin,
           y = seq(max(10, ymin), min(ymax, 110), by = 5),
           xend = xmax,
           yend = seq(max(10, ymin), min(ymax, 110), by = 5)) +
  # Yard-line numbers along both sidelines.
  annotate("text", x = rep(hash.left, 11), y = seq(10, 110, by = 10),
           label = c("G ", seq(10, 50, by = 10), rev(seq(10, 40, by = 10)), " G"),
           angle = 270, size = 4) +
  annotate("text", x = rep((xmax - hash.left), 11), y = seq(10, 110, by = 10),
           label = c(" G", seq(10, 50, by = 10), rev(seq(10, 40, by = 10)), "G "),
           angle = 90, size = 4) +
  # LOS (blue) and first-down line (yellow).
  annotate("segment", x = 0, xend = xmax,
           y = yardline + 10, yend = yardline + 10,
           color = "blue",
           size = 1,
           alpha = .7) +
  annotate("segment", x = 0, xend = xmax,
           y = firstDownYardLine + 10, yend = firstDownYardLine + 10,
           color = "yellow",
           size = 1,
           alpha = .7) +
  # Velocity arrows.
  geom_segment(data = play2_clipped %>%
                 filter(event == "pass_outcome_caught") %>%
                 filter(!is.na(nflId)),
               aes(x = (xmax - y), y = x + 10, xend = (xmax - y_end),
                   yend = x_end + 10, group = nflId),
               color = "black",
               arrow = arrow(length = unit(.25,"cm"))) +
  # Player dots.
  geom_point(data = play2_clipped %>%
               filter(event == "pass_outcome_caught"), aes(x = (xmax-y), y = x + 10, shape = team,
                                                           fill = team, group = nflId, size = team, colour = team), alpha = 0.9) +
  # Targeted receiver highlighted in red.
  geom_point(data = play2_clipped %>%
               inner_join(targeted_receiver %>% rename(nflId = targetNflId)) %>%
               filter(event == "pass_outcome_caught"), aes(x = (xmax-y), y = x + 10),
             size = 4, color = "black", shape = 21, fill = "#e31837", alpha = 0.9) +
  # Jersey numbers.
  geom_text(data = play2_clipped %>%
              filter(event == "pass_outcome_caught") %>%
              filter(!is.na(nflId)), aes(x = (xmax-y), y = x + 10, label = jerseyNumber, group = nflId), colour = "white",
            vjust = 0.36, size = 2) +
  # The football (rows with no nflId).
  geom_point(data = play2_clipped %>%
               filter(event == "pass_outcome_caught") %>%
               filter(is.na(nflId)), aes(x = (xmax-y), y = x + 10),
             fill = "brown", color = "#654321", shape = 16, size = 2.5,
             alpha = 1) +
  ylim(ymin, ymax) +
  coord_fixed() +
  # Blank theme.
  theme(axis.line=element_blank(),
        axis.text.x=element_blank(),
        axis.text.y=element_blank(),
        axis.ticks=element_blank(),
        axis.title.x=element_blank(),
        axis.title.y=element_blank(),
        legend.position="none",
        panel.background=element_blank(),
        panel.border=element_blank(),
        panel.grid.major=element_blank(),
        panel.grid.minor=element_blank(),
        plot.background=element_blank(),
        plot.title = element_text(size=10),
        plot.subtitle = element_text(size=8)) +
  facet_wrap(~event) +
  # Throw-time EPA prediction and expected yards after catch.
  labs(title = paste0("EPA Prediction = ", gsub("-", "\u2013", toString(replace_na(round(play2_desc$epa_throw, 3), "N/A")))),
       subtitle = paste0("Expected YAC = ", gsub("-", "\u2013", toString(replace_na(round(play2_desc$eyac, 3), "N/A"))))
  )
# Panel at the "tackle" frame for the tackling example; same rotated-field
# construction. Velocity arrows are intentionally disabled (commented out).
pass_tackle2 =
  ggplot() +
  # Manual per-team styling (legends off); brown middle entry presumably the ball.
  scale_size_manual(values = c(4, 2.5, 4), guide = FALSE) +
  scale_shape_manual(values = c(21, 16, 21), guide = FALSE) +
  scale_fill_manual(values = c("#002244", "#654321", "grey"), guide = FALSE) +
  scale_colour_manual(values = c("#c60c30", "#654321", "black"), guide = FALSE) +
  # Field backdrop bands: behind LOS / LOS-to-first-down / beyond.
  annotate("polygon", x = c(xmin, xmin, xmax, xmax),
           y = c(ymin, yardline + 10, yardline + 10, ymin), colour = "black",
           fill = "limegreen",
           alpha = .5
  ) +
  annotate("polygon", x = c(xmin, xmin, xmax, xmax),
           y = c(yardline + 10, firstDownYardLine + 10, firstDownYardLine + 10, yardline + 10), colour = "black",
           fill = "limegreen",
           alpha = .85
  ) +
  annotate("polygon", x = c(xmin, xmin, xmax, xmax),
           y = c(firstDownYardLine + 10, ymax, ymax, firstDownYardLine + 10), colour = "black",
           fill = "limegreen",
           alpha = .5
  ) +
  # Hash marks and 5-yard lines.
  annotate("text", x = df.hash$x[df.hash$x < 55/2],
           y = df.hash$y[df.hash$x < 55/2], label = "_", hjust = 0, vjust = -0.2) +
  annotate("text", x = df.hash$x[df.hash$x > 55/2],
           y = df.hash$y[df.hash$x > 55/2], label = "_", hjust = 1, vjust = -0.2) +
  annotate("segment", x = xmin,
           y = seq(max(10, ymin), min(ymax, 110), by = 5),
           xend = xmax,
           yend = seq(max(10, ymin), min(ymax, 110), by = 5)) +
  # Yard-line numbers along both sidelines.
  annotate("text", x = rep(hash.left, 11), y = seq(10, 110, by = 10),
           label = c("G ", seq(10, 50, by = 10), rev(seq(10, 40, by = 10)), " G"),
           angle = 270, size = 4) +
  annotate("text", x = rep((xmax - hash.left), 11), y = seq(10, 110, by = 10),
           label = c(" G", seq(10, 50, by = 10), rev(seq(10, 40, by = 10)), "G "),
           angle = 90, size = 4) +
  # LOS (blue) and first-down line (yellow).
  annotate("segment", x = 0, xend = xmax,
           y = yardline + 10, yend = yardline + 10,
           color = "blue",
           size = 1,
           alpha = .7) +
  annotate("segment", x = 0, xend = xmax,
           y = firstDownYardLine + 10, yend = firstDownYardLine + 10,
           color = "yellow",
           size = 1,
           alpha = .7) +
  # Velocity arrows deliberately omitted for the tackle frame.
  # geom_segment(data = play2_clipped %>%
  #                filter(event == "tackle") %>%
  #                filter(!is.na(nflId)),
  #              aes(x = (xmax - y), y = x + 10, xend = (xmax - y_end),
  #                  yend = x_end + 10, group = nflId),
  #              color = "black",
  #              arrow = arrow(length = unit(.25,"cm"))) +
  # Player dots.
  geom_point(data = play2_clipped %>%
               filter(event == "tackle"), aes(x = (xmax-y), y = x + 10, shape = team,
                                              fill = team, group = nflId, size = team, colour = team), alpha = 0.9) +
  # Targeted receiver highlighted in red.
  geom_point(data = play2_clipped %>%
               inner_join(targeted_receiver %>% rename(nflId = targetNflId)) %>%
               filter(event == "tackle"), aes(x = (xmax-y), y = x + 10),
             size = 4, color = "black", shape = 21, fill = "#e31837", alpha = 0.9) +
  # Jersey numbers.
  geom_text(data = play2_clipped %>%
              filter(event == "tackle") %>%
              filter(!is.na(nflId)), aes(x = (xmax-y), y = x + 10, label = jerseyNumber, group = nflId), colour = "white",
            vjust = 0.36, size = 2) +
  # The football (rows with no nflId).
  geom_point(data = play2_clipped %>%
               filter(event == "tackle") %>%
               filter(is.na(nflId)), aes(x = (xmax-y), y = x + 10),
             fill = "brown", color = "#654321", shape = 16, size = 2.5,
             alpha = 1) +
  ylim(ymin, ymax) +
  coord_fixed() +
  # Blank theme.
  theme(axis.line=element_blank(),
        axis.text.x=element_blank(),
        axis.text.y=element_blank(),
        axis.ticks=element_blank(),
        axis.title.x=element_blank(),
        axis.title.y=element_blank(),
        legend.position="none",
        panel.background=element_blank(),
        panel.border=element_blank(),
        panel.grid.major=element_blank(),
        panel.grid.minor=element_blank(),
        plot.background=element_blank(),
        plot.title = element_text(size=10),
        plot.subtitle = element_text(size=8)) +
  facet_wrap(~event) +
  # NOTE(review): play2_desc dropped `epa` via dplyr::select(-epa) upstream, so
  # this title likely renders as "EPA (nflWAR) = " with no value -- verify.
  labs(title = paste0("EPA (nflWAR) = ", gsub("-", "\u2013", toString(replace_na(round(play2_desc$epa, 3), "N/A")))),
       subtitle = ""
  )
# Render the tackling two-panel figure (catch + tackle) to the active device.
# NOTE(review): textGrob/gpar come from the grid package, which is attached
# only after this call -- relies on an earlier library(grid) in the script.
gridExtra::grid.arrange(pass_caught2,
                        pass_tackle2,
                        ncol = 2,
                        top = textGrob(paste0(game.info$visitorTeamAbbr, " @ ", game.info$homeTeamAbbr,
                                              " (", game.info$gameDate, ")",
                                              "\n","Down and Distance: ", toString(play2_desc$DownDesc), "\n",
                                              paste(strwrap(paste("Play Description:", toString(play2_desc$playDescription))), collapse="\n")),
                                       gp=gpar(fontsize=8,font=8)))
library(grid)
# Grob version for saving to disk.
g1 <- gridExtra::arrangeGrob(pass_caught2,
                             pass_tackle2,
                             ncol = 2,
                             top = textGrob(paste0(game.info$visitorTeamAbbr, " @ ", game.info$homeTeamAbbr,
                                                   " (", game.info$gameDate, ")",
                                                   "\n","Down and Distance: ", toString(play2_desc$DownDesc), "\n",
                                                   paste(strwrap(paste("Play Description:", toString(play2_desc$playDescription))), collapse="\n")),
                                            gp=gpar(fontsize=8,font=8)))
# Static intermediate image appended next to the broadcast GIF below.
ggsave(plot = g1,
       filename = "~/Desktop/CoverageNet/src/08_writeup/intermediates/tackle_good_intermediate.png",
       height = 4,
       width = 6)
# Read the broadcast clip and the static summary panel, both scaled to 400px tall.
tackle_good_nfl_gif = image_scale(image_read(path = "~/Desktop/CoverageNet/src/08_writeup/NFL_videos/tackling_good.gif"),"x400")
tackle_good_nfl_gif
tackle_good_image = image_scale(image_read("~/Desktop/CoverageNet/src/08_writeup/intermediates/tackle_good_intermediate.png"),"x400")
tackle_good_image
# Append the static panel to every frame of the (cropped) broadcast clip.
# seq_along()[-1] replaces the original 2:length() pattern, which counts DOWN
# (c(2, 1)) and misbehaves when the GIF has a single frame.
final_gif = image_append(c(image_crop(tackle_good_nfl_gif[1], "450x400"), tackle_good_image))
for (i in seq_along(tackle_good_nfl_gif)[-1]) {
  combined <- image_append(c(image_crop(tackle_good_nfl_gif[i], "450x400"), tackle_good_image))
  final_gif <- c(final_gif, combined)
}
final_gif
# Write the combined animation to disk at the clip's native frame count.
anim_save("~/Desktop/CoverageNet/src/08_writeup/images/TacklingGoodEx.gif",
          final_gif,
          fps = 10,
          nframe = length(tackle_good_nfl_gif),
          height = 500,
          width = 1000,
          res = 120)
# Ball Hawk Examples ------------------------------------------------------
# Load week-1 tracking data and pull one hard-coded interception play.
pbp_data = read.csv("~/Desktop/CoverageNet/src/00_data_wrangle/outputs/week1.csv")
play2 = pbp_data %>%
  filter(gameId == 2018090901,
         playId == 704)
# Play metadata; left_join (not inner_join) for the EPA stage tables here,
# so the play is kept even if a stage has no model output for it.
play2_desc = plays %>%
  inner_join(play2 %>%
               distinct(gameId, playId)) %>%
  dplyr::select(-epa) %>%
  mutate(DownDesc = case_when(down == 1 ~ paste("1st and",
                                                yardsToGo),
                              down == 2 ~ paste("2nd and",
                                                yardsToGo),
                              down == 3 ~ paste("3rd and",
                                                yardsToGo),
                              TRUE ~ paste("4th and",
                                           yardsToGo))) %>%
  left_join(pass_attempt_epa_data) %>%
  left_join(pass_arrived_epa_data) %>%
  left_join(pass_caught_epa_data)
# Interactive check of this play's tracking events.
play2 %>% distinct(event)
# Frames of interest -- this example uses the interception outcome event --
# plus velocity-arrow endpoints and the ordered event factor for facetting.
play2_clipped = rbind(play2 %>%
                        filter(event %in% c('pass_forward',
                                            'pass_outcome_interception',
                                            'tackle')),
                      play2 %>%
                        inner_join(pass_arrived_frames) %>%
                        mutate(event = "pass_arrived")) %>%
  arrange(gameId, playId, frameId, nflId) %>%
  mutate(x_end = s*cos((90-dir)*pi/180) + x,
         y_end = s*sin((90-dir)*pi/180) + y) %>%
  mutate(event = as.factor(event)) %>%
  mutate(event = factor(event, levels = c("pass_forward",
                                          "pass_arrived",
                                          "pass_outcome_interception",
                                          "tackle")))
# Game metadata for the figure title.
game.info = games %>%
  inner_join(play2_clipped %>%
               dplyr::select(gameId, playId) %>%
               distinct())
## General field boundaries (constant for every play).
xmin <- 0
xmax <- 160/3            # field width in yards
hash.right <- 38.35
hash.left <- 12
hash.width <- 3.3
## Play-specific view window, clipped to where the action happened.
ymin <- max(round(min(play2_clipped$x, na.rm = TRUE), -1), 0) + 5
ymax <- min(round(max(play2_clipped$x, na.rm = TRUE) + 20, -1), 120)
## Hash-mark tick positions: drop the 5-yard lines, keep only the window.
df.hash <- expand.grid(x = c(0, 23.36667, 29.96667, xmax), y = (10:110)) %>%
  filter(!(floor(y %% 5) == 0),
         y < ymax,
         y > ymin)
yardline <- (play2_clipped %>% distinct(YardsFromOwnGoal))$YardsFromOwnGoal
firstDownYardLine <- yardline + play2_desc$yardsToGo
# Panel at the "pass_forward" (release) frame for the ball-hawk example; same
# rotated-field construction as the earlier panels. Note the manual scale
# value order is reversed relative to the other sections -- presumably the
# `team` factor levels sort differently for this matchup (confirm).
pass_forward2 =
  ggplot() +
  scale_size_manual(values = c(4, 2.5, 4), guide = FALSE) +
  scale_shape_manual(values = c(21, 16, 21), guide = FALSE) +
  scale_fill_manual(values = c("grey", "#654321", "#002244"), guide = FALSE) +
  scale_colour_manual(values = c("black", "#654321", "#c60c30"), guide = FALSE) +
  # Field backdrop bands: behind LOS / LOS-to-first-down / beyond.
  annotate("polygon", x = c(xmin, xmin, xmax, xmax),
           y = c(ymin, yardline + 10, yardline + 10, ymin), colour = "black",
           fill = "limegreen",
           alpha = .5
  ) +
  annotate("polygon", x = c(xmin, xmin, xmax, xmax),
           y = c(yardline + 10, firstDownYardLine + 10, firstDownYardLine + 10, yardline + 10), colour = "black",
           fill = "limegreen",
           alpha = .85
  ) +
  annotate("polygon", x = c(xmin, xmin, xmax, xmax),
           y = c(firstDownYardLine + 10, ymax, ymax, firstDownYardLine + 10), colour = "black",
           fill = "limegreen",
           alpha = .5
  ) +
  # Hash marks and 5-yard lines.
  annotate("text", x = df.hash$x[df.hash$x < 55/2],
           y = df.hash$y[df.hash$x < 55/2], label = "_", hjust = 0, vjust = -0.2) +
  annotate("text", x = df.hash$x[df.hash$x > 55/2],
           y = df.hash$y[df.hash$x > 55/2], label = "_", hjust = 1, vjust = -0.2) +
  annotate("segment", x = xmin,
           y = seq(max(10, ymin), min(ymax, 110), by = 5),
           xend = xmax,
           yend = seq(max(10, ymin), min(ymax, 110), by = 5)) +
  # Yard-line numbers along both sidelines.
  annotate("text", x = rep(hash.left, 11), y = seq(10, 110, by = 10),
           label = c("G ", seq(10, 50, by = 10), rev(seq(10, 40, by = 10)), " G"),
           angle = 270, size = 4) +
  annotate("text", x = rep((xmax - hash.left), 11), y = seq(10, 110, by = 10),
           label = c(" G", seq(10, 50, by = 10), rev(seq(10, 40, by = 10)), "G "),
           angle = 90, size = 4) +
  # LOS (blue) and first-down line (yellow).
  annotate("segment", x = 0, xend = xmax,
           y = yardline + 10, yend = yardline + 10,
           color = "blue",
           size = 1,
           alpha = .7) +
  annotate("segment", x = 0, xend = xmax,
           y = firstDownYardLine + 10, yend = firstDownYardLine + 10,
           color = "yellow",
           size = 1,
           alpha = .7) +
  # Velocity arrows.
  geom_segment(data = play2_clipped %>%
                 filter(event == "pass_forward") %>%
                 filter(!is.na(nflId)),
               aes(x = (xmax - y), y = x + 10, xend = (xmax - y_end),
                   yend = x_end + 10, group = nflId),
               color = "black",
               arrow = arrow(length = unit(.25,"cm"))) +
  # Player dots.
  geom_point(data = play2_clipped %>%
               filter(event == "pass_forward"), aes(x = (xmax-y), y = x + 10, shape = team,
                                                    fill = team, group = nflId, size = team, colour = team), alpha = 0.9) +
  # Targeted receiver highlighted in red.
  geom_point(data = play2_clipped %>%
               inner_join(targeted_receiver %>% rename(nflId = targetNflId)) %>%
               filter(event == "pass_forward"), aes(x = (xmax-y), y = x + 10),
             size = 4, color = "black", shape = 21, fill = "#e31837", alpha = 0.9) +
  # Jersey numbers.
  geom_text(data = play2_clipped %>%
              filter(event == "pass_forward") %>%
              filter(!is.na(nflId)), aes(x = (xmax-y), y = x + 10, label = jerseyNumber, group = nflId), colour = "white",
            vjust = 0.36, size = 2) +
  # The football (rows with no nflId).
  geom_point(data = play2_clipped %>%
               filter(event == "pass_forward") %>%
               filter(is.na(nflId)), aes(x = (xmax-y), y = x + 10),
             fill = "brown", color = "#654321", shape = 16, size = 2.5,
             alpha = 1) +
  ylim(ymin, ymax) +
  coord_fixed() +
  # Blank theme.
  theme(axis.line=element_blank(),
        axis.text.x=element_blank(),
        axis.text.y=element_blank(),
        axis.ticks=element_blank(),
        axis.title.x=element_blank(),
        axis.title.y=element_blank(),
        legend.position="none",
        panel.background=element_blank(),
        panel.border=element_blank(),
        panel.grid.major=element_blank(),
        panel.grid.minor=element_blank(),
        plot.background=element_blank(),
        plot.title = element_text(size=10),
        plot.subtitle = element_text(size=8)) +
  facet_wrap(~event) +
  # Attempt-time EPA prediction with catch/interception probabilities (note:
  # `.1` is passed positionally to scales::percent here, unlike the earlier
  # panels which name `accuracy =`).
  labs(title = paste0("EPA Prediction = ", gsub("-", "\u2013", toString(replace_na(round(play2_desc$epa_pass_attempt, 3), "N/A")))),
       subtitle = paste0("C% = ", gsub("-", "\u2013", toString(replace_na(scales::percent(round(play2_desc$c_prob_pass_attempt, 4), .1), "N/A"))),
                         ", INT% = ", gsub("-", "\u2013", toString(replace_na(scales::percent(round(play2_desc$in_prob_pass_attempt, 4), .1), "N/A")))
       ))
# Panel at the "pass_outcome_interception" frame for the ball-hawk example.
# NOTE(review): this reuses the name pass_caught2 from the tackling section,
# clobbering that object -- fine sequentially, but beware out-of-order reruns.
pass_caught2 =
  ggplot() +
  # Manual per-team styling (legends off); value order matches pass_forward2.
  scale_size_manual(values = c(4, 2.5, 4), guide = FALSE) +
  scale_shape_manual(values = c(21, 16, 21), guide = FALSE) +
  scale_fill_manual(values = c("grey", "#654321", "#002244"), guide = FALSE) +
  scale_colour_manual(values = c("black", "#654321", "#c60c30"), guide = FALSE) +
  # Field backdrop bands: behind LOS / LOS-to-first-down / beyond.
  annotate("polygon", x = c(xmin, xmin, xmax, xmax),
           y = c(ymin, yardline + 10, yardline + 10, ymin), colour = "black",
           fill = "limegreen",
           alpha = .5
  ) +
  annotate("polygon", x = c(xmin, xmin, xmax, xmax),
           y = c(yardline + 10, firstDownYardLine + 10, firstDownYardLine + 10, yardline + 10), colour = "black",
           fill = "limegreen",
           alpha = .85
  ) +
  annotate("polygon", x = c(xmin, xmin, xmax, xmax),
           y = c(firstDownYardLine + 10, ymax, ymax, firstDownYardLine + 10), colour = "black",
           fill = "limegreen",
           alpha = .5
  ) +
  # Hash marks and 5-yard lines.
  annotate("text", x = df.hash$x[df.hash$x < 55/2],
           y = df.hash$y[df.hash$x < 55/2], label = "_", hjust = 0, vjust = -0.2) +
  annotate("text", x = df.hash$x[df.hash$x > 55/2],
           y = df.hash$y[df.hash$x > 55/2], label = "_", hjust = 1, vjust = -0.2) +
  annotate("segment", x = xmin,
           y = seq(max(10, ymin), min(ymax, 110), by = 5),
           xend = xmax,
           yend = seq(max(10, ymin), min(ymax, 110), by = 5)) +
  # Yard-line numbers along both sidelines.
  annotate("text", x = rep(hash.left, 11), y = seq(10, 110, by = 10),
           label = c("G ", seq(10, 50, by = 10), rev(seq(10, 40, by = 10)), " G"),
           angle = 270, size = 4) +
  annotate("text", x = rep((xmax - hash.left), 11), y = seq(10, 110, by = 10),
           label = c(" G", seq(10, 50, by = 10), rev(seq(10, 40, by = 10)), "G "),
           angle = 90, size = 4) +
  # LOS (blue) and first-down line (yellow).
  annotate("segment", x = 0, xend = xmax,
           y = yardline + 10, yend = yardline + 10,
           color = "blue",
           size = 1,
           alpha = .7) +
  annotate("segment", x = 0, xend = xmax,
           y = firstDownYardLine + 10, yend = firstDownYardLine + 10,
           color = "yellow",
           size = 1,
           alpha = .7) +
  # Velocity arrows.
  geom_segment(data = play2_clipped %>%
                 filter(event == "pass_outcome_interception") %>%
                 filter(!is.na(nflId)),
               aes(x = (xmax - y), y = x + 10, xend = (xmax - y_end),
                   yend = x_end + 10, group = nflId),
               color = "black",
               arrow = arrow(length = unit(.25,"cm"))) +
  # Player dots.
  geom_point(data = play2_clipped %>%
               filter(event == "pass_outcome_interception"), aes(x = (xmax-y), y = x + 10, shape = team,
                                                                 fill = team, group = nflId, size = team, colour = team), alpha = 0.9) +
  # Targeted receiver highlighted in red.
  geom_point(data = play2_clipped %>%
               inner_join(targeted_receiver %>% rename(nflId = targetNflId)) %>%
               filter(event == "pass_outcome_interception"), aes(x = (xmax-y), y = x + 10),
             size = 4, color = "black", shape = 21, fill = "#e31837", alpha = 0.9) +
  # Jersey numbers.
  geom_text(data = play2_clipped %>%
              filter(event == "pass_outcome_interception") %>%
              filter(!is.na(nflId)), aes(x = (xmax-y), y = x + 10, label = jerseyNumber, group = nflId), colour = "white",
            vjust = 0.36, size = 2) +
  # The football (rows with no nflId).
  geom_point(data = play2_clipped %>%
               filter(event == "pass_outcome_interception") %>%
               filter(is.na(nflId)), aes(x = (xmax-y), y = x + 10),
             fill = "brown", color = "#654321", shape = 16, size = 2.5,
             alpha = 1) +
  ylim(ymin, ymax) +
  coord_fixed() +
  # Blank theme.
  theme(axis.line=element_blank(),
        axis.text.x=element_blank(),
        axis.text.y=element_blank(),
        axis.ticks=element_blank(),
        axis.title.x=element_blank(),
        axis.title.y=element_blank(),
        legend.position="none",
        panel.background=element_blank(),
        panel.border=element_blank(),
        panel.grid.major=element_blank(),
        panel.grid.minor=element_blank(),
        plot.background=element_blank(),
        plot.title = element_text(size=10),
        plot.subtitle = element_text(size=8)) +
  facet_wrap(~event) +
  # Throw-time EPA prediction and expected yards after interception.
  labs(title = paste0("EPA Prediction = ", gsub("-", "\u2013", toString(replace_na(round(play2_desc$epa_throw, 3), "N/A")))),
       subtitle = paste0("Expected YAINT = ", gsub("-", "\u2013", toString(replace_na(round(play2_desc$eyaint, 3), "N/A"))))
  )
# Display the two event panels (pass release / interception) side by side,
# titled with the game matchup, date, down-and-distance, and play description.
gridExtra::grid.arrange(pass_forward2,
pass_caught2,
ncol = 2,
top = textGrob(paste0(game.info$visitorTeamAbbr, " @ ", game.info$homeTeamAbbr,
" (", game.info$gameDate, ")",
"\n","Down and Distance: ", toString(play2_desc$DownDesc), "\n",
paste(strwrap(paste("Play Description:", toString(play2_desc$playDescription))), collapse="\n")),
gp=gpar(fontsize=8,font=8)))
library(grid)
# Same layout as above, but captured as a grob so it can be written to disk.
g1 <- gridExtra::arrangeGrob(pass_forward2,
pass_caught2,
ncol = 2,
top = textGrob(paste0(game.info$visitorTeamAbbr, " @ ", game.info$homeTeamAbbr,
" (", game.info$gameDate, ")",
"\n","Down and Distance: ", toString(play2_desc$DownDesc), "\n",
paste(strwrap(paste("Play Description:", toString(play2_desc$playDescription))), collapse="\n")),
gp=gpar(fontsize=8,font=8)))
ggsave(plot = g1,
filename = "~/Desktop/CoverageNet/src/08_writeup/intermediates/ball_hawk_intermediate.png",
height = 4,
width = 6)
# Broadcast footage of the play, scaled to 400 px tall to match the chart.
ball_hawk_nfl_gif = image_scale(image_read(path = "~/Desktop/CoverageNet/src/08_writeup/NFL_videos/ball_hawk.gif"),"x400")
ball_hawk_nfl_gif
# The static chart rendered above, scaled to the same height.
ball_hawk_image = image_scale(image_read("~/Desktop/CoverageNet/src/08_writeup/intermediates/ball_hawk_intermediate.png"),"x400")
ball_hawk_image
# Place the static chart next to every frame of the footage gif.
final_gif = image_append(c(ball_hawk_nfl_gif[1], ball_hawk_image))
# NOTE(review): 2:length(...) assumes the gif has at least two frames; with a
# single-frame gif 2:1 counts DOWN and would duplicate frames -- confirm input.
for(i in 2:length(ball_hawk_nfl_gif)){
combined <- image_append(c(ball_hawk_nfl_gif[i], ball_hawk_image))
final_gif <- c(final_gif, combined)
}
final_gif
# Write the combined animation at the footage's original frame count.
anim_save("~/Desktop/CoverageNet/src/08_writeup/images/BallHawkEx.gif",
final_gif,
fps = 10,
nframe = length(ball_hawk_nfl_gif),
height = 500,
width = 1000,
res = 120)
|
4877d42d308c09dd38ae19db9b7c5099a0a750e2
|
71688ca1121015a31165525a6c1d9db9daa2cd56
|
/CHQBATCH.RD
|
2830832c626722b0d7f0e24ec4cc654f00e09888
|
[] |
no_license
|
pingleware/apac-accounting-code
|
d340edf13b1b4dd327218a25ad535e2ac3875474
|
bee104c735e49b4c20fa86c299a993859e6ba884
|
refs/heads/master
| 2022-08-02T01:48:59.722370
| 2020-05-20T12:28:26
| 2020-05-20T12:28:26
| 265,557,663
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,903
|
rd
|
CHQBATCH.RD
|
*
* *** * * *** **** *** * * ***** *** * *****
* * * * * * * * * * * * * * * * *
* * * * * * * * * * * * * * *
* * ***** * * **** * ***** *** * * ***
* * * * * * * * * * * * * * *
* * * * * * ** * * * * * * * * * *
* *** * * **** **** *** * * * *** ***** *****
*
* Random READ of CHQBATCH by its primary key BCH-KEY.
* File status "00" = success, "23" = key not found (error code 58);
* statuses beginning 2/3/4 are reported via READ-ERROR; a locked
* record is retried after LOCKED-RECORD handling.
000020 READ-CHQBATCH.
000030 READ CHQBATCH
KEY IS BCH-KEY.
000040 IF WS-STATUS = "00"
MOVE ZERO TO WS-F-ERROR
000050 GO TO READ-CHQBATCH-EXIT.
000040 IF WS-STATUS = "23"
MOVE 58 TO WS-F-ERROR
000050 GO TO READ-CHQBATCH-EXIT.
000060 IF WS-STAT1 = "2" OR "3" OR "4"
000070 MOVE 58 TO WS-F-ERROR
000080 PERFORM READ-ERROR.
000090 IF RECORD-LOCKED
MOVE W02-CHQBATCH TO WS-FILE
MOVE ZERO TO WS-KEY
000100 PERFORM LOCKED-RECORD
000110 GO TO READ-CHQBATCH.
GO TO READ-CHQBATCH-EXIT.
* Sequential READ NEXT; status "1x" (end of file) is also mapped to 58.
000020 READ-CHQBATCH-NEXT.
000030 READ CHQBATCH NEXT.
000040 IF WS-STATUS = "00"
MOVE ZERO TO WS-F-ERROR
000050 GO TO READ-CHQBATCH-EXIT.
000040 IF (WS-STATUS = "23") OR
(WS-STAT1 = "1")
MOVE 58 TO WS-F-ERROR
000050 GO TO READ-CHQBATCH-EXIT.
000060 IF WS-STAT1 = "2" OR "3" OR "4"
000070 MOVE 58 TO WS-F-ERROR
000080 PERFORM READ-ERROR.
000090 IF RECORD-LOCKED
MOVE W02-CHQBATCH TO WS-FILE
MOVE ZERO TO WS-KEY
000100 PERFORM LOCKED-RECORD
000110 GO TO READ-CHQBATCH-NEXT.
GO TO READ-CHQBATCH-EXIT.
* Sequential READ PREVIOUS; mirror image of READ-CHQBATCH-NEXT.
000020 READ-CHQBATCH-PREV.
000030 READ CHQBATCH PREVIOUS.
000040 IF WS-STATUS = "00"
MOVE ZERO TO WS-F-ERROR
000050 GO TO READ-CHQBATCH-EXIT.
000040 IF (WS-STATUS = "23") OR
(WS-STAT1 = "1")
MOVE 58 TO WS-F-ERROR
000050 GO TO READ-CHQBATCH-EXIT.
000060 IF WS-STAT1 = "2" OR "3" OR "4"
000070 MOVE 58 TO WS-F-ERROR
000080 PERFORM READ-ERROR.
000090 IF RECORD-LOCKED
MOVE W02-CHQBATCH TO WS-FILE
MOVE ZERO TO WS-KEY
000100 PERFORM LOCKED-RECORD
000110 GO TO READ-CHQBATCH-PREV.
GO TO READ-CHQBATCH-EXIT.
* Position the file at the first record whose key is >= BCH-KEY;
* no record-lock retry here because START does not lock a record.
START-AT-CHQBATCH-KEY.
000030 START CHQBATCH
KEY >= BCH-KEY.
000040 IF WS-STATUS = "00"
MOVE ZERO TO WS-F-ERROR
000050 GO TO READ-CHQBATCH-EXIT.
000040 IF WS-STATUS = "23"
MOVE 58 TO WS-F-ERROR
000050 GO TO READ-CHQBATCH-EXIT.
000060 IF WS-STAT1 = "2" OR "3" OR "4"
000070 MOVE 58 TO WS-F-ERROR
000080 PERFORM READ-ERROR.
GO TO READ-CHQBATCH-EXIT.
* Common exit point for all paragraphs above.
000150 READ-CHQBATCH-EXIT.
000160 EXIT.
|
ac40a4e745d54d092a3cc6ff8035397adceac838
|
4e42c724f1602d319b8bd1f36196fd7c22ff8ec9
|
/corr.R
|
1a50c4b6f7771d25d12f21f286b54847b05242e2
|
[] |
no_license
|
lastactionhero/R-Programming
|
4376320301425a28a40b58eedab2121c39017fa7
|
34481e0ff1c7489402aa61075a179932b1f00792
|
refs/heads/master
| 2021-01-21T11:46:40.080532
| 2015-07-26T21:05:57
| 2015-07-26T21:05:57
| 39,589,466
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 871
|
r
|
corr.R
|
# Compute the sulfate-nitrate correlation for every monitor whose number
# of complete observations exceeds `threshold`.
#
# Args:
#   directory: name of the data directory (one level above the working
#              directory) holding one CSV file per monitor.
#   threshold: minimum number of complete cases a monitor must have for
#              its correlation to be computed (default 0).
#
# Returns: a numeric vector of correlations with NA entries dropped.
#
# Depends on complete(directory), defined elsewhere in this project,
# which returns a data frame with columns `id` and `nobs`.
corr <- function(directory, threshold = 0) {
  # full.names = TRUE so read.csv() works regardless of the working
  # directory (the original passed bare file names, which read.csv()
  # could not locate); "\\.csv$" is a proper regex, not a shell glob.
  files <- list.files(path = file.path("..", directory),
                      pattern = "\\.csv$", full.names = TRUE)

  # Read one monitor file and keep only its fully-observed rows.
  read_complete <- function(path) {
    dat <- read.csv(path, stringsAsFactors = FALSE)
    dat[complete.cases(dat), ]
  }

  # Monitor ids with more than `threshold` complete observations.
  idObservations <- complete(directory)
  ids <- subset(idObservations, nobs > threshold)$id

  # Stack the qualifying monitors' data once, then correlate per monitor.
  # NOTE(review): indexing files by monitor id assumes file order matches
  # ids, as in the original -- confirm against the data layout.
  totalData <- do.call(rbind, lapply(files[ids], read_complete))
  correlations <- rep(NA_real_, length(ids))
  for (k in seq_along(ids)) {
    monitorData <- subset(totalData, ID == ids[k])
    # Column 2 = sulfate, column 3 = nitrate (positions as in the data).
    correlations[k] <- cor(x = monitorData[, 2], y = monitorData[, 3])
  }
  correlations[!is.na(correlations)]
}
|
7e6182cded85c9ff824b2988ae58923cea7ebd58
|
2584a626ee1564516b64c1424f384d1265e70b40
|
/code/weatherParser.R
|
7f3ec971337f5ce9da27b7ebbbf2352d12bb3670
|
[] |
no_license
|
DineshGauda/ADM_Wildfile_Prediction
|
fa551bffcd4b4cea2c78003cfc02617258589a03
|
8bc507ec23290d6ecfecf2f2fa14af3cf82db27c
|
refs/heads/master
| 2020-07-25T04:25:53.851122
| 2019-09-13T00:33:27
| 2019-09-13T00:33:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,062
|
r
|
weatherParser.R
|
# Label daily weather records with whether a wildfire occurred at the same
# rounded lat/long on that date, writing one "<i> fireLabel.csv" per
# wildfire location, then stack the labelled files into one table.
# NOTE(review): setwd() with an absolute personal path makes this script
# machine-specific; paths would need adjusting to rerun elsewhere.
setwd("/home/pratham/Documents/admProject/")
#files = list.files(pattern="*.csv")
wildfireData=read.csv("/home/pratham/Documents/admProject/wildfire_data.csv")
#myfiles = do.call(rbind, lapply(files, function(x) read.csv(x,header = F, stringsAsFactors = T)))
# Headerless weather export: V1 = timestamped date, V2 = longitude,
# V3 = latitude (per the comparisons against LONGITUDE/LATITUDE below).
weatherData<-read.csv(file = "2010.csv",header = F,stringsAsFactors = T)
# Strip everything up to the last ':' so only the date part remains.
weatherData$V1<-gsub(".*:","",weatherData$V1)
weatherData$V1<-as.Date(weatherData$V1,format = "%m/%d/%Y")
# Round coordinates to whole degrees so the two datasets can be matched.
weatherData$V2<-round(weatherData$V2)
weatherData$V3<-round(weatherData$V3)
wildfireData$Date<-as.Date(wildfireData$Date)
wildfireData$LATITUDE<-round(wildfireData$LATITUDE)
wildfireData$LONGITUDE<-round(wildfireData$LONGITUDE)
# Distinct (date, lat, long) combinations from each source.
wildfireUniqueCols<-unique(wildfireData[c(1,2,8)])
weatherUniqueCols<-unique(weatherData[c(1,3,2)])
#106 105 104 103
#56 57 58
# Restrict to the study window: latitudes 56-57, longitudes -105/-104.
wildfireUniqueCols <- wildfireUniqueCols[which(wildfireUniqueCols$LATITUDE %in% c(56,57)),]
wildfireUniqueCols <- wildfireUniqueCols[which(wildfireUniqueCols$LONGITUDE %in% c(-105,-104)),]
# NOTE(review): 1:nrow(...) misbehaves when the filtered table is empty
# (1:0 counts down); seq_len(nrow(...)) would be the safe form.
for(i in 1:nrow(wildfireUniqueCols))
{
#get weather data for common lat long of fire and dont check for date yet
allDaysForLatLong<-weatherData[which(weatherData$V3==wildfireUniqueCols$LATITUDE[i]
& weatherData$V2==wildfireUniqueCols$LONGITUDE[i]),]
#fire days for same lat long
fireDays<-weatherData[which(weatherData$V3==wildfireUniqueCols$LATITUDE[i]
& weatherData$V2==wildfireUniqueCols$LONGITUDE[i]
& weatherData$V1==wildfireUniqueCols$Date[i]),]
#label the data
# NOTE(review): these comparisons recycle fireDays$V1 and error when
# fireDays has zero rows -- confirm every fire date has weather data.
allDaysForLatLong[allDaysForLatLong$V1==fireDays$V1,]$V11<-"Yes"
allDaysForLatLong[allDaysForLatLong$V1!=fireDays$V1,]$V11<-"No"
#add new col for ndvi
write.csv(allDaysForLatLong,file = paste(i,"fireLabel.csv"))
}
# Re-read and stack all per-location labelled files, then keep the
# distinct (date, long, lat, label) combinations.
files = list.files(pattern="*fireLabel.csv")
myfiles = do.call(rbind, lapply(files, function(x) read.csv(x,header = F, stringsAsFactors = T)))
resultUnique<-unique(myfiles[c(2,3,4,12)])
|
9bbf85793c48ba8c06dcda14051d5091d11a5f60
|
3877ee02e7deec476c64901c474a24ad56dcd431
|
/R/getMetaGenomeAnnotations.R
|
5efae15cfb1ecdd6f13d67c024814947a8780f5d
|
[] |
no_license
|
ropensci/biomartr
|
282d15b64b1d984e3ff8d7d0e4c32b981349f8ca
|
e82db6541f4132d28de11add75c61624644f6aa1
|
refs/heads/master
| 2023-09-04T09:40:15.481115
| 2023-08-28T15:56:25
| 2023-08-28T15:56:25
| 22,648,899
| 171
| 34
| null | 2023-09-14T12:28:02
| 2014-08-05T15:34:55
|
R
|
UTF-8
|
R
| false
| false
| 4,620
|
r
|
getMetaGenomeAnnotations.R
|
#' @title Retrieve annotation *.gff files for metagenomes from NCBI Genbank
#' @description Retrieve available annotation *.gff files for metagenomes
#' from NCBI Genbank. NCBI Genbank allows users
#' to download entire metagenomes and their annotations of several metagenome
#' projects. This function downloads available metagenomes that can then be
#' downloaded via \code{\link{getMetaGenomes}}.
#' @param name metagenome name retrieved by \code{\link{listMetaGenomes}}.
#' @param path a character string specifying the location (a folder)
#' in which the corresponding metagenome annotations shall be stored.
#' Default is
#' \code{path} = \code{file.path("_ncbi_downloads","metagenome","annotations")}.
#' @return A character vector of paths to the downloaded *.gff.gz files
#' (documentation *.txt files are excluded).
#' @author Hajk-Georg Drost
#' @examples
#' \dontrun{
#' # First, retrieve a list of available metagenomes
#' listMetaGenomes()
#'
#' # Now, retrieve the 'human gut metagenome'
#' getMetaGenomeAnnotations(name = "human gut metagenome")
#' }
#' @seealso \code{\link{getMetaGenomes}}, \code{\link{listMetaGenomes}},
#' \code{\link{getGFF}}
#' @export
getMetaGenomeAnnotations <-
    function(name,
             path = file.path("_ncbi_downloads", "metagenome", "annotations")) {
        # Fail early when the requested metagenome is unknown to Genbank.
        if (!is.element(name, listMetaGenomes(details = FALSE)))
            stop(
                paste0("Unfortunately, the metagenome '",
                       name,
                       "' is not available. Please consult the listMetaGenomes() ",
                       "function for available metagenomes.")
            )
        if (!file.exists(path)) {
            dir.create(path, recursive = TRUE)
        }
        # Placeholder so R CMD check does not flag the NSE column used in
        # dplyr::filter() below as an undefined global.
        organism_name <- NULL
        # retrieve metagenomes assembly_summary.txt file
        mgs <- getMetaGenomeSummary()
        metagenomes.members <-
            dplyr::filter(mgs, organism_name == name)
        file.names <- metagenomes.members$ftp_path
        # Download one "*_genomic.gff.gz" per member assembly of the project.
        for (i in seq_along(file.names)) {
            download_url <-
                paste0(
                    file.names[i],
                    "/",
                    paste0(
                        metagenomes.members$assembly_accession[i],
                        "_",
                        metagenomes.members$asm_name[i],
                        "_genomic.gff.gz"
                    )
                )
            # Best-effort download: a failure is reported as a message so the
            # remaining assemblies are still attempted.
            tryCatch({
                utils::capture.output(downloader::download(
                    download_url,
                    destfile = file.path(path, paste0(
                        basename(file.names[i]), "_genomic.gff.gz"
                    )),
                    mode = "wb"
                ))
            }, error = function(e)
                message(
                    "Unfortunately, the FTP site 'ftp://ftp.ncbi.nlm.nih.gov/' cannot be
reached. This might be due to an instable internet connection or some issues with the firewall.
Are you able to reach the FTP site '",
                    download_url,
                    "' from your browser?"
                ))
            # Record download provenance in a companion documentation file.
            docFile(
                file.name = paste0(basename(file.names[i]), "_genomic.gff.gz"),
                organism = basename(file.names[i]),
                url = download_url,
                database = "Genbank metagenomes",
                path = path,
                refseq_category = metagenomes.members$refseq_category[i],
                assembly_accession = metagenomes.members$assembly_accession[i],
                bioproject = metagenomes.members$bioproject[i],
                biosample = metagenomes.members$biosample[i],
                taxid = metagenomes.members$taxid[i],
                infraspecific_name = metagenomes.members$infraspecific_name[i],
                version_status = metagenomes.members$version_status[i],
                release_type = metagenomes.members$release_type[i],
                genome_rep = metagenomes.members$genome_rep[i],
                seq_rel_date = metagenomes.members$seq_rel_date[i],
                submitter = metagenomes.members$submitter[i]
            )
        }
        print(
            paste0(
                "The annotations of metagenome '",
                name,
                "' have been downloaded and stored at '",
                path,
                "'."
            )
        )
        file.paths <- file.path(path, list.files(path = path))
        # Return only file paths without "*.txt" (the docFile() companions).
        # base grepl() replaces the original unlist(lapply(..., str_detect)).
        return(file.paths[!grepl("[.]txt", file.paths)])
    }
|
b8ebc5a640af28607f105f86aac9c57491fc122f
|
db20134b2dce8bf3db9c5db43148f096a39f8bc8
|
/plot2.R
|
1f0430de663ef6046586956e764e9c00792e10e1
|
[] |
no_license
|
tedconway/ExData_Plotting1
|
7ef49557d1c9bdeaaf568e863e7add269a97cfc3
|
2f467c1ab1e0c5f7e467976b031958a075f88d2a
|
refs/heads/master
| 2021-01-17T22:14:39.456873
| 2014-06-08T21:18:25
| 2014-06-08T21:18:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 505
|
r
|
plot2.R
|
# Exploratory Data Analysis, plot 2: Global Active Power over time for
# 2007-02-01 and 2007-02-02, written to plot2.png (480x480).
hpc <- read.csv("household_power_consumption.txt", header=TRUE, sep=";", stringsAsFactors=F)
# Dates are stored as d/m/Y strings; keep only the two target days.
hpc2 <- subset(hpc, hpc$Date %in% c("1/2/2007", "2/2/2007"))
# "?" marks a missing reading in this dataset; drop those rows.
hpc3 <- subset(hpc2, hpc2$Global_active_power != "?")
# BUG FIX: the "?" sentinels force this column to be read as character;
# plot()/lines() need numeric values, so coerce explicitly.
hpc3$Global_active_power <- as.numeric(hpc3$Global_active_power)
# Combine date and time into a single POSIXlt timestamp per row.
x <- paste(as.Date(hpc3$Date,"%d/%m/%Y"), hpc3$Time)
hpc3$DtTm <- strptime(x, "%Y-%m-%d %H:%M:%S")
png("plot2.png", width = 480, height = 480)
# type = "n" sets up the axes; the line itself is drawn by lines() below.
plot(hpc3$DtTm, hpc3$Global_active_power, type="n", ylab="Global Active Power (kilowatts)", xlab="")
lines(hpc3$DtTm, hpc3$Global_active_power)
dev.off()
|
164d983f76a95aeee16a64d608fb8ed6b3ce9539
|
62c14804025c9b0a56b3dc43937cd365ec1481b3
|
/output/sorted/GM12891/GM12891.R
|
182c50b15c2656d6aeae60056695a61ecf9bef38
|
[
"MIT"
] |
permissive
|
Bohdan-Khomtchouk/ENCODE_TF_geneXtendeR_analysis
|
98ad9dd688d78af0a412d7c3defde223c6d1ff50
|
4d055110f2015aa8d65bcd31eea3b0da8e19298f
|
refs/heads/master
| 2021-05-04T06:55:12.446062
| 2019-04-19T00:46:01
| 2019-04-19T00:46:01
| 70,523,421
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 776
|
r
|
GM12891.R
|
# Peak-distance line plots for eight ChIP-seq transcription factors in
# cell line GM12891, one PNG per factor. peaksInput()/linePlot() are
# geneXtendeR functions and `human` is loaded elsewhere in the pipeline.
# The original repeated the same four-line stanza eight times; a loop
# over the factor names produces the identical sequence of calls.
tfs <- c("CTCF", "NFKB1", "PAX5", "POLR2A", "POU2F2", "SPI1", "TAF1", "YY1")
for (tf in tfs) {
  peaksInput(paste0(tf, ".GM12891.bed"))
  png(paste0(tf, ".GM12891.png"))
  # Extension range 0-10000 bp in steps of 500, as in every original call.
  linePlot(human, 0, 10000, 500)
  dev.off()
}
|
337c1873a2943ab906c4ea1c765cd9ac1f122e63
|
e872b2f134259ed11af64f37a03d5f66e7cd8a1e
|
/6.6 Overfitting.R
|
852c931b1ef4045d626ec68846cb90c6aeefcab7
|
[] |
no_license
|
jefftwebb/IS-6489
|
07789d202969dd429550e2de67052ecde2d6c9c4
|
eca366b7ff2a569a7b6c383d2ee2a88c68640055
|
refs/heads/master
| 2020-03-17T23:47:52.893600
| 2019-03-19T16:55:15
| 2019-03-19T16:55:15
| 134,061,382
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,857
|
r
|
6.6 Overfitting.R
|
### Statistics and Predictive Analytics
# Tutorial topic: Overfitting
library(tidyverse)
library(MASS)
library(arm)
library(caret)
# Root-mean-squared error between observed and predicted values.
# Returns NA if either vector contains NA (mean() propagates it).
rmse <- function(actual, predicted) {
  residuals <- actual - predicted
  sqrt(mean(residuals^2))
}
# Hominin brain volume (cc) vs body mass (kg) -- the classic overfitting
# demonstration: fit polynomials of increasing degree to 7 data points.
bv <- data.frame(species = c("afarensis", "africanus", "habilis", "boisei",
"rudolfensis","ergaster", "sapiens"),
brain = c(458, 432, 612, 521, 752, 871, 1350),
mass = c(37, 35.5, 34.5, 41.5, 55.5, 61, 53.5))
bv
# Make a scatterplot of brain volume ~ body mass with linear fit
ggplot(bv, aes(mass, brain)) +
geom_point() +
stat_smooth(method = "lm")
# Degree 1 (linear) baseline; display() is arm's compact model summary.
display(p1 <- lm(brain ~ mass, bv))
rmse(bv$brain, predict(p1))
# Quadratic
display(p2 <- lm(brain ~ mass + I(mass^2), bv))
bv$p2 <- fitted(p2) #Add these fitted values to the data frame
ggplot(bv, aes(mass, brain)) +
geom_point() +
geom_line(aes(mass, p2)) +
labs(title = "R-squared = .54")
rmse(bv$brain, predict(p2))
# Cubic
display(p3 <- lm(brain ~ mass + I(mass^2) + I(mass^3), bv))
bv$p3 <- fitted(p3)
ggplot(bv, aes(mass, brain)) +
geom_point() +
geom_line(aes(mass, p3)) +
labs(title = "R-squared = .68")
rmse(bv$brain, predict(p3))
# degree = 4
display(p4 <- lm(brain ~ mass + I(mass^2) + I(mass^3) +
I(mass^4), bv))
bv$p4 <- fitted(p4)
ggplot(bv, aes(mass, brain)) +
geom_point() +
geom_line(aes(mass, p4)) +
labs(title = "R-squared = .80")
rmse(bv$brain, predict(p4))
# order = 5
display(p5 <- lm(brain ~ mass + I(mass^2) + I(mass^3) +
I(mass^4) +
+ I(mass^5), bv))
bv$p5 <- fitted(p5)
ggplot(bv, aes(mass, brain)) +
geom_point() +
geom_line(aes(mass, p5)) +
labs(title = "R-squared = .98")
rmse(bv$brain, predict(p5))
# order = 6: with 7 coefficients for 7 points the fit is exact (RMSE 0).
display(p6 <- lm(brain ~ mass + I(mass^2) + I(mass^3) + I(mass^4) +
+ I(mass^5) + I(mass^6), bv))
bv$p6 <- fitted(p6)
ggplot(bv, aes(mass, brain)) +
geom_point() +
geom_line(aes(mass, p6)) +
labs(title = "R-squared = 1")
rmse(bv$brain, predict(p6))
# A perfect model! But, this is overfitting. The six degree polynomial has
# enough parameters to assign one to each data point. The fit is no longer
# summarizing. It IS the data.
# Question: what will happen when this model encounters different data?
# Append two random "new" observations (unseeded rnorm: results vary by run).
new_bv <- rbind(bv[, 2:3],
data.frame(brain = abs(rnorm(2, mean = 700, sd = 500)),
mass = abs(rnorm(2, 50, 20))))
new_bv
rmse(new_bv$brain, predict(p6, newdata=new_bv))
# NOTE(review): using new_bv$mass inside aes() bypasses the data argument;
# aes(mass, ...) would be the conventional form (same picture here).
ggplot(new_bv, aes(mass, brain)) +
geom_point() +
geom_line(aes(new_bv$mass, predict(p6, newdata = new_bv))) +
theme_minimal()
# Schockingly bad! That's overfitting. When models overfit
# they perform terribly in prediction because they have fit noise
# in the training sample that does not exist in new data.
|
8527f080491364f7dfff3f6b2cf6b5d34b8ea5b8
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/bpgmm/man/getZmat.Rd
|
09f03f6030f3a2fcc23bc1bb76cf4c011851d5e1
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 469
|
rd
|
getZmat.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{getZmat}
\alias{getZmat}
\title{Tool for vector to matrix}
\usage{
getZmat(ZOneDim, m, n)
}
\arguments{
\item{ZOneDim}{a vector.}
\item{m}{the number of cluster.}
\item{n}{sample size.}
}
\value{
adjacency matrix
}
\description{
Tool for vector to matrix
}
\examples{
m <- 20
n <- 500
ZOneDim <- sample(seq_len(m), n, replace = TRUE)
#'
\donttest{
getZmat(ZOneDim, m, n)
}
}
|
c97914bfbc75e314b4e27473f41a2de75f7f59ea
|
96ca1d8e761542e25c7d061b0b9ea021c7c1dbc9
|
/output_files/QR.R
|
1aab6bf726282688cef1ba54c205d170a51d5feb
|
[] |
no_license
|
SilasX/QuineRelayFiles
|
3811179c45fd4d967ba5b41dc9dc4e4f8cda931d
|
363d3f0100a9fec9a0a2fad9ebd08ad8cecfe0d9
|
refs/heads/master
| 2020-05-31T21:56:50.156657
| 2013-07-16T09:38:44
| 2013-07-16T09:38:44
| 11,435,446
| 4
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,209
|
r
|
QR.R
|
cat("say \"eval$s=%q(eval(%w(B=92.chr;N=10.chr;n=0;e=->(s){s.gsub(/[#{B+B+N}\"\"]/){B+(N==$&??n:$&)}};E=->(s){'(\"\"'+e[s]+'\"\")'}\"\nsay \";d=->(s,t=?\"\"){s.gsub(t){t+t}};D=->(s,t=?@){s.gsub(B){t}};Q=->(s,t=?$){s.gsub(t){B+$&}};puts(eval(%q(\"\"objectXQRX\"\nsay \"extendsXApp{Hln#{E[%((displayX\"\"#{e[%(HfX% sX\"\"#{Q[ e[\"\"Trans criptXshow:X'#{d[%(putsX[regsubX-allX{.}X\"\"#{Q[e[%[\"\nsay \"intXK(){sJXs=#{E[%(withXAda.Text _Io;p roce dure XQRXisXbeginXAda.Text_Io.Put_Line(\"\"#{d[%(BEGINXH(\"\"#\"\nsay \"{d[%(BEGIN{s=#{E[D[%(forXbXinX Sys t em.Text.ASCIIEncoding().GetBytes(#{Q[E[\"\"#i\"\nsay \"nclude<stdio.h>`nintXK (){pu t s#{E[\"\"#include<iostream>`nintXK(){std::cout\"\nsay \"<<#{E[%(classXProgram {pu blicXstaticXvoidXMain(){System.Co\"\nsay \"nsole.Write(#{E[D[%( ( defnXf[lXr](if(>(countXr)45)(lazy-\"\nsay \"seq(cons(str\"\"XXXX^\"\" \"\"r\"\"^\"\"&\"\")(fXl\"\"\"\")))(let[c(fir\"\nsay \"stXl)](ifXc(f(next Xl)(if(=XcX^\"\")(strXrXcXc)(st\"\nsay \"rXrXc)))[(str\"\"XXX X^\"\"\"\"r\"\"^\"\".\"\")]))))(doall(\"\nsay \"mapX#(Hln(str\"\"XX XXXXXX\"\"%1))(lazy-cat[\"\"ID\"\nsay \"ENTIFICATIONXD IVISION.\"\" \"\"PROGRAM-ID.XQR.\"\"\"\"PR\"\nsay \"OCEDUREXDIVI SION.\"\"]#{%(s=#{E[%(packag eXK;import(\"\"fmt\"\";\"\"sJs\"\nsay \"\"\");funcXK() {fmt.Print(\"\"H^x27\"\"+sJs.Replace(\"\"#{ e[D[e[%(importXData.\"\nsay \"Char`nK=p utStrLn$\"\"procedureXK();write(^\"\"DO,1<-#\"\"++ show(lengthXs)++fX\"\nsay \"sX1X0;f( x:t)iXc=letXv=foldl(^aXx->a*2+(modXxX2))0$take X8$iterate(flipXd\"\nsay \"ivX2)$D ata.Char.ordXxXin(ifXmodXiX4<1then\"\"PLEASE\"\"else\"\"\"\")+ +\"\"DO,1SUB#\"\"++sho\"\nsay \"wXi++\"\" <-#\"\"++show(mod(c-v)256)++\"\"^^n\"\"++fXt(i+1)v;f[]_X_=\"\"PLE ASEREADOUT,1^^n\"\nsay \"PLEAS EGIVEUP^\"\");end\"\";s=#{E[%(.classXpublicXQR`n.superXjava/la ng/Object`n.me\"\nsay \"thod XpublicXstaticXK([Lj ava/lang/SJ;)V`n.limitXstackX2`ngetsta ticXjava/lang/\"\nsay \"Syst em/outXLjava/io/Prin tStream;`nldcX\"\"#{e[%(classXQR{publicXst aticXvoidXK(S\"\nsay \"J[] 
v){SJXc[]=newXSJ[800 0 ],y=\"\"\"\",z=y,s=\"\"#{z=t=(0..r=q=126).map{|n |[n,[]]};a=[\"\nsay \"];% (@s=internalXconstan t[# {i=(s=%(PRINTX\"\"#{Q[\"\"H\"\"+E[%(all:`n`t@Hf X%sX\"\"#{e[%(.\"\nsay \"ass em blyXt{}.methodXstati cXvoi dXMain(){.entrypointXldstr\"\"#{e[\"\"varXu= require('ut\"\nsay \"il' );u.H('#import<stdio .h>^n') ;u.H(#{E[D[%(intXK(){puts#{E[\"\"H_sJ\"\"+E[ \"\"Hf\"\"+E[%(sa\"\nsay \"y\"\"#{ e[\"\"progr amXQR(output);beginX#{([* %($_=\"\"#{s=%\"\nsay \"(<?ph pXe cho\"\"#{Q[e[ \"\"intXK( ){write#{E[\"\"q r:-writ e('#{Q[e[\"\"H\"\"+E[\"\"cat\"\"+E[%(ev al$s=%q(#$s\"\nsay \")).gsu b(/.+/){\"\"sayX`\"\"# {d[$& ]}`\"\"\"\"}]]],?']}' ),nl, halt.\"\"]};returnX0;}\"\"]]}\"\"?>); (s+N*(-s.si\"\nsay \"ze%6)).by tes.map{|n|\"\"%07b\"\"% n}. join.scan(/.{6}/) .ma p{|n|n=n.to_i(2);((n/26*6+n+1 9)%83+46).c\"\nsay \"hr}*\"\"\"\"}\"\";s|.|$n =ord$&;substrXu n pack(B8,chr$n-($n<5 8 ?-6:$n<91?65:71)),2|eg;s/.{7}/ 0$&/g;HXpac\"\nsay \"kXB.length,$_). scan(%r(([X.0-9A -Za-z]+)|(.))).revers e.map{|a,b|(b)?\"\"s//chrX#{b.ord} /e\"\":\"\"s//#{a\"\nsay \"}/\"\"},\"\"eval\"\"]*\"\"X xX\"\").scan(/.{1, 2 55}/).map{|s|\"\"write ( '#{s}');\"\"}*\"\"\"\"}end.\"\"]}\"\"`n e nd`n)]]]};r\"\nsay \"eturnX0;})]]}.r eplace(/@/g,S J.f romCharCode(92))) \"\"]} \"\"callXvoidX[ m s corlib]Sy\"\nsay \"stem.Console::Wr iteLine(sJ)r et})] }\"\")],/[X^`t;\"\"() {}`[` ]]/]}`nB Y E )).size+\"\nsay \"1}XxXi8]c\"\"#{s.gs ub(/[^`n\"\"] /){B+\"\"% 02`x58\"\"%$&.or d}}^00\"\" declareXi32@pu t s (i8*)de\"\nsay \"fineXi32@K(){star t:%0=call Xi32@puts(i8 * X getele\"\nsay \"mentptrXinbounds( [#{i}XxXi8]*@s,i32X0, i32X0)) retXi32X0}).bytes{|n|r,z= z[n]||(a < < r;q<56\"\nsay \"24&&z[n]=[q+=1,[]] ;t[n])};a<<r;t=[*43.. 
123]- [64,*92..96];a.map{|n|t[n/75].chr+ t [ n%75]\"\nsay \".chr}*\"\"\"\"}\"\";intXi,n, q=0,t;for(n=0;++n<126 ;)c [n]=\"\"\"\"+(char)n;for(i=0;++i<s.length( ) ;){t=\"\nsay \"s.charAt(i);q=q*75+t -t/64-t/92*5-43;if(i% 2 >0){y=q<n?c[q]:y;c[n++]=z+=y.charAt( 0 );Sys\"\nsay \"tem.out.H(z=c[q]);q=0 ;}}}})]}\"\"`ninvokevir tualXjava/io/PrintStream/Hln(Ljava/ l ang/\"\nsay \"SJ;)V`nreturn`n.endXme thod)+N]})]]]}^x27^n \"\",\"\"@\"\",\"\"^^\"\",-1))})]};u=\"\"XXXXXXXX\"\";g= ( l)->\"\nsay \"l.replaceX/[^^\"\"]/g,(x)- >\"\"^^\"\"+x`nf=(l)->console.logX\"\"(write-lineX^\"\"\"\"+g(l)+\"\"^ \"\")\"\"`ne= (l)-\"\nsay \">fX\"\".^^^\"\"\"\"+u+g(l)+\"\"^\"\"Xcr\"\" `nd=(l)->eX\"\"WRITE(*,*)'\"\"+u+l+\"\"'\"\"`ndX\"\"programXQR\"\" ;dX\"\"HX^\"\"( &\"\";i\"\nsay \"=0`ndX\"\"&A,&\"\"whileXi++<s.le ngth`ndX\"\"&A)^\"\",&\"\";i=0`ndX\"\"&char(\"\"+s.charCo deAt(i++)+\"\"),&\"\"whil eXi<\"\nsay \"s.length`ndX\"\"&^\"\"^\"\"\"\";dX\"\"endXp rogramXQR\"\";eX\"\"STOP\"\";eX\"\"END\"\";fX\"\"bye\"\") .gsub(/.+/){%((cons\"\"DI SPL\"\nsay \"AY\"\"(f\"\"#{e[$&]}\"\"\"\"\"\")))}}[\"\"STOPXRU N.\"\"])))),?~]]}.Replace(\"\"~\"\",\"\" ^^\"\"));}})]};}\"\"]};returnX 0;}\"\nsay \"\"\"]]}):HXjoin(['+'forXiXinXrange(0 ,b)],\"\"\"\")+\"\".>\"\" ),?!]]};gsub(/!/,\"\"^^\"\",s);HXs})]}\"\nsay \"\"\")END)]}\"\");endXQR;)]};intXi,j;H(\"\"modu leXQR;initialXbeginX\"\");for(i=0;i<s.\"\nsay \"length;i++){H(\"\"$write(^\"\"XXX\"\");for(j=6;j>= 0;j--)H((s[i]>>j)%2>0?\"\"^^t\"\":\"\"X\"\");H(\"\"^^n^\"\nsay \"^t^^nXX^`\"\");\"\");}H(\"\"$display(^\"\"^^n^^n^\"\");endXendm odule\"\");returnX0;}].reverse],/[`[`]$]/]}\"\"X^x60.\"\nsay \"&];putsX\"\"k\"\"),?']}';cr\"\"]]}\"\")]}\"\"))]}}\"\").gsub(/[HJK^`X]/){[:print,0,:tring,:main,B*2,0,B,32.chr][$&.ord%9]})))*\"\"\"\")\"\nsay \"################### Quine Relay -- Copyright (c) 2013 Yusuke Endoh (@mametter), @hirekoke ##################)\"")
|
24eaa811ea0f08f9339ae8caaddbb9118cc05bbb
|
2f203859f753102e9d422e8edefa2f943a21c456
|
/PraceDomowe/PD10/gr1/WichrowskaAleksandra/przezycia.R
|
58b6cd6acd3ee055ba64ed0b74b17f5645faea32
|
[] |
no_license
|
Levinaanna/TechnikiWizualizacjiDanych2018
|
e2a33db7e4cdc5c0eec2bea632fcee40bd0687dd
|
f7640cbed60b505baeab760b68920e3ca08de6dd
|
refs/heads/master
| 2021-10-16T08:15:04.839938
| 2019-02-09T11:33:35
| 2019-02-09T11:33:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 951
|
r
|
przezycia.R
|
# Prepare 2009 life-table survival counts (Tx) for women and men, compute
# the female-minus-male difference per age, and render it with D3.
library(data.table)
library(r2d3)

przezycia <- archivist::aread("pbiecek/Przewodnik/arepo/609491e5ec491f240cbeafe377743e21")
przezycia <- data.table(przezycia)
# BUG FIX: the filtered table was assigned back to `przezycia`, but every
# following line reads `przezycia2009`, which was never defined and made
# the script error out here.
przezycia2009 <- przezycia[Year == 2009]
# Age arrives as a factor; go through character to get the numeric value.
przezycia2009$Age <- as.numeric(as.character(przezycia2009$Age))
przezycia2009 <- na.omit(przezycia2009)
przezycia2009 <- data.table(przezycia2009$Age, przezycia2009$Tx, przezycia2009$Gender)
colnames(przezycia2009) <- c("Age", "Tx", "Gender")
# Split by gender, then align the two series on Age.
przezycia2009_kobiety <- przezycia2009[przezycia2009$Gender == "Female"]
przezycia2009_mezczyzni <- przezycia2009[przezycia2009$Gender == "Male"]
przezycia <- merge(przezycia2009_kobiety, przezycia2009_mezczyzni, by = "Age")
przezycia <- data.table(przezycia$Age, przezycia$Tx.x, przezycia$Tx.y)
colnames(przezycia) <- c("Age", "Female", "Male")
# roznica ("difference"): female minus male person-years at each age.
przezycia$roznica <- przezycia$Female - przezycia$Male
przezycia_json <- jsonlite::toJSON(przezycia)
r2d3::r2d3("skrypt.js", data = przezycia_json)
|
f529aaea24270320f24b1dc80b9ebffe42d9eeda
|
f6150b8fe6f9dc44be22cd470969afacb44efe51
|
/exploratory/testbetaMMremote.r
|
0fe06400ee10f568cf2f6e288193b57990b5943a
|
[] |
no_license
|
qdread/nasabio
|
83e83a4d0e64fc427efa7452033eb434add9b6ee
|
7c94ce512ae6349d84cb3573c15be2f815c5758d
|
refs/heads/master
| 2021-01-20T02:02:53.514053
| 2019-12-28T15:22:53
| 2019-12-28T15:22:53
| 82,062,690
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,637
|
r
|
testbetaMMremote.r
|
# Script to run on cluster BBS beta-diversity model fits with additive partition beta, to compare to non-additive.
NC <- 3
NI <- 5000
NW <- 3000
delta <- 0.8
prednames <- c('elevation_5k_tri_50_mean', 'bio1_5k_50_mean', 'geological_age_5k_50_diversity', 'soil_type_5k_50_diversity', 'bio12_5k_50_mean', 'dhi_gpp_5k_tri_50_mean')
climate_prednames <- c('bio1_5k_50_mean', 'bio12_5k_50_mean')
geo_prednames <- c('elevation_5k_tri_50_mean', 'geological_age_5k_50_diversity', 'soil_type_5k_50_diversity', 'dhi_gpp_5k_tri_50_mean')
alpha_resp <- c('alpha_richness', 'alpha_phy_pa', 'alpha_func_pa')
beta_resp <- c('beta_td_additive', 'beta_phy_pa', 'beta_func_pa') # changed to ADDITIVE.
gamma_resp <- c('gamma_richness', 'gamma_phy_pa', 'gamma_func_pa')
task_table <- expand.grid(taxon = c('fia','bbs'),
rv = c('alpha', 'beta', 'gamma'),
ecoregion = 'TNC',
model = c('full','climate','space', 'geo'),
fold = 0:63,
stringsAsFactors = FALSE)
taxon <- 'fia' # change to bbs or fia
fold <- 0
rv <- beta_resp
# if(task_table$model[task] == 'climate') prednames <- climate_prednames
# if(task_table$model[task] == 'geo') prednames <- geo_prednames
# if(task_table$model[task] == 'space') prednames <- character(0)
ecoregion <- 'TNC'
source('/mnt/research/nasabio/code/fit_mv_mm.r')
# Fit the model for the given response variable, taxon, and ecoregion
options(mc.cores = 3)
if (taxon == 'bbs') {
load('/mnt/research/nasabio/temp/bbs_spatial_mm_dat_50k.RData')
geodat <- bbsgeo
biodat <- bbsbio
siteid <- 'rteNo'
# Added 14 May: logit transform beta td.
biodat$beta_td_sorensen_pa <- qlogis(biodat$beta_td_sorensen_pa)
biodat$beta_td_additive <- biodat$gamma_richness - biodat$alpha_richness
# Get the additive beta diversity by just subtracting gamma - alpha. Easy as that.
} else {
load('/mnt/research/nasabio/temp/fia_spatial_mm_dat_50k.RData')
geodat <- fiageo
biodat <- fiabio
siteid <- 'PLT_CN'
# Added 14 May: logit transform beta td.
biodat$beta_td_sorensen_pa <- qlogis(biodat$beta_td_sorensen_pa)
biodat$beta_td_sorensen <- qlogis(biodat$beta_td_sorensen)
biodat$beta_td_additive <- biodat$gamma_richness - biodat$alpha_richness
}
# The following six ecoregions should not be used in any model fitting because they have too few data points.
# They are primarily in Canada or Mexico with only a small portion of area in the USA, once buffer is deducted
exclude_regions <- c('NA0801', 'NA0808', 'NA0417', 'NA0514', 'NA1202', 'NA1301')
# Set data from the holdout set to missing, if task was designated as a k-fold task
# For "leave one region out" cross-validation, we just need to get rid of a single region for each fold
# Added 02 May 2019: include the ecoregion folds, less the excluded ones
fold_df <- read.csv('/mnt/research/nasabio/data/ecoregions/ecoregion_folds.csv', stringsAsFactors = FALSE)
region_folds <- fold_df$TNC
region_folds <- region_folds[!grepl(paste(exclude_regions, collapse = '|'), region_folds)]
library(dplyr)
if (fold != 0) {
# Join response variable data with the region ID, then set the appropriate values to NA
biodat <- biodat %>% left_join(geodat[, c(siteid, 'TNC')])
biodat$missing <- biodat$TNC == region_folds[fold]
}
# Modified 14 May: model all with Gaussian
distrib <- 'gaussian'
# Priors (added May 29)
# --------------------
# Edit 04 Jan 2019: temporarily remove all priors (add some back in on 05 Jan)
# Edit May 31: Add priors for FIA intercepts and for BBS alpha sdcar
# Edit June 14: Add sdcar priors and intercept priors on FIA beta, sd car priors on BBS beta
library(brms)
# 1st arg is df, 2nd is mu, 3rd is sigma for student t distribution
added_priors <- NULL
# if (task_table$rv[task] == 'alpha' & taxon == 'fia') {
# added_priors <- c(set_prior('student_t(5, 0, 2)', class = 'Intercept', resp = 'alpharichness'),
# set_prior('student_t(5, 0, 2)', class = 'Intercept', resp = 'alphaphypa'),
# set_prior('student_t(5, 0, 2)', class = 'Intercept', resp = 'alphafuncpa') )
# }
# if (task_table$rv[task] == 'beta' & taxon == 'fia') {
# added_priors <- c(set_prior('lognormal(1, 2)', class = 'sdcar', resp = 'betatdsorensenpa'),
# set_prior('lognormal(1, 2)', class = 'sdcar', resp = 'betaphypa'),
# set_prior('lognormal(1, 2)', class = 'sdcar', resp = 'betafuncpa'),
# set_prior('student_t(10, 0, 1)', class = 'Intercept', resp = 'betatdsorensenpa'),
# set_prior('student_t(10, 0, 1)', class = 'Intercept', resp = 'betaphypa'),
# set_prior('student_t(10, 0, 1)', class = 'Intercept', resp = 'betafuncpa') )
# }
# if (task_table$rv[task] == 'beta' & taxon == 'fia') {
# added_priors <- c(set_prior('student_t(10, 0, 1)', class = 'Intercept', resp = 'betatdsorensenpa'),
# set_prior('student_t(10, 0, 1)', class = 'Intercept', resp = 'betaphypa'),
# set_prior('student_t(10, 0, 1)', class = 'Intercept', resp = 'betafuncpa') )
# }
# if (task_table$rv[task] == 'gamma' & taxon == 'fia') {
# added_priors <- c(set_prior('student_t(10, 0, 1)', class = 'Intercept', resp = 'gammarichness'),
# set_prior('student_t(10, 0, 1)', class = 'Intercept', resp = 'gammaphypa'),
# set_prior('student_t(10, 0, 1)', class = 'Intercept', resp = 'gammafuncpa') )
# }
# if (task_table$rv[task] == 'alpha' & taxon == 'bbs') {
# added_priors <- c(set_prior('lognormal(1, 1)', class = 'sdcar', resp = 'alpharichness'),
# set_prior('lognormal(1, 1)', class = 'sdcar', resp = 'alphaphypa'),
# set_prior('lognormal(1, 1)', class = 'sdcar', resp = 'alphafuncpa'),
# set_prior('student_t(5, 0, 2)', class = 'Intercept', resp = 'alpharichness'),
# set_prior('student_t(5, 0, 2)', class = 'Intercept', resp = 'alphaphypa'),
# set_prior('student_t(5, 0, 2)', class = 'Intercept', resp = 'alphafuncpa') )
# }
# if (task_table$rv[task] == 'beta' & taxon == 'bbs') {
# added_priors <- c(set_prior('lognormal(1, 1)', class = 'sdcar', resp = 'betatdsorensenpa'),
# set_prior('lognormal(1, 1)', class = 'sdcar', resp = 'betaphypa'),
# set_prior('lognormal(1, 1)', class = 'sdcar', resp = 'betafuncpa'),
# set_prior('student_t(10, 0, 1)', class = 'Intercept', resp = 'betatdsorensenpa'),
# set_prior('student_t(10, 0, 1)', class = 'Intercept', resp = 'betaphypa'),
# set_prior('student_t(10, 0, 1)', class = 'Intercept', resp = 'betafuncpa') )
# }
# if (task_table$rv[task] == 'gamma' & taxon == 'bbs') {
# added_priors <- c(set_prior('student_t(10, 0, 1)', class = 'Intercept', resp = 'gammarichness'),
# set_prior('student_t(10, 0, 1)', class = 'Intercept', resp = 'gammaphypa'),
# set_prior('student_t(10, 0, 1)', class = 'Intercept', resp = 'gammafuncpa') )
# }
#
# --------------------
# Pick the spatial adjacency matrix that matches the chosen ecoregion scheme.
# eco_mat is only assigned for these three values; any other `ecoregion`
# leaves it undefined -- presumably the task table only contains these three.
if (ecoregion == 'HUC4') eco_mat <- huc_bin
if (ecoregion == 'BCR') eco_mat <- bcr_bin
if (ecoregion == 'TNC') eco_mat <- tnc_bin
# Fit the multivariate mixed model with a CAR structure over ecoregions.
# All other inputs (geodat, biodat, prednames, rv, siteid, distrib,
# added_priors, NC/NI/NW, delta, fold, exclude_regions, taxon) are assumed
# to be defined earlier in this script / task setup -- TODO confirm against
# the full file.
fit <- fit_mv_mm(pred_df = geodat,
                 resp_df = biodat,
                 pred_vars = prednames,
                 resp_vars = rv,
                 id_var = siteid,
                 region_var = ecoregion,
                 distribution = distrib,
                 adj_matrix = eco_mat,
                 priors = added_priors,
                 n_chains = NC,
                 n_iter = NI,
                 n_warmup = NW,
                 delta = delta,
                 missing_data = fold > 0,  # treat held-out CV fold as missing data
                 exclude_locations = exclude_regions
)
# Persist the fitted model object for downstream summarising.
save(fit, file = paste0('/mnt/research/nasabio/temp/additive', taxon, 'fit.RData'))
|
37fcc854c8d92f013abe274dc147fb4a3bb3a13e
|
ebe9df460d5c69f214bdbd5f4e409d52b27f8aa0
|
/SDM_Range_code.R
|
abfda0a651daaaaae1a7e61cec7b02c968d38478
|
[] |
no_license
|
pabmedrano/Ecological_Niche_Modeling
|
0766ffa444b7e61bee6d68fa9e7c34292cf34adc
|
e8edcb62bdf81a156fe514b01b4abb0621537ad5
|
refs/heads/master
| 2021-01-03T22:08:56.175339
| 2018-07-05T00:25:40
| 2018-07-05T00:25:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 991
|
r
|
SDM_Range_code.R
|
######Range Code#######
# Builds a binary range map from a habitat-suitability projection:
# threshold the projection, add pseudo-absence points, smooth the
# presence/absence surface with a thin-plate spline, and re-threshold
# the smoothed surface to a 0/1 range.
library(raster)
library(fields)
setwd('W:/2015_Fall/Closed/Week13_Nov24/example_model') #set your working directory
respXY=read.csv('respXY.csv',row.names=1) #this needs to be the respXY csv for the species you are working with
projCurrent=raster('proj_current_model.output.grd')#change this to the grd file of the species you want to do the ranges with
# Binarize the suitability projection: scores below 200 become 0, the rest 1.
projCurrent[projCurrent<200]=0
projCurrent[projCurrent>0]=1
plot(projCurrent)
points(respXY[,1:2])
# Sample 3x as many pseudo-absence points as presences from cells scored 0,
# and tag them with a response value of 0.
pseudoAbs=xyFromCell(projCurrent,cell=sample(which(projCurrent[]==0),3*nrow(respXY)))
pseudoAbs=cbind(pseudoAbs,rep(0,nrow(pseudoAbs)))
colnames(pseudoAbs)=colnames(respXY)
presAbs=rbind(respXY,pseudoAbs)
# Thin-plate spline of presence (1) / pseudo-absence (0) over the x,y coords.
presKrig=Tps(presAbs[,1:2],presAbs[,3])
interp=interpolate(projCurrent,presKrig,xyOnly=FALSE)
interp=mask(interp,projCurrent)
plot(interp)
points(respXY[,1:3]) # NOTE(review): earlier calls pass respXY[,1:2]; confirm the third column is intentionally included here
# Zero out weakly supported cells (spline value below 0.3).
interp[interp<0.3]=0
plot(interp)
points(respXY[,1:3])
# Final binary range: any remaining positive value counts as presence.
interp[interp>0]=1
plot(interp)
points(respXY[,1:3])
8173b5fda84ef0b6b88b4a28a1d1b39925acab53
|
62efff0b61cd4f7738e223d3b74cfbf69d52a18e
|
/R/old/onset_to_death_data_table.r
|
17b6b0b6ac5aaf0f64cd35b78dd2c2543960b1b0
|
[] |
no_license
|
jhellewell14/uk_inf_curve
|
9d3725ea2fb337ebe4c14dd9d1c894e50d2f2dc9
|
be062cf84b02518e932a7a42b8fff1ba014c0dce
|
refs/heads/master
| 2023-01-05T02:30:02.728311
| 2020-10-30T11:27:57
| 2020-10-30T11:27:57
| 288,191,589
| 1
| 1
| null | 2020-10-29T10:13:06
| 2020-08-17T13:45:16
|
R
|
UTF-8
|
R
| false
| false
| 14,114
|
r
|
onset_to_death_data_table.r
|
# Estimate onset-to-death delay distributions from the CCP-UK SARI line list.
library(data.table)
library(magrittr)
# Set number of threads for data.table
setDTthreads(parallel::detectCores())
# Read in data
data <- data.table::fread("~/Downloads/CCPUKSARI_DATA_2020-08-04_0947.csv", na.strings = "")
# Select columns + fix read in issue where entries are "" instead of NA
# cestdat = symptom-onset date, dsstdtc = outcome date, dsterm = outcome
# code (4 taken as death below) -- presumed from usage; confirm against the
# CCP-UK data dictionary.
df <- data[,.(cestdat, dsstdtc, dsterm, subjid, age = age_estimateyears)]
# Per-subject flags: are all onset/outcome dates missing, and did the
# subject die (any record with dsterm == 4)?
df[, c("onset_date_missing", "outcome_date_missing", "dead") :=
     list(all(is.na(cestdat)), all(is.na(dsstdtc)), any(dsterm == 4, na.rm = TRUE)),
   by = "subjid"]
# Keep deaths with known onset and outcome dates, collapse to one row per
# subject, compute the onset-to-death delay in whole days, restrict delays
# to [0, 60] days with onset after 2020-01-01, then jitter uniformly within
# the day so a continuous distribution can be fitted.
df <- df[!onset_date_missing & !outcome_date_missing & dead
         ][, .(onset_date = unique(cestdat[!is.na(cestdat)]),
               dead = unique(dead),
               age = unique(age[!is.na(age)]),
               outcome_date = unique(dsstdtc[!is.na(dsstdtc)])), by = "subjid"
           ][, delay := as.integer(as.Date(outcome_date) - as.Date(onset_date))
             ][delay >= 0 & delay <= 60 & as.Date(onset_date) > "2020-01-01"][
               , delay_sampled := runif(.N, delay, delay + 1)]
# NOTE(review): agebreaks/agelabs are only defined further down this file;
# run top-to-bottom this line fails unless they already exist in the session.
df[, age_grp := cut(age, breaks = agebreaks, labels = agelabs, right = FALSE)]
# Fit a gamma distribution
# One maximum-likelihood gamma fit per decade-wide age band (30-39 through
# 90-99); younger bands are not fitted here.
nbfit30 <- fitdistrplus::fitdist(df[age_grp == "30-39", delay_sampled], distr = "gamma")
nbfit40 <- fitdistrplus::fitdist(df[age_grp == "40-49", delay_sampled], distr = "gamma")
nbfit50 <- fitdistrplus::fitdist(df[age_grp == "50-59", delay_sampled], distr = "gamma")
nbfit60 <- fitdistrplus::fitdist(df[age_grp == "60-69", delay_sampled], distr = "gamma")
nbfit70 <- fitdistrplus::fitdist(df[age_grp == "70-79", delay_sampled], distr = "gamma")
nbfit80 <- fitdistrplus::fitdist(df[age_grp == "80-89", delay_sampled], distr = "gamma")
nbfit90 <- fitdistrplus::fitdist(df[age_grp == "90-99", delay_sampled], distr = "gamma")
# Evaluate each fitted density on a grid of 601 points (0 to 60 by 0.1);
# estimate[1] is the shape, estimate[2] the rate.
y <- c(dgamma(seq(0, 60, 0.1), shape = nbfit30$estimate[1], rate = nbfit30$estimate[2]),
       dgamma(seq(0, 60, 0.1), shape = nbfit40$estimate[1], rate = nbfit40$estimate[2]),
       dgamma(seq(0, 60, 0.1), shape = nbfit50$estimate[1], rate = nbfit50$estimate[2]),
       dgamma(seq(0, 60, 0.1), shape = nbfit60$estimate[1], rate = nbfit60$estimate[2]),
       dgamma(seq(0, 60, 0.1), shape = nbfit70$estimate[1], rate = nbfit70$estimate[2]),
       dgamma(seq(0, 60, 0.1), shape = nbfit80$estimate[1], rate = nbfit80$estimate[2]),
       dgamma(seq(0, 60, 0.1), shape = nbfit90$estimate[1], rate = nbfit90$estimate[2]))
# Long-format frame for plotting: 601 grid points x 7 age groups
# (agelabs[4:10] == "30-39" ... "90-99", matching the seven fits above).
hi <- data.frame(y,
                 x = rep(seq(0, 60, 0.1), 7),
                 agegrp = rep(agelabs[4:10], rep(601, 7)))
hi %>%
  ggplot2::ggplot(ggplot2::aes(x = x, y = y, col = as.factor(agegrp))) +
  ggplot2::geom_line() +
  cowplot::theme_cowplot() +
  ggplot2::scale_color_discrete(name = "Age group") +
  ggplot2::labs(x = "Days since onset", y = "Probability density")
# Death distribution for care homes in linelist from covid19_automation
# The line list is stored encrypted; decrypt it with the project data key.
path_to_factory <- "~/repos/covid19_automation"
file_path <- file.path(path_to_factory, "data", "rds", "deaths_eng_latest.rds")
key <- cyphr::data_key(file.path(path_to_factory, "data"))
x <- cyphr::decrypt(readRDS(file_path), key)
lldf <- data.table::as.data.table(x)
# Classify each death as care-home (by residence OR place of death) vs other.
lldf[, care_home_death := fifelse(residence_type == "care_nursing_home" |
                                    place_of_death == "care_home",
                                  "Care home",
                                  "Other")]
### Age proportions
agebreaks <- c(0, 10, 20, 30, 40, 50, 60, 70 ,80, 90, 100)
agelabs <- c("0-9", "10-19",
             "20-29", "30-39",
             "40-49", "50-59",
             "60-69", "70-79",
             "80-89","90-99")
# Proportion of all line-list deaths falling in each 10-year age band
# (records with unknown age are dropped after binning).
age_props <- lldf[, age_grp := cut(age, breaks = agebreaks, labels = agelabs, right = FALSE)
                  ][, .N, age_grp][
                    !is.na(age_grp)][
                      , prop := N/sum(N)]
age_props[order(N)] %>%
  ggplot2::ggplot(ggplot2::aes(x = age_grp, y = prop)) +
  ggplot2::geom_bar(stat = "identity") +
  ggplot2::scale_y_continuous(breaks = seq(0, 0.4, 0.1), labels = seq(0, 40, 10)) +
  ggplot2::labs(y = "Proportion of deaths (%)", x = "Age group") +
  cowplot::theme_cowplot()
####
# Care-home onset-to-death delays, filtered and jittered like the hospital
# data above.  Note this OVERWRITES lldf with a one-column ("delay") table.
lldf <- lldf[!is.na(date_onset) & !is.na(date_death) & care_home_death == "Care home", delay := as.numeric(date_death - date_onset)
             ][delay > 0 & delay < 60 & date_onset > "2020-01-01", "delay"
               ]
lldf[, delay_sampled := runif(.N, delay, delay + 1)]
nbfit2 <- fitdistrplus::fitdist(lldf$delay_sampled, distr = "gamma")
# Plot fitted distribution
# NOTE(review): `nbfit` is never created in this file (only nbfit2 and
# nbfit30..nbfit90 exist); the plot() call below fails unless nbfit is
# already in the workspace from an earlier session.
plot(seq(0, 60, 0.1), dgamma(seq(0, 60, 0.1), shape = nbfit$estimate[1], rate = nbfit$estimate[2]),
     type = "l",
     ylab = "Density",
     xlab = "Days",
     main = "Onset to death delay",
     ylim = c(0, 0.1))
lines(seq(0, 60, 0.1), dgamma(seq(0, 60, 0.1), shape = nbfit2$estimate[1], rate = nbfit2$estimate[2]),
      col = "red")
text(15, 0.06, "Care homes", col = "red")
text(25, 0.03, "Hospital")
#### Deaths time series
# Rebuild the full line list (lldf was overwritten above) and re-derive the
# care-home classification.
deaths_ts <- data.table::as.data.table(x)
deaths_ts[, care_home_death := fifelse(residence_type == "care_nursing_home" |
                                         place_of_death == "care_home",
                                       "Care home",
                                       "Other")]
# Daily count of ONS-reported non-care-home deaths, with days that have no
# deaths filled in as zero via a join against a complete date sequence.
reported_cases_community <- deaths_ts[ons == "reported_by_ons" & care_home_death == "Other", "date_death"][, .N, by = "date_death"][, .(confirm = N, date = date_death)][order(date)][.(seq.Date(from = min(date), to = max(date), by = "day")),
                                                                                                                                                                                    on = .(date),roll = 0][is.na(confirm), confirm := 0]
reported_cases_community %>%
  ggplot2::ggplot(ggplot2::aes(x = date, y = confirm)) +
  ggplot2::geom_line()
### EpiNow 2 fit
# Generation time and incubation period taken from the EpiNow2 package's
# bundled literature estimates, truncated at 30 days.
generation_time <- list(mean = EpiNow2::covid_generation_times[1, ]$mean,
                        mean_sd = EpiNow2::covid_generation_times[1, ]$mean_sd,
                        sd = EpiNow2::covid_generation_times[1, ]$sd,
                        sd_sd = EpiNow2::covid_generation_times[1, ]$sd_sd,
                        max = 30)
incubation_period <- list(mean = EpiNow2::covid_incubation_period[1, ]$mean,
                          mean_sd = EpiNow2::covid_incubation_period[1, ]$mean_sd,
                          sd = EpiNow2::covid_incubation_period[1, ]$sd,
                          sd_sd = EpiNow2::covid_incubation_period[1, ]$sd_sd,
                          max = 30)
# Bootstrap a reporting-delay (onset-to-death) distribution from the
# hospital line-list delays computed above.
reporting_delay <- EpiNow2::bootstrapped_dist_fit(values = df$delay, verbose = TRUE)
## Set max allowed delay to 60 days to truncate computation
reporting_delay$max <- 60
estimates <- EpiNow2::estimate_infections(reported_cases = reported_cases_community,
                                          generation_time = generation_time,
                                          estimate_rt = FALSE, fixed = FALSE,
                                          delays = list(incubation_period, reporting_delay),
                                          horizon = 7, samples = 4000, warmup = 500,
                                          cores = 4, chains = 4, verbose = TRUE,
                                          adapt_delta = 0.95)
### Plot
# Estimated daily infections (blue line with credible ribbons) against the
# reported death counts (red); dashed line marks the 2020-03-23 UK lockdown.
p1 <- estimates$summarised[variable == "infections" & type == "estimate",] %>%
  ggplot2::ggplot(ggplot2::aes(x = date, y= median)) +
  ggplot2::geom_line(col = "dodgerblue") +
  ggplot2::geom_ribbon(data = reported_cases_community, ggplot2::aes(x = date, ymax = confirm, ymin = 0),
                       inherit.aes = FALSE, lty = 2, fill = "red4", alpha = 0.8) +
  ggplot2::geom_ribbon(ggplot2::aes(ymin = bottom, ymax = top), alpha = 0.25, fill = "dodgerblue") +
  ggplot2::geom_ribbon(ggplot2::aes(ymin = lower, ymax = upper), alpha = 0.25, fill = "dodgerblue") +
  ggplot2::geom_vline(xintercept = as.Date("2020-03-23"), lty = 2) +
  cowplot::theme_cowplot() +
  ggplot2::labs(y = "Daily incidence", x = "Date")
### Proportion of hospital infections in non-carehome data
df2 <- data[,.(cestdat, dsstdtc, dsterm, subjid, hostdat)]
# Per-subject flags as before, now also requiring a hospital admission date
# (hostdat) to be present.
df2 <- df2[, c("onset_date_missing", "outcome_date_missing", "hosp_date_missing", "dead") :=
             list(all(is.na(cestdat)), all(is.na(dsstdtc)), all(is.na(hostdat)), any(dsterm == 4, na.rm = TRUE)),
           by = "subjid"][!onset_date_missing & !outcome_date_missing & !hosp_date_missing & dead
                          ][, .(onset_date = unique(cestdat[!is.na(cestdat)]),
                                dead = unique(dead),
                                outcome_date = unique(dsstdtc[!is.na(dsstdtc)]),
                                hosp_date = unique(hostdat[!is.na(hostdat)])), by = "subjid"
                            ]
# Count a death as hospital-acquired when symptom onset is 5+ days after
# admission; compute the per-outcome-date proportion.
df2 <- df2[order(outcome_date), .(hosp = sum((as.Date(onset_date) >= as.Date(hosp_date) + 5), na.rm = TRUE),
                                  N = .N), by = "outcome_date"][, prop := hosp/N, ]
# Total deaths (black) with hospital-acquired deaths (yellow) overlaid.
df2 %>%
  ggplot2::ggplot(ggplot2::aes(x = outcome_date)) +
  ggplot2::geom_ribbon(ggplot2::aes( ymin = 0, ymax = N), fill = "black") +
  ggplot2::geom_ribbon(ggplot2::aes(ymin = 0, ymax = hosp), fill = "yellow") +
  cowplot::theme_cowplot() +
  ggplot2::labs(x = "Date", y = "Deaths")
# Proportion hospital-acquired over time, with the overall median marked.
df2[, ind := 1:.N] %>%
  ggplot2::ggplot(ggplot2::aes(x = ind, y = prop, col = N)) +
  ggplot2::geom_point() +
  cowplot::theme_cowplot() +
  ggplot2::labs(x = "Time", y = "Proportion of deaths from hospital-acquired infections (%)") +
  ggplot2::scale_color_continuous(name = "Total deaths") +
  ggplot2::scale_y_continuous(breaks = seq(0, 1, 0.1), labels = seq(0, 100, 10)) +
  ggplot2::geom_hline(yintercept = median(df2$prop), lty = 2)
### Reported deaths in care homes
# NOTE(review): this double assignment also OVERWRITES
# reported_cases_community with the CARE-HOME series; the joint plot below
# then merges care-home counts onto both locations -- likely a bug.
reported_cases_carehome <- reported_cases_community <- deaths_ts[ons == "reported_by_ons" & care_home_death == "Care home", "date_death"
                                                                 ][, .N, by = "date_death"
                                                                   ][, .(confirm = N, date = date_death)
                                                                     ][order(date)
                                                                       ][.(seq.Date(from = min(date), to = max(date), by = "day")), on = .(date), roll = 0
                                                                         ][is.na(confirm), confirm := 0]
# Care-home reporting delay bootstrapped from the care-home delays (lldf).
reporting_delay_ch <- EpiNow2::bootstrapped_dist_fit(values = lldf$delay, verbose = TRUE)
## Set max allowed delay to 60 days to truncate computation
reporting_delay_ch$max <- 60
estimates_ch <- EpiNow2::estimate_infections(reported_cases = reported_cases_carehome,
                                             generation_time = generation_time,
                                             estimate_rt = FALSE, fixed = FALSE,
                                             delays = list(incubation_period, reporting_delay_ch),
                                             horizon = 7, samples = 4000, warmup = 500,
                                             cores = 4, chains = 4, verbose = TRUE,
                                             adapt_delta = 0.95)
# Care-home analogue of p1: estimated infections vs reported deaths.
p2 <- estimates_ch$summarised[variable == "infections" & type == "estimate",] %>%
  ggplot2::ggplot(ggplot2::aes(x = date, y= median)) +
  ggplot2::geom_line(col = "dodgerblue") +
  ggplot2::geom_ribbon(data = reported_cases_carehome, ggplot2::aes(x = date, ymax = confirm, ymin = 0),
                       inherit.aes = FALSE, lty = 2, fill = "red4", alpha = 0.8) +
  ggplot2::geom_ribbon(ggplot2::aes(ymin = bottom, ymax = top), alpha = 0.25, fill = "dodgerblue") +
  ggplot2::geom_ribbon(ggplot2::aes(ymin = lower, ymax = upper), alpha = 0.25, fill = "dodgerblue") +
  ggplot2::geom_vline(xintercept = as.Date("2020-03-23"), lty = 2) +
  cowplot::theme_cowplot() +
  ggplot2::labs(y = "Daily incidence", x = "Date")
### Joint plot
# Stack community and care-home estimates/counts and facet by location.
cm <- estimates$summarised[variable == "infections" & type == "estimate"][, location := "community"]
ch <- estimates_ch$summarised[variable == "infections" & type == "estimate"][ , location := "carehomes"]
df1 <- merge(cm, reported_cases_community[,location := "community"], all = TRUE, by = c("date","location"))
df2 <- merge(ch, reported_cases_carehome[,location := "carehomes"], all = TRUE, by = c("date", "location"))
rbind(df1, df2) %>%
  ggplot2::ggplot(ggplot2::aes(x = date, y= median)) +
  ggplot2::geom_line(col = "dodgerblue") +
  ggplot2::geom_ribbon(ggplot2::aes(x = date, ymax = confirm, ymin = 0),
                       inherit.aes = FALSE, lty = 2, fill = "red4", alpha = 0.8) +
  ggplot2::geom_ribbon(ggplot2::aes(ymin = bottom, ymax = top), alpha = 0.25, fill = "dodgerblue") +
  ggplot2::geom_ribbon(ggplot2::aes(ymin = lower, ymax = upper), alpha = 0.25, fill = "dodgerblue") +
  ggplot2::geom_vline(xintercept = as.Date("2020-03-23"), lty = 2) +
  cowplot::theme_cowplot() +
  ggplot2::labs(y = "Daily incidence", x = "Date") +
  ggplot2::facet_wrap( ~ location, ncol = 1)
# Combined (community + care home) median infections per day.
# NOTE(review): `hi` is computed but never used below.
hi <- merge(cm[, .(date, median)], ch[, .(date, median)], by = "date")[, median := median.x + median.y]
# Hard-coded external comparison series (weekly infection estimates with
# upper/lower bounds) -- source not recorded here; TODO document provenance.
daty <- data.table(date = seq.Date(from = as.Date("2020-05-14"), to = as.Date("2020-07-30"), by = "7 days"),
                   infections = c(4100, 3100, 2500, 2100, 1900, 1800, 1800, 1900, 2000, 2400, 2900, 3700),
                   top = c(6400, 4300, 3300, 2800, 2500, 2400, 2300, 2500, 2700, 3300, 4300, 6400),
                   bottom = c(2500, 2100, 1800, 1500, 1400, 1300, 1300, 1400, 1500, 1600, 1900, 2100))
# Scale community infections by an assumed infection-fatality ratio of 1.5%.
IFR <- 0.015
cm[, .(date, infections = median / IFR, top = top / IFR, bottom = bottom / IFR)] %>%
  ggplot2::ggplot(ggplot2::aes(x = date, y = infections, ymin = bottom, ymax = top)) +
  ggplot2::geom_ribbon(alpha = 0.5) +
  ggplot2::geom_line() +
  ggplot2::geom_point(data = daty) +
  ggplot2::geom_errorbar(data = daty) +
  ggplot2::labs(y = "Daily new infections", x = "Date") +
  cowplot::theme_cowplot()
# Cumulative infections -> attack rate over a population of 56 million.
pd <- cm[, .(date, infections = median / IFR, top = top / IFR, bottom = bottom / IFR)
         ][, .(date, infections = cumsum(infections), top = cumsum(top), bottom = cumsum(bottom))]
pd %>%
  ggplot2::ggplot(ggplot2::aes(x = date, y = infections / 56000000,
                               ymin = bottom / 56000000, ymax = top / 56000000)) +
  ggplot2::geom_ribbon(alpha = 0.5) +
  ggplot2::geom_line() +
  # ggplot2::geom_point(data = daty) +
  # ggplot2::geom_errorbar(data = daty) +
  ggplot2::labs(y = "Attack Rate (%)", x = "Date") +
  # ggplot2::ggtitle("IFR = 1.5%") +
  # NOTE(review): seq(0:5) evaluates to 1:6 -- probably meant 0:5 so the
  # axis labels read 0-5%.
  ggplot2::scale_y_continuous(breaks = seq(0, 0.05, 0.01), labels = seq(0:5)) +
  cowplot::theme_cowplot()
|
08323a70ed210a21576e0f11316626f2d6cbd46a
|
f85aead45df8a331aa40da52b4846a107125a258
|
/tests/testthat/test-geom-ribbon.R
|
c2b2d896886af992246eff5307c07bd04bbcb1c1
|
[] |
no_license
|
zeehio/ggpipe
|
30269fbd676e6b7a3701cfef5ad061b81206230c
|
3598b70b0da3078e93f238c3d4bec73ecbc2c6d0
|
refs/heads/master
| 2021-05-07T09:32:28.364564
| 2017-11-08T12:09:49
| 2017-11-08T12:09:49
| 109,627,615
| 63
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 338
|
r
|
test-geom-ribbon.R
|
context("geom_ribbon")

test_that("geom_ribbon same results", {
  # The same ribbon specification built twice: once with ggplot2's `+`
  # operator, once through the pipe-friendly ggpipe wrappers.  Both routes
  # must produce an identical plot object.
  dat <- data.frame(x = 1:5, y = c(1, 1, NA, 1, 1))
  plot_plus <- ggplot2::ggplot(dat, ggplot2::aes(x)) +
    ggplot2::geom_ribbon(ggplot2::aes(ymin = y - 1, ymax = y + 1))
  plot_piped <- ggplot(dat, aes(x)) %>%
    geom_ribbon(aes(ymin = y - 1, ymax = y + 1))
  expect_equal(plot_plus, plot_piped)
})
|
583a490a9126b1976567e8de815389f80a446f33
|
44598c891266cd295188326f2bb8d7755481e66b
|
/DbtTools/classifiers/R/KNNclassifierDistance.R
|
398ce65abc23ccd92a512ac052b7f343809e6a7a
|
[] |
no_license
|
markus-flicke/KD_Projekt_1
|
09a66f5e2ef06447d4b0408f54487b146d21f1e9
|
1958c81a92711fb9cd4ccb0ea16ffc6b02a50fe4
|
refs/heads/master
| 2020-03-13T23:12:31.501130
| 2018-05-21T22:25:37
| 2018-05-21T22:25:37
| 131,330,787
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,074
|
r
|
KNNclassifierDistance.R
|
# K-nearest-neighbour classifier supporting multiple distance measures.
#
# Classifies TestData from TrainData by majority vote among the K nearest
# training cases; ties in the vote are broken by the smaller total distance
# to the test case.  Adapted from the knnGarden package (original copyright
# Xinmiao Wang; rewritten by MT 17/08).
#
# Arguments:
#   K         number of neighbours to use; must not exceed the size of the
#             smallest class in the training set
#   TrainData matrix [n, d] of classified training cases
#   TrainCls  vector [1:n] of class labels for TrainData
#   TestData  matrix [m, d] of unclassified cases; if NULL, the training set
#             is reclassified and a resubstitution confusion matrix and
#             accuracy are printed
#   ShowObs   if TRUE, include the test observations themselves in `result`
#   method    distance measure forwarded to DistanceMatrix() (defined
#             elsewhere in this package), e.g. "euclidean", "manhattan",
#             "minkowski", "canberra", "cosine", ...
#   p         power of the Minkowski distance (forwarded as `dim`)
#
# Returns a list with:
#   result     data.frame of predicted classes (plus the true classes and/or
#              the observations, depending on TestData/ShowObs)
#   KNNTestCls vector [1:m] of predicted class labels
KNNclassifierDistance <- function(K = 1, TrainData, TrainCls, TestData = NULL,
                                  ShowObs = FALSE, method = "euclidean", p = 2) {
  TrainCls <- as.factor(TrainCls)
  TrnG <- as.numeric(TrainCls)               # integer codes of the classes
  CodeMeaning <- data.frame(TrnG, TrainCls)  # code -> label lookup table
  TK <- sort(as.matrix(table(TrnG)), decreasing = FALSE)  # class sizes, smallest first
  # K must not exceed the capacity of the smallest class.
  if (K > TK[1]) {
    stop(c("
NOTES:
sorry, the value of K ", "(K=", K, ") ",
         "you have selected is bigger than the capacity of one class in your training data set",
         "(", "the capacity is ", TK[1], ")", ",", "please choose a less value for K"))
  }
  # With no test set, reclassify the training set (resubstitution mode).
  if (is.null(TestData)) {
    IsTst <- 1
    TestData <- as.matrix(TrainData)
  } else {
    IsTst <- 0
  }
  if (!is.matrix(TestData)) {
    TestData <- as.matrix(TestData)
  }
  TrainData <- as.matrix(TrainData)
  VoteResultList <- NULL
  for (i in seq_len(nrow(TestData))) {
    # Distances from test case i (first row of Total) to every training case.
    Total <- rbind(TestData[i, ], TrainData)
    RankBoardI <- as.matrix(as.dist(DistanceMatrix(Total, method = method, dim = p))[1:nrow(TrainData)])
    RankBoardIJ <- cbind(TrnG, RankBoardI)
    # Indices of the K nearest neighbours, with their class codes/distances.
    NearestIdx <- sort(RankBoardIJ[, 2], index.return = TRUE)$ix[1:K]
    VoteAndWeight <- RankBoardIJ[NearestIdx, 1:2]
    TempVote4TestDataI <- RankBoardIJ[NearestIdx, 1]
    # Vote counts per class, largest first.
    CountVote <- as.matrix(sort(table(TempVote4TestDataI), decreasing = TRUE))
    TempWinner <- as.numeric(rownames(CountVote))
    if (length(CountVote) == 1 || K == 1) {
      # Unanimous vote (or K == 1): the top class wins outright.
      Winner <- TempWinner[1]
      TestDataIBelong <- union(CodeMeaning$TrainCls[which(CodeMeaning$TrnG == Winner)],
                               CodeMeaning$TrainCls[which(CodeMeaning$TrnG == Winner)])
      VoteResultNode <- data.frame(TestDataIBelong)
      VoteResultList <- rbind(VoteResultList, VoteResultNode)
    } else {
      # Possible tie: among classes sharing the top vote count, pick the one
      # with the smallest total distance to the test case.
      j <- 1
      TempWeight <- sum(VoteAndWeight[which(VoteAndWeight[, 1] == TempWinner[j]), 2])
      FinalList <- data.frame(TempWinner[j], TempWeight)
      while (j < length(CountVote) && CountVote[j] == CountVote[j + 1]) {
        TempWeight <- sum(VoteAndWeight[which(VoteAndWeight[, 1] == TempWinner[j + 1]), 2])
        FinalListNode <- c(TempWinner[j + 1], TempWeight)
        FinalList <- rbind(FinalList, FinalListNode)
        j <- j + 1
      }
      FinalList <- FinalList[sort(FinalList$TempWeight, index.return = TRUE)$ix[1], ]
      TestDataIBelong <- union(CodeMeaning$TrainCls[which(CodeMeaning$TrnG == FinalList[1, 1])],
                               CodeMeaning$TrainCls[which(CodeMeaning$TrnG == FinalList[1, 1])])
      VoteResultNode <- data.frame(TestDataIBelong)
      VoteResultList <- rbind(VoteResultList, VoteResultNode)
    }
  }
  if (IsTst == 1) {
    # Resubstitution mode: print confusion matrix and overall accuracy.
    CheckT <- as.matrix(table(data.frame(VoteResultList, TrainCls)))
    AccuStat <- 1 - sum(CheckT - diag(diag(CheckT))) / length(TrnG)
    print(CheckT)
    cat("the classification accuracy of this algorithm on this training dataset is: ",
        AccuStat * 100, "%", "\n\n\n")
  }
  # Assemble the result frame depending on mode and ShowObs.
  if (IsTst == 1 && !ShowObs) {
    result <- data.frame(VoteResultList, TrainCls)
  } else if (IsTst == 1 && ShowObs) {
    result <- data.frame(TestData, VoteResultList, TrainCls)
  } else if (!ShowObs) {
    result <- data.frame(VoteResultList)
  } else {
    result <- data.frame(TestData, VoteResultList)
  }
  return(list(result = result, KNNTestCls = as.vector(VoteResultList$TestDataIBelong)))
}
|
ed4b3131e53382d1a20b83688cf7f26f23ab13fc
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.security.identity/man/account_list_regions.Rd
|
d47777581123e06a51264bcc0220ce7eb4ba088b
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 3,338
|
rd
|
account_list_regions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/account_operations.R
\name{account_list_regions}
\alias{account_list_regions}
\title{Lists all the Regions for a given account and their respective opt-in
statuses}
\usage{
account_list_regions(
AccountId = NULL,
MaxResults = NULL,
NextToken = NULL,
RegionOptStatusContains = NULL
)
}
\arguments{
\item{AccountId}{Specifies the 12-digit account ID number of the Amazon Web Services
account that you want to access or modify with this operation. If you
don't specify this parameter, it defaults to the Amazon Web Services
account of the identity used to call the operation. To use this
parameter, the caller must be an identity in the \href{https://docs.aws.amazon.com/organizations/latest/userguide/orgs_getting-started_concepts.html#account}{organization's management account}
or a delegated administrator account. The specified account ID must also
be a member account in the same organization. The organization must have
\href{https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_org_support-all-features.html}{all features enabled},
and the organization must have \href{https://docs.aws.amazon.com/organizations/latest/userguide/}{trusted access}
enabled for the Account Management service, and optionally a \href{https://docs.aws.amazon.com/organizations/latest/userguide/}{delegated admin}
account assigned.
The management account can't specify its own \code{AccountId}. It must call
the operation in standalone context by not including the \code{AccountId}
parameter.
To call this operation on an account that is not a member of an
organization, don't specify this parameter. Instead, call the operation
using an identity belonging to the account whose contacts you wish to
retrieve or modify.}
\item{MaxResults}{The total number of items to return in the command's output. If the
total number of items available is more than the value specified, a
\code{NextToken} is provided in the command's output. To resume pagination,
provide the \code{NextToken} value in the \code{starting-token} argument of a
subsequent command. Do not use the \code{NextToken} response element directly
outside of the Amazon Web Services CLI. For usage examples, see
\href{https://docs.aws.amazon.com/cli/latest/userguide/cli-usage-pagination.html}{Pagination}
in the \emph{Amazon Web Services Command Line Interface User Guide}.}
\item{NextToken}{A token used to specify where to start paginating. This is the
\code{NextToken} from a previously truncated response. For usage examples,
see
\href{https://docs.aws.amazon.com/cli/latest/userguide/cli-usage-pagination.html}{Pagination}
in the \emph{Amazon Web Services Command Line Interface User Guide}.}
\item{RegionOptStatusContains}{A list of Region statuses (Enabling, Enabled, Disabling, Disabled,
Enabled_by_default) to use to filter the list of Regions for a given
account. For example, passing in a value of ENABLING will only return a
list of Regions with a Region status of ENABLING.}
}
\description{
Lists all the Regions for a given account and their respective opt-in statuses. Optionally, this list can be filtered by the \code{region-opt-status-contains} parameter.
See \url{https://www.paws-r-sdk.com/docs/account_list_regions/} for full documentation.
}
\keyword{internal}
|
d70fe152aa29c1b55e2ea92837efeac5b473047d
|
15defa8cb13e2e1babe80f08f2bfcdb7aef97671
|
/analyses/independent-samples/01-generate-independent-specimens.R
|
3455c6e05cc8320c0ac6d076b4a28c773e8ab749
|
[
"CC-BY-4.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
arpoe/OpenPBTA-analysis
|
24dd66efc4b226d9a9856f0aca81de3807af6290
|
3f9cdc713051a30165d31c30cf4d24240dbe58c1
|
refs/heads/master
| 2021-05-17T16:51:44.667557
| 2020-05-19T19:14:14
| 2020-05-19T19:14:14
| 250,880,017
| 3
| 0
|
NOASSERTION
| 2020-03-28T19:47:33
| 2020-03-28T19:47:32
| null |
UTF-8
|
R
| false
| false
| 3,871
|
r
|
01-generate-independent-specimens.R
|
# 01-generate-independent-specimens.R
#
# Josh Shapiro for CCDL 2019
#
# Purpose: Generate tables of independent specimens where no two specimens are
# chosen from the same individual.
#
# Option descriptions
# -f, --histology_file : File path to where you would like the annotation_rds file to be
# stored
# -o,--output_directory : Output directory
#
# example invocation:
# Rscript analyses/independent-samples/01-generate-independent-specimens.R \
# -f data/pbta-histologies.tsv \
# -o analyses/independent-samples/results
# Base directories
# Locate the project root by the presence of the .git directory so the
# script can be run from anywhere inside the repository.
root_dir <- rprojroot::find_root(rprojroot::has_dir(".git"))
analysis_dir <- file.path(root_dir, "analyses", "independent-samples")
# Load the optparse library
library(optparse)
# Magrittr pipe
`%>%` <- dplyr::`%>%`
# source sample selection function
source(file.path(analysis_dir, "independent-samples.R"))
# Fixed seed: independent_samples() involves random selection, so results
# must be reproducible.
set.seed(201910)
# Parse options
# Each option is a file path given relative to the project root.
# (Fixed: the first make_option call had a trailing comma after `help`,
# which passed an empty positional argument that silently matched the
# `action` formal -- fragile and inconsistent with the second call.)
option_list <- list(
  make_option(
    c("-f", "--histology_file"),
    type = "character",
    default = NULL,
    help = "path to the histology tsv file, relative to project root"
  ),
  make_option(
    c("-o", "--output_directory"),
    type = "character",
    default = NULL,
    help = "path to output directory, relative to project root"
  )
)
opts <- parse_args(OptionParser(option_list = option_list))
# set output files
out_dir <- file.path(root_dir, opts$output_directory)
if (!dir.exists(out_dir)){
  dir.create(out_dir, recursive = TRUE)
}
# One output file per (sequencing strategy) x (tumor type) combination.
wgs_primary_file <- file.path(out_dir,
                              "independent-specimens.wgs.primary.tsv")
wgs_primplus_file <- file.path(out_dir,
                               "independent-specimens.wgs.primary-plus.tsv")
wgswxs_primary_file <- file.path(out_dir,
                                 "independent-specimens.wgswxs.primary.tsv")
wgswxs_primplus_file <- file.path(out_dir,
                                  "independent-specimens.wgswxs.primary-plus.tsv")
# Read histology file
sample_df <- readr::read_tsv(file.path(root_dir, opts$histology_file),
                             col_types = readr::cols()) # suppress parse message
# Filter to only samples from tumors, where composition is known to be Solid Tissue
# Note that there are some samples with unknown composition, but these will be ignored for now.
tumor_samples <- sample_df %>%
  dplyr::filter(sample_type == "Tumor",
                composition == "Solid Tissue",
                experimental_strategy %in% c("WGS", "WXS"))
# Generate WGS independent samples
# independent_samples() is sourced from independent-samples.R above and
# selects at most one specimen per participant.
wgs_samples <- tumor_samples %>%
  dplyr::filter(experimental_strategy == "WGS")
wgs_primary <- independent_samples(wgs_samples, tumor_types = "primary")
wgs_primary_plus <- independent_samples(wgs_samples, tumor_types = "prefer_primary")
# Generate lists for WXS only samples
# WGS is generally preferred, so we will only include those where WGS is not available
wxs_only_samples <- tumor_samples %>%
  dplyr::filter(!(Kids_First_Participant_ID %in%
                    wgs_samples$Kids_First_Participant_ID))
wxs_primary <- independent_samples(wxs_only_samples, tumor_types = "primary")
wxs_primary_plus <- independent_samples(wxs_only_samples, tumor_types = "prefer_primary")
# write files
# Log how many specimens end up in each list, then write the TSVs.
message(paste(nrow(wgs_primary), "WGS primary specimens"))
readr::write_tsv(wgs_primary, wgs_primary_file)
message(paste(nrow(wgs_primary_plus), "WGS specimens (including non-primary)"))
readr::write_tsv(wgs_primary_plus, wgs_primplus_file)
message(paste(nrow(wgs_primary) + nrow(wxs_primary), "WGS+WXS primary specimens"))
readr::write_tsv(dplyr::bind_rows(wgs_primary, wxs_primary),
                 wgswxs_primary_file)
message(paste(nrow(wgs_primary_plus) + nrow(wxs_primary_plus), "WGS+WXS specimens (including non-primary)"))
readr::write_tsv(dplyr::bind_rows(wgs_primary_plus, wxs_primary_plus),
                 wgswxs_primplus_file)
|
24e1fe628546e7a893e8719d8ab8227ebd953b7b
|
8c0969a8aba7988ece1c4b9c20ba1d1fd2f3a0d2
|
/R/fix_data_EKF_1d_interp_joint.r
|
67f439fe195bf43626014b5f12b816e7e0906167
|
[] |
no_license
|
cran/animalEKF
|
300e36ebc91fa8143100dfe3179b76f71f5c0fbb
|
acf5cdd8c0d92e1000d03987afb66acc0c8925fb
|
refs/heads/master
| 2022-12-27T07:30:47.740285
| 2020-10-05T10:50:06
| 2020-10-05T10:50:06
| 301,801,402
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,862
|
r
|
fix_data_EKF_1d_interp_joint.r
|
fix_data_EKF_1d_interp_joint <- function(env_obj) {
# confidence plot
env_obj$loc_pred_plot_conf <- min(max(env_obj$loc_pred_plot_conf, 0.05), 0.95)
# normal
env_obj$loc_pred_plot_conf_constant <- qnorm(p=0.5 + env_obj$loc_pred_plot_conf/2)
env_obj$d <- env_obj$d[ order(env_obj$d$date_as_sec),]
env_obj$first_time <- min(env_obj$d$date_as_sec)
env_obj$shark_names <- as.character(sort(unique(env_obj$d$tag)))
# if NULL, maxStep should be the number of steps required to simulate entire submitted data
observed_intervals <- lapply(env_obj$shark_names, function(s) unique(env_obj$d$t_intervals[ env_obj$d$tag==s ]))
names(observed_intervals) <- env_obj$shark_names
observed_intervals <- observed_intervals[sapply(observed_intervals, function(x) length(x) >= 3)]
if (length(observed_intervals) == 0) {
stop(paste("No observed animals have 3 or more intervals of length", env_obj$reg_dt, "with observations\nNeed to have more data or shorted reg_dt"))
}
third_steps <- sapply(observed_intervals, function(x) x[3])
if (is.null(env_obj$max_int_wo_obs)) {
env_obj$max_int_wo_obs <- Inf
}
else if (! any(third_steps <= env_obj$max_int_wo_obs)) {
print(observed_intervals)
stop(paste("No observed animals have consecutive observed intervals separated by less than", env_obj$max_int_wo_obs, "intervals.\nNeed to increase max_int_wo_obs to at least", min(third_steps)))
}
min_intervals_needed <- min(third_steps[third_steps <= env_obj$max_int_wo_obs])
# if NULL, maxStep should be the number of steps required to simulate entire submitted data
max_required_steps <- ceiling(1+(env_obj$d$date_as_sec[ nrow(env_obj$d) ] - env_obj$first_time)/env_obj$reg_dt)
env_obj$maxStep <- ifelse(is.null(env_obj$maxStep), max_required_steps, max(min_intervals_needed, min(env_obj$maxStep, max_required_steps)))
env_obj$t_reg <- seq(from= env_obj$first_time, by=env_obj$reg_dt, length.out=env_obj$maxStep)
#do this so the first interval captures the first observation at t=0
env_obj$t_reg[ 1 ] <- env_obj$t_reg[ 1 ]-.Machine$double.eps
env_obj$N <- length(env_obj$t_reg)
env_obj$max_int_wo_obs <- min(env_obj$N+1, env_obj$max_int_wo_obs)
env_obj$d <- env_obj$d[ env_obj$d$date_as_sec <= env_obj$t_reg[ env_obj$N ],]
env_obj$tags <- env_obj$d$tag
#dt <- d$time_to_next
env_obj$included_intervals <- 1:(env_obj$N-1)
#calculate which regular step each observation falls into
env_obj$d$t_intervals <- as.numeric(as.character(cut(x=env_obj$d$date_as_sec, breaks=env_obj$t_reg, labels=env_obj$included_intervals, right=TRUE)))
print("t_intervals")
print(env_obj$d$t_intervals)
env_obj$shark_names <- as.character(sort(unique(env_obj$tags)))
env_obj$nsharks <- length(env_obj$shark_names)
print(paste("shark names are",paste(env_obj$shark_names, collapse=" ")))
env_obj$shark_intervals <- list()
env_obj$shark_valid_steps <- list()
for (s in env_obj$shark_names) {
env_obj$shark_intervals[[ s ]] <- unique(env_obj$d$t_intervals[ env_obj$d$tag==s ])
#keep steps where there are less than a certain gap between observations
tmp <- c()
tmp1 <- c()
for (jj in 1:(length(env_obj$shark_intervals[[ s ]])-1)){
if(diff(env_obj$shark_intervals[[ s ]][ jj:(jj+1) ]) <= env_obj$max_int_wo_obs) {
tmp <- c(tmp, (env_obj$shark_intervals[[ s ]][ jj ]):(env_obj$shark_intervals[[ s ]][ jj+1 ]))
tmp1 <- c(tmp1, env_obj$shark_intervals[[ s ]][ jj:(jj+1) ])
}
}
env_obj$shark_valid_steps[[ s ]] <- sort(unique(tmp))
env_obj$shark_intervals[[ s ]] <- sort(unique(tmp1))
}
print("shark intervals")
print(env_obj$shark_intervals)
print(env_obj$N)
env_obj$included_intervals <- sort(unique(unlist(env_obj$shark_valid_steps)))
print(paste("sharks:", paste(env_obj$shark_names, collapse=" ")))
env_obj$first_intervals <- lapply(env_obj$shark_valid_steps, function(x) x[ !((x-1) %in% x) ])
names(env_obj$first_intervals) <- env_obj$shark_names
print("starting observations per shark:")
print(env_obj$first_intervals)
env_obj$shark_symbols <- 1:env_obj$nsharks
names(env_obj$shark_symbols) <- env_obj$shark_names
print("intervals with observations per shark:")
print(env_obj$shark_intervals)
print("intervals to be simulated per shark:")
print(env_obj$shark_valid_steps)
#last interval with a valid observation
env_obj$shark_final_obs <- sapply(env_obj$shark_intervals, max)
names(env_obj$shark_final_obs) <- env_obj$shark_names
if (env_obj$nsharks==1) {
env_obj$interact <- FALSE
}
if (env_obj$nstates==1) {
env_obj$states <- rep(1, length(env_obj$states))
env_obj$next_states <- rep(1, length(env_obj$next_states))
env_obj$interact <- FALSE
}
print(paste("nstates:", env_obj$nstates))
if (env_obj$update_params_for_obs_only) {
env_obj$update_eachstep <- FALSE
}
env_obj$d$shark_obs_index <- NA
for (s in env_obj$shark_names) {
ss <- which(env_obj$tags==s)
env_obj$d$shark_obs_index[ ss ]<- 1:length(ss)
}
#print(d$shark_obs_index)
if (env_obj$nsharks > 1) { print(env_obj$tags) }
env_obj$d$rowid <- 1:nrow(env_obj$d)
#if we want to model it as one state, so be it
if (env_obj$nstates == 1) {
env_obj$d$state.guess2[ ! is.na(env_obj$d$state.guess2)] <- 1
env_obj$d$next.guess2[ ! is.na(env_obj$d$next.guess2)] <- 1
env_obj$d$lambda[ ! is.na(env_obj$d$lambda)] <- 1
}
env_obj$d$state.guess2 <- as.numeric(env_obj$d$state.guess2)
env_obj$d$next.guess2 <- as.numeric(env_obj$d$next.guess2)
env_obj$d$lambda <- as.numeric(env_obj$d$lambda)
env_obj$states <- as.numeric(env_obj$d$state.guess2)
env_obj$next_states <- as.numeric(env_obj$d$next.guess2)
#nstates <- max(length(unique(states)), nstates)
env_obj$d <- env_obj$d[,c("shark_obs_index","X","velocity","date_as_sec","time_to_next",
"lambda","state.guess2","next.guess2","t_intervals")]
env_obj$d <- as.matrix(env_obj$d)
rownames(env_obj$d) <- 1:nrow(env_obj$d)
#for 1D log velocity is just angle_velocity
nus <- length(unique(env_obj$states, na.rm=TRUE))
nust <- env_obj$nstates
if (env_obj$compare_with_known) {
nust <- length(unique(env_obj$known_regular_step_ds$state.guess2, na.rm=TRUE))
true_diffs <- unique(diff(unique(env_obj$known_regular_step_ds$date_as_sec[ ! is.na(env_obj$known_regular_step_ds$date_as_sec)])))
if (length(true_diffs) > 1) {
stop(paste("known_regular_step_ds has multiple observed time gaps:", paste(true_diffs, collapse=", ")))
}
else if (! (env_obj$reg_dt %in% true_diffs)) {
stop(paste("known_regular_step_ds has observed time gap ", true_diffs, " but argument reg_dt is ", env_obj$reg_dt, "; they must be the same"))
}
}
if (nus != env_obj$nstates || nust != env_obj$nstates) {
print(paste("Observed/true data has", nus, "and", nust, "behaviors, but choose to model with", env_obj$nstates, "behaviors"))
}
invisible(NULL)
}
|
5573ec5fef50018b08e596828920d03c9244bfe8
|
7d8f1fcd9adda95ab1a29cb65e255a5e7379ec8f
|
/R Code/Repeated CV/Summary_1.R
|
3b7e141416ba674662df220861c08037f52830a7
|
[] |
no_license
|
kstatju/DMC-2019
|
54d7961da8677dbbec7e3015b08c3d0f0da84f6d
|
55d718c11230a27b321679ef552c5f83fdaeec15
|
refs/heads/main
| 2023-02-01T02:53:57.629804
| 2020-12-15T03:27:04
| 2020-12-15T03:27:04
| 321,537,099
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,384
|
r
|
Summary_1.R
|
0.9885
df1 = scale(df[,!(names(df) %in% c("fraud"))])
df = data.frame(df1, fraud = df[,"fraud"])
df_train = upSample(x=df[,!(names(df) %in% c("fraud"))] ,
y=as.factor(df$fraud) ,
yname = "fraud")
nm = names(df_train)
xnm = nm[!(nm%in%c('fraud'))]
cv.lasso <- cv.glmnet(x = as.matrix(df_train[,xnm]),
y = df_train$fraud,
alpha = .88,
family = "binomial")
fit = glmnet(x = as.matrix(df_train[,xnm]), y = df_train$fraud,
alpha = .88,
family = "binomial",
lambda = cv.lasso$lambda.min)
myCoefs <- coef(fit, s="lambda.min");
myCoefs[which(myCoefs != 0 ) ]
nam = as.vector(unlist(myCoefs@Dimnames[[1]][which(myCoefs != 0 ) ][-1]))
df_train = upSample(x=df[,!(names(df) %in% c("fraud"))] ,
y=as.factor(df$fraud) ,
yname = "fraud")
a = mda::fda(formula = fraud~.,
data = df_train,
method = earth,
degree = 3,
penalty = 4.5,
nk = 80,
thresh = 0.0001,
minspan = 3,
endspan = 75,
fast.k = 0,
pmethod = "backward",
nprune = 15,
nfold = 5,
ncross = 1,
Adjust.endspan = 8,
Get.leverages = F)
opt = options()
options(digits=15)
options(opt)
pred = predict(fit, as.matrix(Train[,xnm]), type = "response")
pred1 = predict(fit, as.matrix(Test), type = "response")
prop.table(table(Train$fraud, pred>0.946))
prop.table(table(pred>0.946))
prop.table(table(pred1>0.946))
[1] "trustLevel" "totalScanTimeInSeconds" "grandTotal" "lineItemVoids"
[5] "scansWithoutRegistration" "quantityModifications" "scannedLineItemsPerSecond" "valuePerSecond"
[9] "lineItemVoidsPerPosition" "fraud" "X27_Nitems" "X78_valueItem"
aa = data.frame(Train, pred = ifelse(pred>0.946, 1,0))
bb = aa[(aa$s0==0 & aa$fraud==1) | (aa$s0==1 & aa$fraud==0),]
ggplot(Train, aes(y = X27_Nitems, x = quantityModifications, col = fraud, size=fraud))+
geom_point(alpha =0.4)
ggplot(Train, aes(y = grandTotal/(1+scansWithoutRegistration*quantityModifications), x = trustLevel, col = fraud, size=fraud))+
geom_point(alpha =0.4)
ggplot(Train, aes(y = grandTotal/(1+scansWithoutRegistration*quantityModifications), x = trustLevel, col = fraud, size=fraud))+
geom_point(alpha =0.4)
ggplot(Train, aes(y = grandTotal/(1+lineItemVoidsPerPosition), x = scannedLineItemsPerSecond, col = fraud, size=fraud))+
geom_point(alpha =0.4)+scale_x_continuous(limits = c(0,.4))
ggplot(Train, aes(x = quantityModifications/X27_Nitems, y = grandTotal, col = fraud, size=fraud))+
geom_point(alpha =0.4)+scale_x_continuous(limits = c(0,.5))
table(con_2_dis_variable$totalScanTimeInSeconds_2_discrete, con_2_dis_variable$scannedLineItemsPerSecond_2_discrete)
table(con_2_dis_variable$totalScanTimeInSeconds_2_discrete, con_2_dis_variable$valuePerSecond_2_discrete)
table(con_2_dis_variable$scannedLineItemsPerSecond_2_discrete, con_2_dis_variable$valuePerSecond_2_discrete)
table(con_2_dis_variable_test$totalScanTimeInSeconds_2_discrete, con_2_dis_variable_test$scannedLineItemsPerSecond_2_discrete)
table(con_2_dis_variable_test$totalScanTimeInSeconds_2_discrete, con_2_dis_variable_test$valuePerSecond_2_discrete)
table(con_2_dis_variable_test$scannedLineItemsPerSecond_2_discrete, con_2_dis_variable_test$valuePerSecond_2_discrete)
Train$gr = "Train"
Test$fraud = NA
Test$gr = "Test"
df = rbind(Train, Test)
final_var_list = c( "totalScanTimeInSeconds" ,
"grandTotal" ,
"lineItemVoids" ,
"quantityModifications" ,
"scannedLineItemsPerSecond" ,
"valuePerSecond" ,
"X27_Nitems" ,
"totalScanTimeInSeconds_2_discrete" ,
"scannedLineItemsPerSecond_2_discrete" ,
"valuePerSecond_2_discrete" ,
"likelihood_trustLevel" ,
"likelihood_lineItemVoids" ,
"likelihood_scansWithoutRegistration" ,
"likelihood_totalScanTimeInSeconds_2_discrete" ,
"likelihood_scannedLineItemsPerSecond_2_discrete",
"likelihood_valuePerSecond_2_discrete" ,
"Cross_SLIPS_2_Dis_TSTIS" ,
"Nitem_SWR" ,
"Nitem_LIV" ,
"Nitem_TL" ,
"SLIPS_2_dis_TSTIS_2_dis_LTL" ,
"SLIPS_TSTIS_2_dis_LTL" ,
"Nitem_LTL" ,
"Nitem_LLIV" ,
"SLIPS_2_dis_SWR_LTSTIS_2_dis" ,
"SLIPS_2_dis_LTL_LTSTIS_2_dis" ,
"LTL_LSWR_LLSLIPS_2_dis" ,
"Nitem_LVPS_2_dis" ,
"LTL_LSLIPS_2_dis_LVPS_2_dis" ,
"GT_by_TSTIN" ,
"GT_by_TSTIN_QM" ,
"GT_by_TSTIN_LIV" ,
"GT_by_TSTIN_SWR" ,
"Nitem_VPS_VOID" ,
"Nitem_LIVPP_VOID" ,
"Nitem_LIVPP_TL" ,
"CPS_LSWR" ,
"X27_Nitems_pow3" ,
"X27_Nitems_SWR_pow3" ,
"trustLevel1" ,
"trustLevel2" )
for (i in final_var_list){
print(ggplot(df, aes(!!parse_expr(i), fill = gr)) + geom_density(alpha = 0.2) )
}
print(ggplot(df, aes(sapply(1:nrow(df), FUN = function(i) {
ifelse(df$SLIPS_2_dis_TSTIS_2_dis_LTL[i] == 0, log(1e-10),
log(df$SLIPS_2_dis_TSTIS_2_dis_LTL[i]))}), fill = gr)) + geom_density(alpha = 0.2) )
print(ggplot(df, aes((GT_by_TSTIN_LIV-mean(GT_by_TSTIN_LIV))/sd(GT_by_TSTIN_LIV), fill = gr)) + geom_density(alpha = 0.2))
print(ggplot(df, aes((GT_by_TSTIN_LIV), fill = gr)) + geom_density(alpha = 0.2))
+scale_x_continuous(limits = c(0,2)))
|
5af28157f6e4e30de350a489ee0f8541d5120f05
|
f5cf80f12817abe08167fb0c5aa2bb115dfcc8b0
|
/man/textgRid.Rd
|
b9b220db09716988227ddb7b62bdf182dbdba8f3
|
[] |
no_license
|
M-Lancien/textgRid
|
7d14294a845cca5f50137e0d02fdf1482d8040f9
|
3112881b042f0c0b661f380a52d922663fad21db
|
refs/heads/master
| 2021-09-18T08:07:54.060449
| 2018-07-11T21:35:41
| 2018-07-11T21:35:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,127
|
rd
|
textgRid.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/textgRid.R
\docType{package}
\name{textgRid}
\alias{textgRid}
\alias{textgRid-package}
\title{textgRid: Praat TextGrid Objects in R}
\description{
The software application Praat can be used to annotate
waveform data (e.g., to mark intervals of interest or to label events).
These annotations are stored in a Praat TextGrid object, which consists of
a number of interval tiers and point tiers. An interval tier consists of
sequential (i.e., not overlapping) labeled intervals. A point tier consists
of labeled events that have no duration. The textgRid package provides
S4 classes, generics, and methods for accessing information that is stored
in Praat TextGrid objects.
}
\section{S4 classes}{
\code{\link[=Tier-class]{Tier}},
\code{\link[=IntervalTier-class]{IntervalTier}},
\code{\link[=PointTier-class]{PointTier}},
\code{\link[=TextGrid-class]{TextGrid}}
}
\section{S4 generics and methods}{
\code{\link[=TextGrid-constructor]{TextGrid()} object constructor}
}
\section{Functions}{
\code{\link{findIntervals}},
\code{\link{findPoints}}
}
|
6ebcb6c11a8cfd06ce67ef580d5e903bb37ce252
|
b0ca8e870c2cd419de4cb68d000cd44bbfaf6d84
|
/Plot1.R
|
f6afc9947ec5a10a690668be2b17e7dc7b6ad6b8
|
[] |
no_license
|
aperelson/EDA-Course-Project-2
|
7f55a46d9fbcb8a3778bd7b98eac271adeea4051
|
42ce028dfccf3f4b473a044f31ca72e77dab607c
|
refs/heads/master
| 2020-06-09T13:30:32.441700
| 2016-12-10T15:10:45
| 2016-12-10T15:10:45
| 76,035,690
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 682
|
r
|
Plot1.R
|
plot1 <- function() {
## Read EPA data:
dfPM25 <- readRDS("summarySCC_PM25.rds")
## Convert to data.table for easier manipulation:
library(data.table)
dtPM25 <- data.table(dfPM25)
sumPM25 <- dtPM25[,list(sum=sum(Emissions)),by=year]
## Set option to display Y axis better:
opt <- options()
opt$scipen = 20
options(opt)
## Create histogram:
barplot((sumPM25$sum)/10^6,
names.arg=sumPM25$year,
col="wheat",
main="Total pm2.5 emission from all sources",
xlab="Year",
ylab="Total pm2.5 (10^6 tons)")
dev.copy(png, file="plot1.png")
dev.off()
}
|
93f87cd202fa536208ba692c0e2bc88d8339f951
|
b4d3faf682c97c7f96c5cf013a7f8f927207e7d3
|
/analysis01.R
|
5d431f45ebadfd68b6e7cf5a9ebfbc1a7a3c6edf
|
[
"MIT"
] |
permissive
|
uhkniazi/HPRU_AK_Rna_Seq
|
38982236cb5d6cca39ad4b305ecfdc45aeae856d
|
960be46b0c318ce7fb766a4d2b6c1c2deab0104e
|
refs/heads/master
| 2021-01-12T14:54:21.419733
| 2016-03-10T11:32:31
| 2016-03-10T11:32:31
| 47,402,604
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,123
|
r
|
analysis01.R
|
# Name: analysis01.R
# Auth: u.niazi@imperial.ac.uk
# Date: 12/01/2016
# Desc: DE analysis for count matrix data using RNA-Seq
library(DESeq2)
## data loading and setting
dfDat = read.csv('Data_external/Counts/toc_raw.txt', header=T, sep=' ')
# rownames(dfDat) = dfDat$emtrez_id
# dfDat = dfDat[,-1]
cn = colnames(dfDat)
fGroups = gsub('(\\w)\\d+', '\\1', cn)
fGroups = factor(fGroups, levels=c('D', 'H', 'C'))
mDat = as.matrix(dfDat)
dfDesign = data.frame(condition=fGroups, row.names = colnames(mDat))
## DE analysis
# call deseq2 constructor
oDseq = DESeqDataSetFromMatrix(mDat, dfDesign, design = ~ condition)
oDseq = DESeq(oDseq)
plotDispEsts(oDseq)
# get the results for each comparison
# where all three comparisons are performed
oRes.D.vs.C = results(oDseq, contrast = c('condition', 'D', 'C'))
oRes.H.vs.C = results(oDseq, contrast = c('condition', 'H', 'C'))
oRes.D.vs.H = results(oDseq, contrast = c('condition', 'D', 'H'))
plotMA(oRes.H.vs.C, main='H vs C')
plotMA(oRes.D.vs.C, main='D vs C')
plotMA(oRes.D.vs.H, main='D vs H')
# get results with significant p-values
dfD.vs.C = as.data.frame(oRes.D.vs.C[which(oRes.D.vs.C$padj < 0.1),])
dfH.vs.C = as.data.frame(oRes.H.vs.C[which(oRes.H.vs.C$padj < 0.1),])
dfD.vs.H = as.data.frame(oRes.D.vs.H[which(oRes.D.vs.H$padj < 0.1),])
nrow(dfD.vs.C)
nrow(dfD.vs.H)
nrow(dfH.vs.C)
## choose the comparison for plotting
library(org.Hs.eg.db)
# add annotation to the data set after selecting comparison
res = as.data.frame(oRes.D.vs.C)
rn = rownames(res)
df = select(org.Hs.eg.db, as.character(rn), c('SYMBOL'), 'ENTREZID')
df = df[!duplicated(df$ENTREZID),]
rownames(df) = df$ENTREZID
dfPlot = res
dfPlot = cbind(dfPlot[rn,], df[rn,])
dfPlot = na.omit(dfPlot)
## write csv file
write.csv(dfPlot, file='Results/DEAnalysis_D.vs.C.csv')
dfGenes = data.frame(P.Value=dfPlot$pvalue, logFC=dfPlot$log2FoldChange, adj.P.Val = dfPlot$padj, SYMBOL=dfPlot$SYMBOL)
f_plotVolcano = function(dfGenes, main, p.adj.cut = 0.1, fc.lim = c(-3, 3)){
p.val = -1 * log10(dfGenes$P.Value)
fc = dfGenes$logFC
# cutoff for p.value y.axis
y.cut = -1 * log10(0.01)
col = rep('lightgrey', times=length(p.val))
c = which(dfGenes$adj.P.Val < p.adj.cut)
col[c] = 'red'
plot(fc, p.val, pch=20, xlab='Fold Change', ylab='-log10 P.Value', col=col, main=main, xlim=fc.lim)
abline(v = 0, col='grey', lty=2)
abline(h = y.cut, col='red', lty=2)
# second cutoff for adjusted p-values
y.cut = quantile(p.val[c], probs=0.95)
abline(h = y.cut, col='red')
# identify these genes
g = which(p.val > y.cut)
lab = dfGenes[g, 'SYMBOL']
text(dfGenes$logFC[g], y = p.val[g], labels = lab, pos=2, cex=0.6)
}
f_plotVolcano(dfGenes, 'D vs C')
## repeat for the second comparison
res = as.data.frame(oRes.D.vs.H)
rn = rownames(res)
df = select(org.Hs.eg.db, as.character(rn), c('SYMBOL'), 'ENTREZID')
df = df[!duplicated(df$ENTREZID),]
rownames(df) = df$ENTREZID
dfPlot = res
dfPlot = cbind(dfPlot[rn,], df[rn,])
dfPlot = na.omit(dfPlot)
## write csv file
write.csv(dfPlot, file='Results/DEAnalysis_D.vs.H.csv')
dfGenes = data.frame(P.Value=dfPlot$pvalue, logFC=dfPlot$log2FoldChange, adj.P.Val = dfPlot$padj, SYMBOL=dfPlot$SYMBOL)
f_plotVolcano(dfGenes, 'D vs H')
## third comparison
res = as.data.frame(oRes.H.vs.C)
rn = rownames(res)
df = select(org.Hs.eg.db, as.character(rn), c('SYMBOL'), 'ENTREZID')
df = df[!duplicated(df$ENTREZID),]
rownames(df) = df$ENTREZID
dfPlot = res
dfPlot = cbind(dfPlot[rn,], df[rn,])
dfPlot = na.omit(dfPlot)
## write csv file
write.csv(dfPlot, file='Results/DEAnalysis_H.vs.C.csv')
dfGenes = data.frame(P.Value=dfPlot$pvalue, logFC=dfPlot$log2FoldChange, adj.P.Val = dfPlot$padj, SYMBOL=dfPlot$SYMBOL)
f_plotVolcano(dfGenes, 'H vs C')
## group the genes by expression profile i.e. DE or not DE
cvCommonGenes = unique(c(rownames(dfD.vs.H), rownames(dfD.vs.C)))
mCommonGenes = matrix(NA, nrow=length(cvCommonGenes), ncol=2)
mCommonGenes[,1] = cvCommonGenes %in% rownames(dfD.vs.H)
mCommonGenes[,2] = cvCommonGenes %in% rownames(dfD.vs.C)
rownames(mCommonGenes) = cvCommonGenes
colnames(mCommonGenes) = c('D.vs.H', 'D.vs.C')
#### analysis by grouping genes
# create groups in the data based on 2^2-1 combinations
mCommonGenes.grp = mCommonGenes
set.seed(123)
dm = dist(mCommonGenes.grp, method='binary')
hc = hclust(dm)
# cut the tree at the bottom to create groups
cp = cutree(hc, h = 0.2)
# sanity checks
table(cp)
length(cp)
length(unique(cp))
mCommonGenes.grp = cbind(mCommonGenes.grp, cp)
### print and observe this table and select the groups you are interested in
temp = mCommonGenes.grp
temp = (temp[!duplicated(cp),])
temp2 = cbind(temp, table(cp))
rownames(temp2) = NULL
print(temp2)
# write csv file with gene names in each group
rn = rownames(mCommonGenes.grp[mCommonGenes.grp[,'cp'] == '1',])
length(rn)
head(mCommonGenes.grp[rn,])
df = select(org.Hs.eg.db, as.character(rn), c('SYMBOL'), 'ENTREZID')
df = df[!duplicated(df$ENTREZID),]
rownames(df) = df$ENTREZID
write.csv(df, 'Results/DEAnalysis_Genes_Group1.csv')
# repeat for other 2 groups
rn = rownames(mCommonGenes.grp[mCommonGenes.grp[,'cp'] == '2',])
length(rn)
head(mCommonGenes.grp[rn,])
df = select(org.Hs.eg.db, as.character(rn), c('SYMBOL'), 'ENTREZID')
df = df[!duplicated(df$ENTREZID),]
rownames(df) = df$ENTREZID
write.csv(df, 'Results/DEAnalysis_Genes_Group2.csv')
rn = rownames(mCommonGenes.grp[mCommonGenes.grp[,'cp'] == '3',])
length(rn)
head(mCommonGenes.grp[rn,])
df = select(org.Hs.eg.db, as.character(rn), c('SYMBOL'), 'ENTREZID')
df = df[!duplicated(df$ENTREZID),]
rownames(df) = df$ENTREZID
write.csv(df, 'Results/DEAnalysis_Genes_Group3.csv')
fSamples = fGroups
## get the count matrix
mCounts = counts(oDseq, normalized=T)
mCounts = na.omit(log(mCounts))
f = is.finite(rowSums(mCounts))
mCounts = mCounts[f,]
fGroups = fSamples
# data quality check
plot(density(rowMeans(mCounts)))
### create graph and clusters
library(org.Hs.eg.db)
library(downloader)
source('../CGraphClust/CGraphClust.R')
# plotting parameters
p.old = par()
# try different combinations of graphs
rn = rownames(mCommonGenes.grp[mCommonGenes.grp[,'cp'] == '1',])
length(rn)
# or
rn = rownames(mCommonGenes.grp)
# select significant genes and prepare data for graphing
mCounts = mCounts[rownames(mCounts) %in% rn,]
colnames(mCounts) = fGroups
mCounts = mCounts[,order(fGroups)]
fGroups = fGroups[order(fGroups)]
mCounts = t(mCounts)
mCounts.bk = mCounts
dfMap = AnnotationDbi::select(org.Hs.eg.db, colnames(mCounts), 'UNIPROT', 'ENTREZID')
dfMap = na.omit(dfMap)
### load the uniprot2reactome mapping obtained from
# http://www.reactome.org/download/current/UniProt2Reactome_All_Levels.txt
# get reactome data
url = 'http://www.reactome.org/download/current/UniProt2Reactome_All_Levels.txt'
dir.create('Data_external', showWarnings = F)
csReactomeFile = 'Data_external/UniProt2Reactome_All_Levels.txt'
# download the reactome file if it doesnt exist
if (!file.exists(csReactomeFile)) download(url, csReactomeFile)
dfReactome = read.csv(csReactomeFile, header = F, stringsAsFactors=F, sep='\t')
x = gsub('\\w+-\\w+-(\\d+)', replacement = '\\1', x = dfReactome$V2, perl = T)
dfReactome$V2 = x
## map reactome ids to uniprot ids
dfReactome.sub = dfReactome[dfReactome$V1 %in% dfMap$UNIPROT,]
# get the matching positions for uniprot ids in the reactome table
i = match(dfReactome.sub$V1, dfMap$UNIPROT)
dfReactome.sub$ENTREZID = dfMap$ENTREZID[i]
dfGraph = dfReactome.sub[,c('ENTREZID', 'V2')]
dfGraph = na.omit(dfGraph)
n = unique(dfGraph$ENTREZID)
mCounts = mCounts[,n]
print(paste('Total number of genes with Reactome terms', length(n)))
levels(fGroups)
# create a correlation matrix to decide cor cutoff
mCor = cor(mCounts)
# check distribution
hist(sample(mCor, 1000, replace = F), prob=T, main='Correlation of genes', xlab='', family='Arial', breaks=20, xaxt='n')
axis(1, at = seq(-1, 1, by=0.1), las=2)
# stabalize the data and check correlation again
# mCounts.bk = mCounts
# # stabalize the data
# mCounts.st = apply(mCounts, 2, function(x) f_ivStabilizeData(x, fGroups))
# rownames(mCounts.st) = fGroups
#
# # create a correlation matrix
# mCor = cor(mCounts.st)
# # check distribution
# hist(sample(mCor, 1000, replace = F), prob=T, main='Correlation of genes', xlab='', family='Arial', breaks=20, xaxt='n')
# axis(1, at = seq(-1, 1, by=0.1), las=2)
# use the unstabalized version
# create the graph cluster object
# using absolute correlation vs actual values lead to different clusters
oGr = CGraphClust(dfGraph, abs(mCor), iCorCut = 0.6, bSuppressPlots = T)
# sanity check
getSignificantClusters(oGr, t(mCounts), fGroups, p.cut = 0.02)
## general graph structure
set.seed(1)
plot.final.graph(oGr)
ecount(getFinalGraph(oGr))
vcount(getFinalGraph(oGr))
## community structure
## overview of how the commuinties look like
# plot the main communities in 2 different ways
ig = getFinalGraph(oGr)
par(mar=c(1,1,1,1)+0.1)
set.seed(1)
ig = f_igCalculateVertexSizesAndColors(ig, t(mCounts), fGroups, bColor = T, iSize = 10)
plot(getCommunity(oGr), ig, vertex.label=NA, layout=layout_with_fr,
vertex.frame.color=NA, mark.groups=NULL, edge.color='lightgrey')
set.seed(1)
ig = getFinalGraph(oGr)
ig = f_igCalculateVertexSizesAndColors(ig, t(mCounts), fGroups, bColor = F, iSize = 10)
plot(getCommunity(oGr), ig, vertex.label=NA, layout=layout_with_fr,
vertex.frame.color=NA, edge.color='darkgrey')
## centrality diagnostics
## centrality parameters should not be correlated significantly and the location of the central
## genes can be visualized
# look at the graph centrality properties
set.seed(1)
ig = plot.centrality.graph(oGr)
# plot the genes or vertex sizes by fold change
ig = f_igCalculateVertexSizesAndColors(ig, t(mCounts), fGroups, bColor = F, iSize = 10)
set.seed(1)
plot(ig, vertex.label=NA, layout=layout_with_fr, vertex.frame.color=NA, edge.color='darkgrey')
par(p.old)
## the diagnostic plots show the distribution of the centrality parameters
# these diagnostics plots should be looked at in combination with the centrality graphs
plot.centrality.diagnostics(oGr)
# get the centrality parameters
mCent = mPrintCentralitySummary(oGr)
## top vertices based on centrality scores
## get a table of top vertices
dfTopGenes.cent = dfGetTopVertices(oGr, iQuantile = 0.85)
rownames(dfTopGenes.cent) = dfTopGenes.cent$VertexID
# assign metadata annotation to these genes and clusters
dfCluster = getClusterMapping(oGr)
colnames(dfCluster) = c('gene', 'cluster')
rownames(dfCluster) = dfCluster$gene
df = f_dfGetGeneAnnotation(as.character(dfTopGenes.cent$VertexID))
dfTopGenes.cent = cbind(dfTopGenes.cent[as.character(df$ENTREZID),], SYMBOL=df$SYMBOL, GENENAME=df$GENENAME)
dfCluster = dfCluster[as.character(dfTopGenes.cent$VertexID),]
dfTopGenes.cent = cbind(dfTopGenes.cent, Cluster=dfCluster$cluster)
dir.create('Results', showWarnings = F)
write.csv(dfTopGenes.cent, file='Results/Top_Centrality_Genes.csv')
## if we want to look at the expression profiles of the top genes
# plot a heatmap of these top genes
library(NMF)
m1 = mCounts[,as.character(dfTopGenes.cent$VertexID)]
m1 = scale(m1)
m1 = t(m1)
# threshhold the values
m1[m1 < -3] = -3
m1[m1 > 3] = 3
rownames(m1) = as.character(dfTopGenes.cent$SYMBOL)
# draw the heatmap color='-RdBu:50'
aheatmap(m1, color=c('blue', 'black', 'red'), breaks=0, scale='none', Rowv = TRUE,
annColors=NA, Colv=NA)
## in addition to heatmaps the graphs can be plotted
# plot a graph of these top genes
# plot for each contrast i.e. base line vs other level
lev = levels(fGroups)[-1]
m = mCounts
m = apply(m, 2, function(x) f_ivStabilizeData(x, fGroups))
rownames(m) = rownames(mCounts)
par(mar=c(1,1,1,1)+0.1)
for(i in 1:length(lev)){
ig = induced_subgraph(getFinalGraph(oGr), vids = as.character(dfTopGenes.cent$VertexID))
fG = factor(fGroups, levels= c(levels(fGroups)[1], lev[-i], lev[i]) )
ig = f_igCalculateVertexSizesAndColors(ig, t(m), fG, bColor = T, iSize=10)
n = V(ig)$name
lab = f_dfGetGeneAnnotation(n)
V(ig)$label = as.character(lab$SYMBOL)
set.seed(1)
plot(ig, vertex.label.cex=0.2, layout=layout_with_fr, vertex.frame.color='darkgrey', edge.color='lightgrey',
main=paste(lev[i], 'vs', levels(fGroups)[1]))
legend('topright', legend = c('Underexpressed', 'Overexpressed'), fill = c('lightblue', 'pink'))
}
### Looking at the largest clique can be informative in the graph
# plot the graph with location of the clique highlighted
set.seed(1)
ig = plot.graph.clique(oGr)
ig = f_igCalculateVertexSizesAndColors(ig, t(mCounts), fGroups, bColor = F)
par(mar=c(1,1,1,1)+0.1)
set.seed(1)
plot(ig, vertex.label=NA, layout=layout_with_fr, vertex.frame.color=NA, edge.color='lightgrey')
# plot the largest clique at each grouping contrast
lev = levels(fGroups)[-1]
m = mCounts
#m = apply(m, 2, function(x) f_ivStabilizeData(x, fGroups))
#rownames(m) = rownames(mCounts)
par(mar=c(1,1,1,1)+0.1)
for(i in 1:length(lev)){
ig = induced_subgraph(getFinalGraph(oGr), vids = unlist(getLargestCliques(oGr)))
fG = factor(fGroups, levels= c(levels(fGroups)[1], lev[-i], lev[i]) )
ig = f_igCalculateVertexSizesAndColors(ig, t(m), fG, bColor = T, iSize=80)
n = V(ig)$name
lab = f_dfGetGeneAnnotation(n)
V(ig)$label = as.character(lab$SYMBOL)
set.seed(1)
plot(ig, layout=layout_with_fr, main=paste(lev[i], 'vs', levels(fGroups)[1]))
legend('topright', legend = c('Underexpressed', 'Overexpressed'), fill = c('lightblue', 'pink'))
}
## instead of looking at individual genes we can look at clusters
## we can look at the problem from the other direction and look at clusters instead of genes
# some sample plots
# mean expression of groups in every cluster
par(p.old)
plot.mean.expressions(oGr, t(mCounts), fGroups, legend.pos = 'bottomleft', main='Total Change in Each Cluster', cex.axis=0.7)
# only significant clusters
par(mar=c(7, 3, 2, 2)+0.1)
plot.significant.expressions(oGr, t(mCounts), fGroups, main='Significant Clusters', lwd=1, bStabalize = T, cex.axis=0.7, p.cut=0.02,
legend.pos='bottomright')
# principal component plots
pr.out = plot.components(oGr, t(mCounts), fGroups, bStabalize = T, p.cut=0.02)
par(mar=c(4,2,4,2))
biplot(pr.out, cex=0.8, cex.axis=0.8, arrow.len = 0)
# plot summary heatmaps
# marginal expression level in each cluster
plot.heatmap.significant.clusters(oGr, t(mCounts), fGroups, bStabalize = F, p.cut=0.02)
# plot variance of cluster
m = getSignificantClusters(oGr, t(mCounts), fGroups, p.cut=0.02)$clusters
#m = getClusterMarginal(oGr, t(mCounts))
# plot.cluster.variance(oGr, m[c('1280218', '1280215'),], fGroups, log = F)
csClust = rownames(m)
length(csClust)
pdf('Temp/clusters.var.pdf')
i = 1
temp = t(as.matrix(m[csClust[i],]))
rownames(temp) = csClust[i]
plot.cluster.variance(oGr, temp, fGroups, log=FALSE); i = i+1
par(mfrow=c(2,2))
boxplot.cluster.variance(oGr, m, fGroups, log=T, iDrawCount = length(csClust))
dev.off(dev.cur())
# cluster names
i = which(dfReactome.sub$V2 %in% csClust)
dfCluster.name = dfReactome.sub[i,c('V2', 'V4')]
dfCluster.name = dfCluster.name[!duplicated(dfCluster.name$V2),]
rownames(dfCluster.name) = NULL
dfCluster.name
#### plot a graph of clusters
#m = getSignificantClusters(oGr, t(mCounts), fGroups, bStabalize = T)
dfCluster = getClusterMapping(oGr)
colnames(dfCluster) = c('gene', 'cluster')
rownames(dfCluster) = dfCluster$gene
# how many genes in each cluster
sort(table(dfCluster$cluster))
#csClust = rownames(m$clusters)
csClust = as.character(unique(dfCluster$cluster))
# graph
lev = levels(fGroups)[-1]
m = mCounts
#m = apply(m, 2, function(x) f_ivStabilizeData(x, fGroups))
#rownames(m) = rownames(mCounts)
par(mar=c(1,1,1,1)+0.1)
for(i in 1:length(lev)){
ig = getClusterSubgraph(oGr, csClust)
fG = factor(fGroups, levels= c(levels(fGroups)[1], lev[-i], lev[i]) )
ig = f_igCalculateVertexSizesAndColors(ig, t(m), fG, bColor = T, iSize=10)
n = V(ig)$name
lab = f_dfGetGeneAnnotation(n)
V(ig)$label = as.character(lab$SYMBOL)
set.seed(1)
plot(ig, vertex.label.cex=0.14, layout=layout_with_fr, vertex.frame.color='darkgrey', edge.color='lightgrey',
main=paste(lev[i], 'vs', levels(fGroups)[1]))
legend('topright', legend = c('Underexpressed', 'Overexpressed'), fill = c('lightblue', 'pink'))
}
df = f_dfGetGeneAnnotation(as.character(dfCluster$gene))
dfCluster = cbind(dfCluster[as.character(df$ENTREZID),], SYMBOL=df$SYMBOL, GENENAME=df$GENENAME)
write.csv(dfCluster, file='Results/Clusters.csv')
##### Various plots for one cluster of choice
csClust = '913531'
lev = levels(fGroups)[-1]
m = mCounts
#m = apply(m, 2, function(x) f_ivStabilizeData(x, fGroups))
#rownames(m) = rownames(mCounts)
par(mar=c(1,1,1,1)+0.1, mfrow=c(1,2))
for(i in 1:length(lev)){
ig = getClusterSubgraph(oGr, csClust)
fG = factor(fGroups, levels= c(levels(fGroups)[1], lev[-i], lev[i]) )
ig = f_igCalculateVertexSizesAndColors(ig, t(m), fG, bColor = T, iSize=60)
n = V(ig)$name
lab = f_dfGetGeneAnnotation(n)
V(ig)$label = as.character(lab$SYMBOL)
set.seed(1)
plot(ig, vertex.label.cex=0.7, layout=layout_with_fr, vertex.frame.color='darkgrey', edge.color='lightgrey',
main=paste(lev[i], 'vs', levels(fGroups)[1]))
legend('topright', legend = c('Underexpressed', 'Overexpressed'), fill = c('lightblue', 'pink'))
}
par(p.old)
# heatmap of the genes
ig.sub = getClusterSubgraph(oGr, csClustLabel = csClust)
n = f_dfGetGeneAnnotation(V(ig.sub)$name)
mC = t(mCounts)
mC = mC[n$ENTREZID,]
rownames(mC) = n$SYMBOL
mC = t(scale(t(mC)))
# threshhold the values
mC[mC < -3] = -3
mC[mC > +3] = +3
# draw the heatmap
hc = hclust(dist(mC))
aheatmap(mC, color=c('blue', 'black', 'red'), breaks=0, scale='none', Rowv = hc, annRow=NA,
annColors=NA, Colv=NA)
# if we want to plot variance of one gene at a time
n = f_dfGetGeneAnnotation(V(ig.sub)$name)
mC = t(mCounts)
mC = mC[n$ENTREZID,]
rownames(mC) = n$SYMBOL
rn = rownames(mC)
length(rn)
i = 1
par(p.old)
par(mfrow=c(2,2))
boxplot.cluster.variance(oGr, (mC), fGroups, iDrawCount = length(rn))
temp = t(as.matrix(mC[rn[i],]))
rownames(temp) = rn[i]
plot.cluster.variance(oGr, temp, fGroups, log=FALSE); i = i+1
|
df163d10aa9f4014a798cfc1cc3fb13a4cc3717e
|
246481254574ea04bf8d61d2001592bf433f2c04
|
/R/ancillary.R
|
a59e3022dd471a0f05d15c3261ec748f1854ae39
|
[] |
no_license
|
gvdr/EHA
|
898bdd5b3fe3b70db528e3f7f64a71404bda2f76
|
fefa353b946553fd9459b68e1a600d27a2a81b7c
|
refs/heads/master
| 2021-01-09T20:39:37.875796
| 2016-07-25T16:01:45
| 2016-07-25T16:01:45
| 64,148,563
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 979
|
r
|
ancillary.R
|
#' This files provide basic functions to load multiple
#' libraries, source all the files in directory, ...
#' `sourceDir()` sources all the `.R` files in a directory
sourceDir <- function(path, trace = TRUE, ...) {
for (nm in list.files(path, pattern = "\\.[Rr]$")) {
if(trace) cat(nm,":")
source(file.path(path, nm), ...)
if(trace) cat("\n")
}
}
#' `try_and_install()` testa for the presence of a package in the library
#' and, if not present, installa it.
try_and_install <- function(package_names){
installed_packages <- installed.packages()
installed <- character(0)
for(Name in package_names){
if(!Name %in% installed_packages){
installed <- c(installed,Name)
install.packages(Name,
verbose = F)
}
}
if(length(installed) == 0){return("Nothing to install.")}
installed_text <- paste(installed,
sep="\n")
return(paste("All packages installed:",installed_text))
}
|
610f24830389588191c55dd49e0f5a36ad65eef4
|
3ffe61be1846789242fba8c3bb71663b2053d074
|
/practice_chap12(๋จ๊ณ๊ตฌ๋ถ์ง๋)/ex01.R
|
1d4e062e2b7a0f38bcc69d2fde2114492653e95c
|
[] |
no_license
|
harrycjy1/R-bootcamp
|
244cefae2b1785ce3b31acf57753447febf2f871
|
2db273a53188dd1fd825b35f8a3bdb3ba308fb0e
|
refs/heads/master
| 2020-04-02T21:07:22.552300
| 2018-10-26T06:48:57
| 2018-10-26T06:48:57
| 154,788,516
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,326
|
r
|
ex01.R
|
# A choropleth ("step-classified") map shades each region by the value of
# a statistic, making regional differences in characteristics such as
# population or income visible at a glance.  Here we build a choropleth
# of US state-level violent crime rates.
# (Fix: in the original, a comment's continuation lines had lost their
# leading `#`, which made the script a syntax error.)

# Prepare the package used to draw choropleth maps.
install.packages("ggiraphExtra")
library(ggiraphExtra)

# Prepare the US state crime data.  The built-in USArrests dataset holds
# 1973 violent crime rates for each US state.
str(USArrests)
head(USArrests)

library(tibble)
# Move the row names into a `state` column, producing a regular data frame.
crime <- rownames_to_column(USArrests, var = "state")
crime
# Lower-case the state names so they match the map data used below.
crime$state <- tolower(crime$state)
crime
str(crime)

# Prepare the US state map data.  The maps package bundled with R ships
# state outlines; ggplot2::map_data() loads them as a data frame.
library(ggplot2)
states_map <- map_data("state")
str(states_map)

# Draw the choropleth with ggiraphExtra::ggChoropleth().
ggChoropleth(data = crime,          # data to display on the map
             aes(fill = Murder,     # variable mapped to the fill colour
                 map_id = state),   # region key variable
             map = states_map)      # map (polygon) data

# Interactive choropleth: supports hover/zoom with the mouse.  In the
# Viewer pane, Export -> "Save as Web Page..." writes an HTML file; open
# it in a web browser (e.g. Chrome) if rendering looks broken in RStudio.
ggChoropleth(data = crime,
             aes(fill = Murder,
                 map_id = state),
             map = states_map,
             interactive = TRUE)
|
4f175e5db073a025d6ddad50a25fcbfaff9239a6
|
5acd5558e4a5297af6d038ba6eeaa19cf3c7d3e4
|
/prep/prep_testis_common.R
|
09efb5627f5eba2c61fccd647473f2fbb22aca51
|
[
"BSD-3-Clause"
] |
permissive
|
tkonopka/correcting-ml
|
2b9eaf5830b59326eb431e9c678a59d6d24fe5f3
|
634f649d744cbff9e02b83c26047da7754194135
|
refs/heads/main
| 2023-06-28T19:45:04.803493
| 2021-07-29T09:09:59
| 2021-07-29T09:09:59
| 389,623,137
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 234
|
r
|
prep_testis_common.R
|
# Helper functions used in the testis scripts.

# Build a path under ../results for a "testis_"-prefixed file name.
testis_path <- function(x) {
  file.path("..", "results", paste("testis_", x, sep = ""))
}

# Template (with a {VARIANT} placeholder) for the testis data files.
testis_data_template <- testis_path("data_{VARIANT}.tsv.gz")
|
e29be65ead152003ff471e84de75ed216637c2df
|
0af8332ed8cb059282d6b870a9eb843c1e6680bc
|
/Rpkg/R/mbl_aesthetics.R
|
a56d97db2c30b6aed78dc2dd1948ddfc1c4bb7de
|
[] |
no_license
|
tomsing1/mbl2018
|
44383313845897323be29fc9263e309276d38418
|
6c69e0693abf75955ad22b3e502b33a3c3c0e5d5
|
refs/heads/master
| 2020-03-19T02:22:25.456235
| 2018-06-20T17:12:27
| 2018-06-20T17:12:27
| 135,623,059
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,852
|
r
|
mbl_aesthetics.R
|
# Helper functions to create categorical maps for shapes and colors

#' Maps colors to categorical values
#'
#' `map` can be an RColorBrewer palette name or a vector of colors.
#' Colors are recycled when there are more levels than colors.
#'
#' @export
#' @param vals a vector of values to create a colormap over.  Currently
#'   restricted to categorical vectors (character, factor); numeric input
#'   raises an error.
#' @param map a map specification; defaults to a combination of
#'   RColorBrewer Set1 and Set2 colors.
#' @return a named character vector whose `names()` are the unique levels
#'   of `vals` and whose values are the colors they map to.
mbl_create_color_map <- function(vals, map = NULL) {
  if (!is.categorical(vals)) {
    stop("Not mapping real values yet")
  }
  if (is.null(map)) {
    map <- mucho.colors()
  }
  if (is.brewer.map.name(map)) {
    # ask for more colors than the palette has; suppress the warning
    map <- suppressWarnings(brewer.pal(20, map))
  }
  if (!is.character(map)) {
    stop("The color map should be a list of colors by now")
  }
  xref.discrete.map.to.vals(map, vals)
}
#' Maps shapes to categorical values
#'
#' Maps the unique levels of `vals` to different shapes.  Only works for
#' categorical variables.  Shapes are plotly symbol identifiers (1:32);
#' see http://www.r-graph-gallery.com/125-the-plotlys-symbols/ for a
#' gallery of the symbols.
#'
#' @export
#' @param vals a vector of categorical values
#' @param map a map definition; by default a curated ordering of the
#'   plotly symbol identifiers.
#' @return a named vector: `names()` are the unique values in `vals`,
#'   values are the shape identifiers they map to.
mbl_create_shape_map <- function(vals, map = NULL) {
  stopifnot(is.categorical(vals))
  if (is.null(map)) {
    # plotly symbols run 1:32; drop the ones that look too similar to
    # others, then promote the most visually diverse symbols up front
    usable <- setdiff(1:32, c(14:16, 28, 20, 32))
    preferred <- c(27, 3, 17, 1, 2, 13)
    map <- c(preferred, setdiff(usable, preferred))
  }
  xref.discrete.map.to.vals(map, vals)
}
#' Is `x` the name of an RColorBrewer palette?
#' @noRd
#' @importFrom RColorBrewer brewer.pal.info
is.brewer.map.name <- function(x) {
  if (!is.character(x) || length(x) != 1L) {
    return(FALSE)
  }
  x %in% rownames(brewer.pal.info)
}
#' Default categorical palette: RColorBrewer Set1 + Set2 (16 colors).
#'
#' @noRd
#' @importFrom RColorBrewer brewer.pal
#' @return a character vector of 16 colors
mucho.colors <- function() {
  s1 <- RColorBrewer::brewer.pal(9, "Set1")
  s2 <- RColorBrewer::brewer.pal(8, "Set2")
  # Removed: a Set3 palette was fetched here but never used (dead code).
  # the sixth Set1 color is a yellow that is too bright for anyone's good
  c(s1[-6], s2)
}
#' Recycle a discrete map over the unique levels of a categorical vector
#'
#' @noRd
#' @param map character (colors) or integer (shapes) vector; optionally
#'   named, where names are entries expected to appear in `vals`
#' @param vals a categorical vector (character or factor)
#' @return a vector like `map`, named by the unique levels of `vals`,
#'   with entries recycled when `length(unique(vals)) > length(map)`
xref.discrete.map.to.vals <- function(map, vals) {
  stopifnot(is.categorical(vals))
  stopifnot(is.character(map) || is.integerish(map))
  # color maps are character, shape maps are integer
  map.type <- if (is.character(map)) "char" else "int"
  # factor levels fix the output order; otherwise sort the unique values
  if (is.factor(vals)) {
    uvals <- levels(vals)
  } else {
    uvals <- sort(unique(as.character(vals)))
  }
  if (is.null(names(map))) {
    # unnamed map: nothing is pre-assigned, every entry is available
    out.map <- if (map.type == "char") character() else integer()
    rest.map <- map
  } else {
    # named map: honor entries whose names occur in vals; the rest
    # become spares available for recycling
    out.map <- map[names(map) %in% uvals]
    rest.map <- unname(map[!names(map) %in% names(out.map)])
  }
  # levels still without an assignment get recycled entries, spares first
  remain <- setdiff(uvals, names(out.map))
  if (length(remain)) {
    cols <- unname(c(rest.map, out.map))
    # 1-based modular indexing: map position i to entry ((i-1) mod k) + 1
    idxs <- seq(remain) %% length(cols)
    idxs[idxs == 0] <- length(cols)
    rest.map <- cols[idxs]
    names(rest.map) <- remain
    out.map <- c(out.map, rest.map)
  }
  out.map
}
|
b2df9c6b42a7483cf6c0d371a75184b44926e097
|
eb08ab7f3a97936b26ebacfe098e77e9a1754c8d
|
/R/utils.R
|
687323f05a82ab177c9f2503ca07e8755a9de69f
|
[] |
no_license
|
marcalva/diem
|
2bc0cd6ba3059984ad1ef17ca42d3399a7acef63
|
a9a4d9d5f4d2a72a553e8b41afe10785c19504e8
|
refs/heads/master
| 2022-12-23T19:42:53.503040
| 2022-12-20T04:35:28
| 2022-12-20T04:35:28
| 184,798,028
| 9
| 6
| null | null | null | null |
UTF-8
|
R
| false
| false
| 504
|
r
|
utils.R
|
#' fraction of logs
#'
#' Convert a vector of log-space values into normalized fractions
#' (a softmax), subtracting the maximum first for numerical stability.
#'
#' @param x numeric vector of log-space values
#' @return numeric vector of fractions
#' @export
fraction_log <- function(x){
    # Degenerate case: everything is log(0); return a uniform split
    # rather than the 0/0 = NaN the normal path would produce.
    if (length(x) > 0 && all(x == -Inf)) {
        return(rep(1 / length(x), length(x)))
    }
    # Only +Inf needs special handling (it would dominate all mass).
    # -Inf (log of a zero probability) is handled exactly by the normal
    # path, since exp(-Inf - max) == 0.  The original tested
    # is.infinite(x), which also matched -Inf and wrongly assigned those
    # zero-probability entries a fraction of 1.
    xinf <- is.infinite(x) & x > 0
    if (any(xinf)){
        frac <- rep(0, length(x))
        frac[which(xinf)] <- 1
    } else {
        x_c = x - max(x)
        x_c = exp(x_c)
        frac = x_c / sum(x_c);
    }
    return(frac);
}
#' sum of logs
#'
#' Compute log(sum(exp(x))) stably by factoring out the maximum.
#'
#' @param x numeric vector of log-space values
#' @export
sum_log <- function(x){
    largest <- max(x)
    log(sum(exp(x - largest))) + largest
}
|
4a2216968dcc155d47f27a1f79107075465c62a2
|
5e1ef84be4c398fc28b79d162e10a888c10a043b
|
/euk_heuristic_fulldataset.r
|
495f7c8d6a8fc2d5a8979e6515f24224bbf0f8e8
|
[
"CC0-1.0",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-us-govt-public-domain"
] |
permissive
|
Joel-Barratt/Eukaryotyping
|
6bae5302b4f4dbf1065f8f338e35e17dd62d503a
|
ce9b695c6b525ec4862cdf49bc9a71585873f2da
|
refs/heads/master
| 2023-02-20T06:19:11.039445
| 2023-02-04T19:53:06
| 2023-02-04T19:53:06
| 194,131,154
| 2
| 1
| null | 2021-01-19T15:06:01
| 2019-06-27T16:43:04
|
R
|
UTF-8
|
R
| false
| false
| 8,939
|
r
|
euk_heuristic_fulldataset.r
|
#### Calculate matrix using Barratt's Unsupervised Heuristic Mixture Model
# Expects in the calling environment (set up by the driver script):
#   data      - genotype table, one row per isolate, allele columns per locus
#   nloci     - number of loci
#   locinames - character vector of locus-name prefixes used in colnames(data)
# Builds, per locus j:
#   alleles[[j]]     - unique non-missing alleles observed at the locus
#   frequencies[[j]] - relative frequency of each of those alleles
alleles = list()
frequencies = list()
for (j in 1:nloci) {
  # columns for locus j are found by matching the locus-name prefix
  locicolumns = grepl(paste(locinames[j],"",sep=""),colnames(data))
  raw_alleles = c(as.matrix(data[,locicolumns]))
  # both the literal string "NA" and 0 encode missing allele calls
  raw_alleles[raw_alleles == "NA"] = NA
  raw_alleles[raw_alleles == 0] = NA
  alleles[[j]] = unique(raw_alleles[!is.na(raw_alleles)])
  frequencies[[j]] = sapply(alleles[[j]], function(x) sum(raw_alleles == x,na.rm=TRUE))
  # normalize counts to relative frequencies
  frequencies[[j]] = frequencies[[j]] / sum(frequencies[[j]])
}
# observeddatamatrix[[j]]: (isolate x allele-slot) matrix of raw calls at
# locus j, with "NA"/0 recoded to NA
observeddatamatrix = list()
for (j in 1:nloci) {
  locus = locinames[j]
  locicolumns = grepl(paste(locus,"",sep=""),colnames(data))
  oldalleles = as.vector(data[,locicolumns])
  oldalleles [oldalleles == "NA"] = NA
  oldalleles [oldalleles == 0] = NA
  # a single-column selection drops to a vector; restore matrix shape
  if (length(dim(oldalleles)[2]) == 0) {
    oldalleles = matrix(oldalleles,length(oldalleles),1)
  }
  observeddatamatrix[[j]] = oldalleles
}
# m[j]: per-locus multiplicity parameter of the heuristic model (all 1 in
# this unsupervised variant); assigned with <<- so it is visible globally
m <<- rep(1,nloci)
# H_nu[j]: Shannon entropy (base 2) of the allele frequencies at locus j;
# used below to weight per-locus distances by locus informativeness
H_nu = sapply(1:nloci, function (j) -sum(frequencies[[j]] * logb(frequencies[[j]],2)))
# Heuristic distance between isolate1 and isolate2 at locus j.
# Reads globals: observeddatamatrix, frequencies, alleles, H_nu, m, nids,
# and ploidy (assumed defined by the driver script -- not visible here).
# Returns NA when either isolate has no data at this locus.
sub_per_locus = function(isolate1,isolate2,j) {
  # non-missing allele calls for each isolate at this locus
  v1 = observeddatamatrix[[j]][isolate1,]
  v1 = v1[!is.na(v1)]
  # NOTE(review): p1/p2 are computed but never used below
  p1 = frequencies[[j]][match(v1,alleles[[j]])]
  v2 = observeddatamatrix[[j]][isolate2,]
  v2 = v2[!is.na(v2)]
  p2 = frequencies[[j]][match(v2,alleles[[j]])]
  if (ploidy[j] > 1) {
    # multi-copy (ploidy > 1) branch
    # x: total distinct alleles across the pair; n: smaller per-isolate count;
    # y: number of shared alleles
    x = length(unique(v1)) + length(unique(v2))
    n = min(length(unique(v1)),length(unique(v2)))
    # w, jj, z and delta_nu_raw encode the model's case analysis as
    # indicator arithmetic (each (cond) term is 0/1); see the
    # Eukaryotyping publication for the derivation -- not re-derived here
    w = x * (n > 1) + 4 * (n == 1) * (x == 2) + (1+x) * (n== 1) * (x > 2)
    jj = 2 * (m[j] == 1) + m[j] * (m[j] > 1)
    y = length(intersect(v1,v2))
    z = 3 * (((2*(n == 1) + 1 * (y == 1) * (x > 2)))==3) + 2 * (y *(n>1)*(jj>=y)+jj*(n > 1)* (y > jj) + jj * (n==1) * (x==2) * (y==1))
    delta_nu_raw = w * (y == 0) + 2 * jj * (y > 0) + sum(sapply(jj:(2*jj), function (ii) -ii*(z == ii)))
    # P_nu: squared fraction of isolates (among those with data at this
    # locus) that carry every allele shared by this pair -- a population
    # frequency correction applied when the pair shares alleles
    shared_alleles = intersect(v1 , v2)
    if (length(shared_alleles) > 0) {
      temp_shared = sapply(1:nids, function (x) sum(shared_alleles %in% as.matrix(observeddatamatrix[[j]][x,])))
      notmissing = sapply(1:nids, function (x) sum(!is.na(observeddatamatrix[[j]][x,])))
      P_nu = (sum(temp_shared == length(shared_alleles)) / sum(notmissing != 0))^2
    } else {
      P_nu = 1
    }
    k = 1 * (y == 0) + P_nu * (y > 0)
    # entropy-weighted per-locus distance
    delta_nu = H_nu[j] * ( delta_nu_raw * (delta_nu_raw > 0) + P_nu * (delta_nu_raw == 0))*k
    delta = delta_nu
  } else {
    # single-copy (haploid) branch: raw penalty only when no allele is shared
    delta_ex_raw = 0
    x = length(unique(v1)) + length(unique(v2))
    y = length(intersect(v1,v2))
    delta_ex_raw = 2*x*(y == 0)
    # P_ex: same population-frequency correction as P_nu above
    shared_alleles = intersect(v1 , v2)
    if (length(shared_alleles) > 0) {
      temp_shared = sapply(1:nids, function (x) sum(shared_alleles %in% as.matrix(observeddatamatrix[[j]][x,])))
      notmissing = sapply(1:nids, function (x) sum(!is.na(observeddatamatrix[[j]][x,])))
      P_ex = (sum(temp_shared == length(shared_alleles)) / sum(notmissing != 0))^2
    } else {
      P_ex = 1
    }
    k = 1 * (y == 0) + P_ex * (y > 0)
    delta_ex = H_nu[j] * ( delta_ex_raw * (delta_ex_raw > 0) + P_ex * (delta_ex_raw == 0))*k
    delta = delta_ex
  }
  # no data for one of the isolates at this locus -> distance undefined
  if (sum(!is.na(v1)) == 0 | sum(!is.na(v2)) == 0) { delta = NA }
  delta
}
# Distances between one pair of isolates: one value per locus, plus their
# total appended as the last element.  Prints fractional progress through
# the nids x nids pair grid.  Reads globals nids and nloci.
pairwisedistance_heuristic = function(isolate1, isolate2) {
  progress <- ((isolate2 - 1) * nids + isolate1) / (nids * nids)
  print(progress)
  per_locus <- sapply(1:nloci, function(locus) sub_per_locus(isolate1, isolate2, locus))
  c(per_locus, sum(per_locus))
}
####### MODIFY NUMBER OF CORES USED BELOW - mc.cores=##
# Enumerate all unordered pairs of isolates (self-pairs included) and
# compute each pair's per-locus + total distances in parallel via
# mclapply; results come back as one column per pair.
allpossiblepairs = expand.grid(1:nids,1:nids)
allpossiblepairs = unique(allpossiblepairs[allpossiblepairs[,1] <= allpossiblepairs[,2],])
pairwisedistancevector = do.call(cbind,mclapply(1:dim(allpossiblepairs)[1], function (x) pairwisedistance_heuristic(allpossiblepairs[x,1],allpossiblepairs[x,2]),mc.cores=12))
# Unpack the column-per-pair results into nloci+1 symmetric nids x nids
# matrices: components [[1]]..[[nloci]] hold per-locus distances and
# [[nloci+1]] holds the summed distance.
pairwisedistancematrix_components = list()
for (j in 1:(nloci+1)) {
  pairwisedistancematrix_temp = matrix(NA,nids,nids)
  # fill both triangles so each component matrix is symmetric
  sapply(1:dim(allpossiblepairs)[1], function (x) pairwisedistancematrix_temp[allpossiblepairs[x,1],allpossiblepairs[x,2]] <<- pairwisedistancevector[j,x])
  sapply(1:dim(allpossiblepairs)[1], function (x) pairwisedistancematrix_temp[allpossiblepairs[x,2],allpossiblepairs[x,1]] <<- pairwisedistancevector[j,x])
  pairwisedistancematrix_components[[j]] = pairwisedistancematrix_temp
}
#### impute missing values
# First pass.  whichna = isolates whose summed-distance row is entirely NA
# (sub_per_locus returns NA for every pair involving an isolate with no
# data at some locus, so the sum is NA across the board).  For each such
# isolate, impute the missing loci from "matching samples": other isolates
# whose allele sets are identical at every locus the target does have data
# for.  If none exist, fall back to the grand mean of the component matrix.
pairwisedistancematrix_components_imputed = pairwisedistancematrix_components
whichna = which(rowSums(is.na(pairwisedistancematrix_components[[nloci+1]])) == nids)
imputemissing = function(isolate1) {
  # loci at which this isolate has no allele calls at all
  missingloci = which(sapply(1:nloci, function (j) sum(!is.na(observeddatamatrix[[j]][isolate1,]))) == 0)
  nonmissingloci = (1:nloci)[-missingloci]
  # isolates whose allele sets equal this isolate's at every genotyped locus
  matchingsamples = which(rowSums(rbind(sapply(nonmissingloci, function (j) sapply(1:nids, function (x) (setequal(observeddatamatrix[[j]][x,],observeddatamatrix[[j]][isolate1,]))))))==length(nonmissingloci))
  # never impute from isolates that are themselves all-NA
  matchingsamples = setdiff( matchingsamples , whichna)
  for (j in missingloci ) {
    if (length(matchingsamples) > 0) {
      # fill row, column and diagonal with means over the matching samples
      sapply(1:nids, function (x) pairwisedistancematrix_components_imputed[[j]][isolate1,x] <<- mean(pairwisedistancematrix_components[[j]][x,matchingsamples],na.rm=TRUE))
      sapply(1:nids, function (x) pairwisedistancematrix_components_imputed[[j]][x,isolate1] <<- mean(pairwisedistancematrix_components[[j]][x,matchingsamples],na.rm=TRUE))
      pairwisedistancematrix_components_imputed[[j]][isolate1,isolate1] <<- mean(diag(pairwisedistancematrix_components_imputed[[j]])[matchingsamples],na.rm=TRUE)
    } else {
      # no genotype-compatible sample: use the overall mean as a neutral fill
      pairwisedistancematrix_components_imputed[[j]][isolate1,] <<-mean(pairwisedistancematrix_components[[j]],na.rm=TRUE)
      pairwisedistancematrix_components_imputed[[j]][,isolate1] <<-mean(pairwisedistancematrix_components[[j]],na.rm=TRUE)
      pairwisedistancematrix_components_imputed[[j]][isolate1,isolate1] <<-mean(diag(pairwisedistancematrix_components_imputed[[j]]),na.rm=TRUE)
    }
  }
}
sapply(whichna, imputemissing)
# Sum the once-imputed per-locus matrices; any rows that still contain NA
# mark isolates needing a second imputation pass (e.g. pairs whose only
# potential matches were themselves excluded as all-NA in pass one).
temppairwisedistancematrix = matrix(0,nids,nids)
for (j in 1:(nloci)) {
  temppairwisedistancematrix = temppairwisedistancematrix + pairwisedistancematrix_components_imputed[[j]]
}
whichna2 = which(rowSums(is.na(temppairwisedistancematrix )) != 0)
# Second pass: same logic as imputemissing(), but reads from the already
# once-imputed matrices so the means it averages are defined.
pairwisedistancematrix_components_imputed_secondpass = pairwisedistancematrix_components_imputed
imputemissing_secondpass = function(isolate1) {
  # loci at which this isolate has no allele calls at all
  missingloci = which(sapply(1:nloci, function (j) sum(!is.na(observeddatamatrix[[j]][isolate1,]))) == 0)
  nonmissingloci = (1:nloci)[-missingloci]
  # isolates whose allele sets equal this isolate's at every genotyped locus
  matchingsamples = which(rowSums(rbind(sapply(nonmissingloci, function (j) sapply(1:nids, function (x) (setequal(observeddatamatrix[[j]][x,],observeddatamatrix[[j]][isolate1,]))))))==length(nonmissingloci))
  matchingsamples = setdiff( matchingsamples , whichna)
  for (j in missingloci ) {
    if (length(matchingsamples) > 0) {
      # fill row, column and diagonal from the pass-one imputed matrices
      sapply(1:nids, function (x) pairwisedistancematrix_components_imputed_secondpass[[j]][isolate1,x] <<- mean(pairwisedistancematrix_components_imputed[[j]][x,matchingsamples],na.rm=TRUE))
      sapply(1:nids, function (x) pairwisedistancematrix_components_imputed_secondpass[[j]][x,isolate1] <<- mean(pairwisedistancematrix_components_imputed[[j]][x,matchingsamples],na.rm=TRUE))
      pairwisedistancematrix_components_imputed_secondpass[[j]][isolate1,isolate1] <<- mean(diag(pairwisedistancematrix_components_imputed_secondpass[[j]])[matchingsamples],na.rm=TRUE)
    } else {
      # no genotype-compatible sample: use the overall mean as a neutral fill
      pairwisedistancematrix_components_imputed_secondpass[[j]][isolate1,] <<-mean(pairwisedistancematrix_components_imputed[[j]],na.rm=TRUE)
      pairwisedistancematrix_components_imputed_secondpass[[j]][,isolate1] <<-mean(pairwisedistancematrix_components_imputed[[j]],na.rm=TRUE)
      pairwisedistancematrix_components_imputed_secondpass[[j]][isolate1,isolate1] <<-mean(diag(pairwisedistancematrix_components_imputed_secondpass[[j]]),na.rm=TRUE)
    }
  }
}
sapply(whichna2, imputemissing_secondpass)
# calculate final
# Sum the twice-imputed per-locus matrices into the final pairwise
# distance matrix.
finalpairwisedistancematrix = matrix(0,nids,nids)
for (j in 1:(nloci)) {
  finalpairwisedistancematrix = finalpairwisedistancematrix + pairwisedistancematrix_components_imputed_secondpass[[j]]
}
#pairwisedistancematrix2 = sapply(1:nids, function (x) sapply(1:nids, function (y) pairwisedistance_heuristic(x,y)))
# Label rows/columns with the sample ids.  (Fix: the original labelled
# `pairwisedistancematrix`, an object never defined in this script, which
# raised "object not found" and left the result unlabeled; the intended
# target is the final matrix computed above.)
colnames(finalpairwisedistancematrix) = ids
rownames(finalpairwisedistancematrix) = ids
#write.csv(finalpairwisedistancematrix,"pairwisedistancematrix_heuristic.csv")
Heuristic_pairwisedistancematrix = finalpairwisedistancematrix
#normalized_finalpairwisedistancematrix <- finalpairwisedistancematrix/(max(finalpairwisedistancematrix))
#colnames(normalized_finalpairwisedistancematrix) <- ids
#rownames(normalized_finalpairwisedistancematrix) <- ids
#write.csv(normalized_finalpairwisedistancematrix,"Heuristic_pairwisedistancematrix_norm.csv")
print("Calculation of heuristic matrix complete")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.