blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1eee8ce799b18280032a9a767dc5341ffeb5e8f2
|
a84842eeb58542b8598497c4208675a8afb3a1a7
|
/0_processNTS.R
|
9054ddbcf9930c60d9117e0ed5831b54d34ff5ea
|
[] |
no_license
|
ITHIM/TravelSurvey_BaselineBuild
|
6d1b1f8d02a20d169a2afdd903571b4b0df2f2b7
|
e4d2a446fa13de85548d7a318fc2cac990a80f02
|
refs/heads/master
| 2021-01-19T10:35:44.321472
| 2018-05-01T11:27:26
| 2018-05-01T11:27:26
| 82,195,993
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,437
|
r
|
0_processNTS.R
|
####################### REQUIREMENTS ###############################
# a 'data' folder with all the sources from the specific Travel Survey
# in .tab format (for .csv, .Rds, .xls..... replace reading commands)
# Script subsets to years 2004-2014 *ONLY* when using the latest release NTS2014
#rm(list=ls())
# define data path
# Read data from v drive
# datapath ='V:/Studies/MOVED/HealthImpact/Data/National_Travel_Survey_2014/2014/tab/'
# Read from local dir
datapath <- 'data/National_Travel_Survey_2014/2014/tab/'
# see all files; escape the dot and anchor the pattern so only real ".tab"
# files match (the original '.tab' regex also matched e.g. "mytabx")
ficheros <- dir(path = datapath, pattern = '\\.tab$')
# reads all key NTS files with data 2004-2014
# days the trips were made
day2014 <- read.table(file.path(datapath, "day.tab"), sep = "\t", header = TRUE)
day2014 <- subset(day2014, subset = SurveyYear >= 2004) # include years 2004-2014
# individuals making the trips
ind2014 <- read.table(file.path(datapath, "individual.tab"), sep = "\t", header = TRUE)
# 18-84 y.o. + with travel diary
# (per the original comment, Age_B01ID bands 8-20 correspond to ages 18-84 and
#  W1 == 1 flags a completed travel diary -- confirm against the NTS codebook)
ind2014 <- subset(ind2014, subset = Age_B01ID >= 8 & Age_B01ID < 21 & SurveyYear >= 2004 & W1 == 1)
# stages per trip
stage2014 <- read.table(file.path(datapath, "stage.tab"), sep = "\t", header = TRUE)
stage2014 <- subset(stage2014, SurveyYear >= 2004)
# households the individuals belong to
household2014 <- read.table(file.path(datapath, "household.tab"), sep = "\t", header = TRUE, as.is = TRUE)
household2014 <- subset(household2014, SurveyYear >= 2004)
|
9d167fbda4654ab6e6c9e4f45424c08716f3bebd
|
3b0be5721a5478b1bac4e6b08cdcd1b88e3a4046
|
/inst/snippets/Example2.19.R
|
b8bc0198b75779efca0649a741a855f1d07725f4
|
[] |
no_license
|
stacyderuiter/Lock5withR
|
b7d227e5687bc59164b9e14de1c8461cb7861b14
|
417db714078dc8eaf91c3c74001b88f56f09b562
|
refs/heads/master
| 2020-04-06T06:33:39.228231
| 2015-05-27T11:41:42
| 2015-05-27T11:41:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 166
|
r
|
Example2.19.R
|
# Example 2.19 (Lock5withR): z-scores for two ICU admission measurements.
# NOTE(review): uses the mosaic formula interface for mean()/sd() and the
# ICUAdmissions data set -- requires the mosaic and Lock5withR packages.
# z-score of a systolic blood pressure of 204
( 204 - mean(~Systolic, data=ICUAdmissions) ) / sd(~Systolic, data=ICUAdmissions)
# z-score of a heart rate of 52
( 52 - mean(~HeartRate, data=ICUAdmissions) ) / sd(~HeartRate, data=ICUAdmissions)
|
93d238d52f3fefa26b5d3458843939fab9defed5
|
fb61adddae4c5a98c0996be7e89f3172dd51ab33
|
/R1.NameConverter.r
|
e1a3a271aa457e253af5be2abe5a745c5c566d9f
|
[] |
no_license
|
debjitray/Copy-Number-Analysis
|
589572caaac084ba5238e121526b69a5668c8e02
|
6032fc6312f8bf1c9754b18ec444182ef981cc40
|
refs/heads/master
| 2021-01-11T14:25:30.302596
| 2017-02-09T03:55:40
| 2017-02-09T03:55:40
| 81,404,480
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,018
|
r
|
R1.NameConverter.r
|
# Converts peptide names to gene name, chromosome and start/end positions.
cat("\nThis code converts the Peptide names into their respective Gene Names, Chromosome Name, Start and End location\n\n")
# Author: Debjit Ray
# Date: 20 September, 2014
# Usage: From terminal
#   Rscript Q1.ProteinToGeneNameConverter.r INPUTFILE OUTPUTFILE MARTNAME DATASET QUERY CONVERTED
#   Rscript Q1.ProteinToGeneNameConverter.r 1.crossTab_W_joint_var1.txt 4.PeptideName_GeneName_Mapped.csv ensembl hsapiens_gene_ensembl refseq_peptide hgnc_symbol
# Install biomaRt only when it is missing; the original re-downloaded and
# re-installed it on every run, which is slow and needs network access.
if (!requireNamespace("biomaRt", quietly = TRUE)) {
  source("http://bioconductor.org/biocLite.R")
  biocLite("biomaRt")
}
# NOTE(review): the original called rm(list=ls(all=TRUE)) here; dropped, as
# wiping the workspace inside a script is an anti-pattern and nothing created
# above needs clearing.
args <- commandArgs(TRUE)
library("biomaRt")
#####################################################################################
# Function for the conversion
#####################################################################################
# Query Ensembl BioMart for each peptide ID and return a data frame holding the
# query ID, the converted ID, and chromosome name / start / end positions.
peptide_to_gene <- function(PEPTIDE_LIST,MART_NAME,DATASET,QUERY,CONVERTED){
  mart_conn <- useMart(MART_NAME, host = "www.ensembl.org")
  mart_dataset <- useDataset(DATASET, mart = mart_conn)
  getBM(
    attributes = c(QUERY, CONVERTED, "chromosome_name", "start_position", "end_position"),
    filters = c(QUERY),
    values = PEPTIDE_LIST,
    mart = mart_dataset
  )
}
#####################################################################################
#####################################################################################
# Input parameters (positional command-line arguments)
INPUTFILE <- args[1]
OUTPUTFILE <- args[2]
MART_NAME <- args[3]
DATASET <- args[4]
QUERY <- args[5]
CONVERTED <- args[6]
#####################################################################################
# Choose the reader from the file extension. Use the LAST dot-separated token,
# so names with any number of dots work; the original hard-coded K[3], which
# yields NA (and an error in `if`) for filenames that do not split into
# exactly three tokens.
K <- unlist(strsplit(INPUTFILE, "\\."))
if (K[length(K)] == 'csv') {
  PROTEIN <- read.csv(INPUTFILE, sep = ",", header = TRUE)
} else {
  PROTEIN <- read.csv(INPUTFILE, sep = "\t", header = TRUE)
}
LIST <- PROTEIN[,1]
# Strip trailing version suffixes ("NP_001.2" -> "NP_001") and any quotes.
PEPTIDE_LIST <- sub("(.*)\\.\\d+", "\\1", LIST) # REMOVING THE DOTS
PEPTIDE_LIST <- gsub("\'", "", PEPTIDE_LIST)  # gsub: remove ALL quotes, not just the first
PEPTIDE_LIST <- gsub("\"", "", PEPTIDE_LIST)
UPDATED_LIST <- peptide_to_gene(PEPTIDE_LIST,MART_NAME,DATASET,QUERY,CONVERTED)
# NOTE(review): DATASET is reassigned here from the mart dataset name to the
# output table; kept as-is for backward compatibility with the original script.
DATASET <- data.frame(UPDATED_LIST[,2],UPDATED_LIST[,1],UPDATED_LIST[,3],UPDATED_LIST[,4],UPDATED_LIST[,5])
colnames(DATASET) <- cbind(colnames(UPDATED_LIST)[2],colnames(UPDATED_LIST)[1],colnames(UPDATED_LIST)[3],colnames(UPDATED_LIST)[4],colnames(UPDATED_LIST)[5])
write.table(DATASET, sep=",",file=OUTPUTFILE,row.names=FALSE)
# Report how many input rows were mapped by BioMart.
ACTUAL <- length(PROTEIN[,1])
MAPPED <- length(UPDATED_LIST[,1])
cat("\nActual entries: ", ACTUAL,"\n")
cat("Final mapped entries: ", MAPPED,"\n\n")
################################################################################
# http://davetang.org/muse/2012/04/27/learning-to-use-biomart/
# listMarts(Biomart)
# MY_MART = useMart('ensembl')
# listDatasets(MY_MART)
# hsa = useDataset('uniprot', mart=MY_MART)
# filters = listFilters(hsa)
#Test <- UPDATED_LIST[order(as.numeric(UPDATED_LIST$end_position)),]
#Test <- Test[order(as.numeric(Test$start_position)),]
#Test <- Test[order(as.numeric(Test$chromosome_name)),]
|
72c4ab861f4bb5234b65fb76d8c482ab04721288
|
57bb3030d0f6eb51004f1505d26de71e7b9b9530
|
/Time_Series_Analysis.R
|
1fce081fc974fdb6e5df640610be38522ad37316
|
[] |
no_license
|
vicky-lee/Time-Series-Models-Using-R
|
569bc93051f1d469a53ee9cee5a49dcdacdc497f
|
90279351e10909f318ffbd48b029767f1f5e8416
|
refs/heads/master
| 2021-01-19T23:02:30.528069
| 2017-04-20T22:14:37
| 2017-04-20T22:14:37
| 88,914,533
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,806
|
r
|
Time_Series_Analysis.R
|
# Time series analysis of the daily U.S./U.K. exchange rate: data loading,
# exploratory plots, and serial-dependence / ARCH-effect diagnostics.
# NOTE(review): setwd() to a user-specific absolute path is non-portable and
# only works on the original author's machine.
# data Retrieval and libraries
setwd("C:/Users/Vicky Lee/Desktop/CSC 425/Project")
myd=read.table("ex_rate.csv", header = T, sep = ',')
head(myd)
library(rugarch)
library(tseries)
library(fBasics)
library(zoo)
library(forecast)
library(lmtest)
# missing Values and time Plot; NAs are filled by linear interpolation below
sapply(myd, function(x) sum(is.na(x)))
ratets = zoo(myd$rate, as.Date(as.character(myd$date),format=c("%Y-%m-%d")))
rate = na.approx(ratets)
plot(rate,xlab="Time",ylab="Exchange Rate",main="U.S./U.K. Foregin Exchange Rate(# of dollars per 1 british pound)")
# ACF and PACF of daily exchange rate
acf(coredata(rate),main="ACF of Exchange Rate")
pacf(coredata(rate),main="PACF of Exchange Rate")
# log return time series, its plot, and basic statistics
# lag(rate, -1) on a zoo series is the previous observation, so rets are
# one-day log returns
rets = log(rate/lag(rate, -1))
plot(rets,xlab="Time",ylab="Rate",main="log returns of exchage rate")
basicStats(rets)
# log return exchange rates histogram with a fitted normal density overlay
par(mfcol=c(1,1))
hist(rets, xlab="log returns of exchange rate", prob=TRUE, main="Histogram of log returns")
xfit<-seq(min(rets),max(rets), length=40)
yfit<-dnorm(xfit,mean=mean(rets),sd=sd(rets))
lines(xfit, yfit, col="blue", lwd=2)
# Plots ACF function of log returns
ret = coredata(rets);
acf(ret,main="ACF of log returns of exchange rate")
# plot ACF of squared returns to check for ARCH effect
acf(ret^2,main="ACF of squared log returns")
# plot ACF of absolute returns to check for ARCH effect
acf(abs(ret),main="ACF of absolute log returns")
# computes Ljung-Box test on log returns to test independence
Box.test(coredata(rets),lag=4,type='Ljung')
Box.test(coredata(rets),lag=5,type='Ljung')
Box.test(coredata(rets),lag=7,type='Ljung')
# computes Ljung-Box test on squared log returns to test non-linear independence
Box.test(coredata(rets^2),lag=3,type='Ljung')
Box.test(coredata(rets^2),lag=5,type='Ljung')
Box.test(coredata(rets^2),lag=7,type='Ljung')
# computes Ljung-Box test on absolute log returns to test non-linear independence
Box.test(abs(coredata(rets)),lag=3,type='Ljung')
Box.test(abs(coredata(rets)),lag=5,type='Ljung')
Box.test(abs(coredata(rets)),lag=7,type='Ljung')
# GARCH-family volatility models on the daily log returns (rugarch), compared
# via increasingly flexible error distributions, then forecast and backtested.
# fit ARMA(0,0)-GARCH(1,1) with normal distributed errors
garch11.spec=ugarchspec(variance.model=list(garchOrder=c(1,1)), mean.model=list(armaOrder=c(0,0)))
# estimate model
garch11.fit=ugarchfit(spec=garch11.spec, data=rets)
garch11.fit
# fit ARMA(0,0)-GARCH(1,1) model with t-distribution
garch11.t.spec=ugarchspec(variance.model=list(garchOrder=c(1,1)), mean.model=list(armaOrder=c(0,0)), distribution.model = "std")
# estimate model
garch11.t.fit=ugarchfit(spec=garch11.t.spec, data=rets)
garch11.t.fit
# fit ARMA(0,0)-GARCH(1,1) model with skewed t-distribution
garch11.st.spec=ugarchspec(variance.model=list(garchOrder=c(1,1)), mean.model=list(armaOrder=c(0,0)), distribution.model = "sstd")
# estimate model
garch11.st.fit=ugarchfit(spec=garch11.st.spec, data=rets)
garch11.st.fit
# fit ARMA(0,0)-eGARCH(1,1) model with skewed t-distribution
egarch11.st.spec=ugarchspec(variance.model=list(model = "eGARCH", garchOrder=c(1,1)), mean.model=list(armaOrder=c(0,0)), distribution.model = "sstd")
# estimate model
egarch11.st.fit=ugarchfit(spec=egarch11.st.spec, data=rets)
egarch11.st.fit
plot(egarch11.st.fit)
# compute h-step ahead forecasts for h=1,2,...,10
garch11.st.fcst=ugarchforecast(garch11.st.fit, n.ahead=10)
garch11.st.fcst
plot(garch11.st.fcst)
# compute h-step ahead forecasts for h=1,2,...,10
egarch11.st.fcst=ugarchforecast(egarch11.st.fit, n.ahead=10)
egarch11.st.fcst
plot(egarch11.st.fcst)
# rolling forecasts (refit with the last 500 obs held out, roll 450 times)
egarch11.st.fit=ugarchfit(spec=egarch11.st.spec, data=rets, out.sample=500)
egarch11.st.fcst=ugarchforecast(egarch11.st.fit, n.ahead=12, n.roll=450)
plot(egarch11.st.fcst)
# rolling forecasts
garch11.st.fit=ugarchfit(spec=garch11.st.spec, data=rets, out.sample=500)
garch11.st.fcst=ugarchforecast(garch11.st.fit, n.ahead=12, n.roll=450)
plot(garch11.st.fcst)
# backtesting method to compare EGARCH and GARCH models:
mod_egarch = ugarchroll(egarch11.st.spec, data = rets, n.ahead = 1,
n.start = 2000, refit.every = 130, refit.window = "recursive")
mod_garch = ugarchroll(garch11.st.spec, data = rets, n.ahead = 1,
n.start = 2000, refit.every = 130, refit.window = "recursive")
# type=VaR shows VaR at 1% level: this is the tail probability.
report(mod_egarch, type="VaR", VaR.alpha = 0.01, conf.level = 0.95)
# type="fpm" shows forecast performance measures
# (Mean Squared Error (MSE), mean absolute error(MAE) and directional accuracy
# of the forecasts vs realized returns(DAC)).
report(mod_egarch, type="fpm")
report(mod_garch, type="fpm")
# compute 10-step ahead forecasts
egarch11.st.fcst=ugarchforecast(egarch11.st.fit, n.ahead=10)
# NOTE(review): `stats` shadows the base package name and is never used again
# in this script -- presumably a leftover; confirm before removing.
stats=egarch11.st.fcst
plot(egarch11.st.fcst)
# Monthly aggregation of the exchange rate and (S)ARIMA modelling.
# NOTE(review): `rate` and `ratets` are OVERWRITTEN here, changing from the
# daily zoo series used above to monthly series -- everything below this point
# operates on monthly data.
# aggregate daily into monthly exchange rate, time plot, and basic statistics
myd$date <- as.Date(myd$date)
myd$Month <- format(myd$date, format="%m")
myd$Year <- format(myd$date,format="%Y")
monthly<-aggregate( rate ~ Month + Year , myd , mean )
monthly$monthly <- as.yearmon(paste(monthly$Year,monthly$Month, sep = "-"), format=c("%Y-%m"))
rate =ts(monthly[,3], start = c(2006), frequency = 12)
ratets = zoo(monthly$rate, as.Date(monthly$monthly,format=c("%m-%Y")))
plot(rate,xlab="Time",ylab="Exchange Rate",main="Monthly U.S./U.K. Foregin Exchange Rate")
basicStats(rate)
# ACF, PACF of monthly exchange rate
acf(coredata(rate),main="ACF of monthly exchange rate")
pacf(coredata(rate),main="PACF of monthly exchange rate")
# ACF, PACF of differenced series
dx = diff(rate)
head(dx)
acf(as.vector(dx),main="ACF of first difference")
pacf(as.vector(dx),main="PACF of first difference")
auto.arima(dx, ic =c("bic"), trace=TRUE, allowdrift=TRUE)
# Dickey Fuller test for monthly exchange rate
library(fUnitRoots)
# tests for AR model with time trend
adfTest(coredata(rate), lags=1, type=c("ct"))
adfTest(coredata(rate), lags=3, type=c("ct"))
adfTest(coredata(rate), lags=5, type=c("ct"))
# best arima based on bic
auto.arima(rate, ic =c("bic"), trace=TRUE, allowdrift=TRUE)
# fitting ARIMA(1,1,0)(1,0,0)[12] with drift
m1=Arima(rate,order=c(1,1,0),seasonal=list(order=c(1,0,0),period=12), method="ML",include.drift=T)
coeftest(m1)
# residual analysis of ARIMA(1,1,0)(1,0,0)[12] with drift
acf(coredata(m1$resid), main="ACF of Residuals")
pacf(coredata(m1$resid), main="PACF of Residuals")
# ljung box test on residuals (fitdf = number of estimated ARMA coefficients)
Box.test(m1$resid, 6, "Ljung-Box", fitdf=2)
Box.test(m1$resid, 12, "Ljung-Box", fitdf=2)
# fitting ARIMA(1,1,0)
m2=Arima(rate,order=c(1,1,0), method="ML")
coeftest(m2)
# residual analysis of ARIMA(1,1,0)
acf(coredata(m2$resid), main="ACF of Residuals")
pacf(coredata(m2$resid), main="PACF of Residuals")
# ljung box test on residuals
Box.test(m2$resid, 6, "Ljung-Box", fitdf=1)
Box.test(m2$resid, 12, "Ljung-Box", fitdf=1)
# model validation using backtesting
# NOTE(review): backtest.R is a course-provided helper script that must sit in
# the working directory; backtest() is defined there, not in any package here.
source("backtest.R")
backtest(m1, rate, h=1, orig=length(rate)*0.8)
backtest(m2, rate, h=1, orig=length(rate)*0.8)
# plot forecasts of ARIMA(1,1,0)(1,0,0)[12] with drift & ARIMA(1,1,0)
# Use the forecast() generic instead of calling forecast.Arima() directly:
# the method has been deprecated/removed from the forecast package's exports,
# and the generic dispatches to the same code for Arima objects.
plot(forecast(m1, h=6), include=120, main="Actual vs Predicted & 6-step forecasts: SARIMA(1,1,0)(1,0,0)[12]", xlab="Date", ylab="Exchange Rate")
lines(fitted(m1), col=2)
legend('topright', legend=c("Actual","Predicted"),lty=1, col=c('black','red' ), cex=.75)
forecast(m1, h=10)
plot(forecast(m2, h=6), include=120, main="Actual vs Predicted & 6-step forecasts: ARIMA(1,1,0)", xlab="Date", ylab="Exchange Rate")
lines(fitted(m2), col=2)
legend('topright', legend=c("Actual","Predicted"),lty=1, col=c('black','red' ), cex=.75)
forecast(m2, h=10)
|
fc99a4ef39f0a424299384bb594f33ef73a58d56
|
0762da61488e95451972d5c726dd9483088cfb0f
|
/taller SKAT secuencia/analisis SKAT_secuenciacion_GENE.R
|
7651437c5d9b270b44d9c4cd13c84fb55bf8efe2
|
[] |
no_license
|
fepo68/TallerBioinformatica_CG_COMF_2021
|
eee36314bafafc85068bb52ad160284da90efe6e
|
afb89aee906dd4d75f10400d58c543e880d209a9
|
refs/heads/main
| 2023-08-20T14:18:36.467577
| 2021-10-12T00:08:49
| 2021-10-12T00:08:49
| 416,098,517
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,176
|
r
|
analisis SKAT_secuenciacion_GENE.R
|
## SKAT for one (or a few) genes
# Gene-based rare-variant association testing with the SKAT package on
# sequencing data; comments translated from Spanish (original had mojibake).
library(SKAT)
setwd("/cloud/project/taller SKAT secuencia")
RH <- read.delim("RH_seq_ALL.txt",stringsAsFactors=F,na.strings=c("","."))
head(RH)
dim(RH)
names(RH)
table(RH$GENE)# variant counts for each gene
table(RH$GENE, RH$Polyphen2_HDIV_pred)# distribution of each gene's variants by PolyPhen prediction
# not needed (SKAT handles it), but if we want the actual MAF in our study:
# columns 16:43 hold the 28 individuals' genotypes coded 0/1/2
RH$cHET<-apply(RH[,16:43],1,function(x) length(which(x==1)))
RH$cHOM<-apply(RH[,16:43],1,function(x) length(which(x==2)))
RH$MAF<-(RH$cHET+(2*RH$cHOM))/(2*28)
# RH[RH$MAF>=0.5,c(16:25,43:48)]# inspect potentially problematic cases (more below)
# if we wanted to flip those genotypes:
# RH[RH$MAF>0.5,16:43]<-lapply(RH[RH$MAF>0.5,16:43],function(x) ifelse(x==2,0,ifelse(x==0,2,1)))
# To define weights as a function of PolyPhen, for example...
# (-99 marks unexpected prediction values)
RH$W_POLYPHEN<-ifelse(RH$Polyphen2_HDIV_pred=='B', 0.25,
ifelse(RH$Polyphen2_HDIV_pred=='NA', 0.55,
ifelse(RH$Polyphen2_HDIV_pred=='P', 0.75,
ifelse(RH$Polyphen2_HDIV_pred=='D', 0.85,-99))))
table(RH$W_POLYPHEN)
head(RH)
# Analysis for the 51 RP1L1 markers - everything could also be analysed jointly
# since there is a hypothesis/rationale
# (genes involved in retina homeostasis)
# Select the rows for that gene and build the TRANSPOSED genotype matrix (Z)
names(RH)
R_RP1L1<-RH[RH$GENE=='RP1L1',]# rows of that gene from the original file
R_RP1L1[,c(16:44,48,49)]
RP1L1<-R_RP1L1[,16:43]# genotype columns for the variants of that gene
head(RP1L1)
RP1L1[1:15,]
## IMPORTANT: the SKAT function needs the genotype matrix transposed relative to how we built it
# transpose
Z1<-t(RP1L1)
dim(Z1)
Z1# now the 28 individuals are rows and the 51 RP1L1 variants are columns
# for another gene...:
# NPHP4<-RH[RH$GENE=='NPHP4',16:43]
# NPHP4[1:11,1:8]
# Z2<-t(NPHP4)
# Open the phenotype file
feno_RH <- read.delim("FENOTIPO_RH.txt",stringsAsFactors=F,na.strings=c("","."))
head(feno_RH)
# if we pull out here the variables used in the model, the formula below can be
# written directly; otherwise we would have to write feno_RH$EDAD etc...
y.b<-feno_RH$GRAVEDAD # severity in two categories - analogous to CASE/CONTROL
y.q<-feno_RH$GRAV_CUANT # numeric severity measure, quantitative variable
edad<-feno_RH$EDAD
sex<-feno_RH$SEX
# fit the null model with covariates only, or with nothing...
modelo1<-SKAT_Null_Model(y.b ~ edad+sex, out_type="D")
modelo0<-SKAT_Null_Model(y.b ~ 1, out_type="D")
# modelo1<-SKAT_Null_Model(feno_RH$GRAVEDAD ~ feno_RH$SEX+feno_RH$EDAD, out_type="D")# specifying the variables' origin...
########################################################################################################################################
# "standard" SKAT. Default options: weights.beta=c(1,25), r.corr=0...
?SKAT
# without covariates:
out_SKAT<-SKAT(Z1,modelo0)# warns about markers whose coding does not match
# expectations (0/1/2 based on the sample MAF) - BUT SKAT already flips them
out_SKAT # full output shown in the console
out_SKAT$p.value # the key result: the SKAT p-value for the analysed gene
out_SKAT$param$n.marker # initial number of markers
out_SKAT$param$n.marker.test # number of markers actually tested (NON-MONOMORPHIC markers)
# WITH covariates (the usual case):
out_SKATc<-SKAT(Z1,modelo1)
out_SKATc$p.value
out_SKATc$param$n.marker
out_SKATc$param$n.marker.test
# for a more basic test, BURDEN:
out_BURDENc<-SKAT(Z1, modelo1, r.corr=1)# r.corr=1 gives the burden test
out_BURDENc$p.value
#########################################################################################################################################
# Playing with the weights - by default SKAT gives much more weight to rare variants:
out_SKATw1<-SKAT(Z1, modelo1, weights.beta=c(1,1))# weight=1 for every variant
out_SKATw1$p.value
r_weights<-R_RP1L1$W_POLYPHEN
out_SKATpolyphen<-SKAT(Z1, modelo1, weights=r_weights)
out_SKATpolyphen$p.value
###########################################################################################################################################
# SKAT with only the rare variants (max_maf option)
out_RARES05<-SKAT(Z1, modelo1, max_maf=0.05)# SKAT with ONLY rare variants, defined as MAF<0.05 (adjustable)
out_RARES05$p.value
out_RARES05$param$n.marker.test
## NOTE: these are much slower procedures; no problem here (be careful genome-wide):
# SKAT-O
out_SKATO<-SKAT(Z1, modelo1, method="SKATO")
out_SKATO
out_SKATO$p.value
# With bootstrap resampling - note: even slower than SKAT-O
# here we do permutations (must be specified in the null model)
modelo_RESAMPLING<-SKAT_Null_Model(y.b ~ edad+sex, out_type="D", n.Resampling=1000)
out_SKATboot<-SKAT(Z1, modelo_RESAMPLING, weights.beta=c(1,25),r.corr=0)# SKAT with default weights
names(out_SKATboot)
out_SKATboot$p.value
|
20b617ea50cb05a412b3aeb0cc0942f196ab59b9
|
8c374f8b433c33bd2989a5cd66c6dff601208efa
|
/man/mr_raps.Rd
|
2baf2729c6945b485c6ec818c1058d1812935258
|
[
"MIT"
] |
permissive
|
MRCIEU/TwoSampleMR
|
2514d01692c95db1e9fbe23f8696e99a12c6ab34
|
592ebe05538558b330c39ddeda0d11b1313ad819
|
refs/heads/master
| 2023-08-29T22:47:33.163801
| 2023-05-29T20:46:39
| 2023-05-29T20:46:39
| 49,515,156
| 277
| 160
|
NOASSERTION
| 2023-06-13T00:24:11
| 2016-01-12T16:57:46
|
R
|
UTF-8
|
R
| false
| true
| 1,460
|
rd
|
mr_raps.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mr.R
\name{mr_raps}
\alias{mr_raps}
\title{Robust adjusted profile score}
\usage{
mr_raps(b_exp, b_out, se_exp, se_out, parameters = default_parameters())
}
\arguments{
\item{b_exp}{Vector of genetic effects on exposure.}
\item{b_out}{Vector of genetic effects on outcome.}
\item{se_exp}{Standard errors of genetic effects on exposure.}
\item{se_out}{Standard errors of genetic effects on outcome.}
\item{parameters}{A list of parameters. Specifically, \code{over.dispersion} and \code{loss.function}.
\code{over.dispersion} is a logical indicating whether the model should allow for overdispersion (systematic pleiotropy).
And \code{loss.function} allows using either the squared error loss (\code{"l2"}) or robust loss functions/scores (\code{"huber"} or \code{"tukey"}).
The default is \code{parameters=list(over.dispersion = TRUE, loss.function = "tukey")}.}
}
\value{
List with the following elements:
\describe{
\item{b}{MR estimate}
\item{se}{Standard error}
\item{pval}{p-value}
\item{nsnp}{Number of SNPs}
}
}
\description{
Robust adjusted profile score
}
\details{
This function calls the \code{mr.raps} package. Please refer to the documentation of that package for more detail.
}
\references{
Qingyuan Zhao, Jingshu Wang, Jack Bowden, Dylan S. Small. Statistical inference in two-sample summary-data Mendelian randomization using robust adjusted profile score. Forthcoming.
}
|
4206f5100143bc320668c4a2cee018ea1b3ea9ad
|
983c7ea16d0b8b97976a4b246edfad344e2e3e24
|
/pollutantmean.R
|
64b038430d3f7f67ef9a4171722a4d67bd769de8
|
[] |
no_license
|
mpallavi/Data-Science-Certifications-Coursera
|
d8c7d9825fafc1c7446d82f41ebe951aac4e9b60
|
e7dd15bcd5d91c21d38f1dd65af4b9de1f518fae
|
refs/heads/master
| 2021-01-02T08:33:40.311288
| 2015-03-07T03:39:20
| 2015-03-07T03:39:20
| 29,446,325
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 315
|
r
|
pollutantmean.R
|
# Mean of one pollutant across a set of monitor files.
#
# directory: folder containing files named 001.csv, 002.csv, ...
# pollutant: name of the column to average (e.g. "sulfate")
# id:        integer monitor IDs whose files are read (default 1:332)
#
# Returns the mean of the pollutant over all requested files, ignoring NAs.
pollutantmean <- function(directory, pollutant, id = 1:332) {
  files <- paste0(directory, "/", formatC(id, width = 3, flag = "0"), ".csv")
  columns <- lapply(files, function(f) read.csv(f)[[pollutant]])
  mean(unlist(columns), na.rm = TRUE)
}
|
7b04c5167e6ef35ddb276ee36c161582f33112d3
|
6b9818dd01030e8b3a9fec4a15b75cbfc750744a
|
/plot5.R
|
5663a9f134dd7a8c678a6bdad2945085618dc1e6
|
[] |
no_license
|
ssheremet/ExData_Plotting2
|
f50e41130dd6b5103fea7cd5618683b873fd83d8
|
112046d7fa7617430600f8a5a7ca7857bbb70d5d
|
refs/heads/master
| 2021-01-22T23:25:27.223054
| 2014-05-21T21:09:51
| 2014-05-21T21:09:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 975
|
r
|
plot5.R
|
# Plot total PM2.5 emissions from motor-vehicle (ON-ROAD) sources in Baltimore
# City (fips == "24510") by year, and save the figure to plot5.png.
plot5 <- function(){
    require(ggplot2)
    ##read data from files
    NEI <- readRDS("summarySCC_PM25.rds")
    ## SCC is loaded for reference but not used in this particular plot
    SCC <- readRDS("Source_Classification_Code.rds")
    ##subsetting data to on-road (motor vehicle) sources in Baltimore City
    NEI_motor <- subset(NEI,type=="ON-ROAD" & fips == "24510")
    ##aggregate data by years and set dim names
    data <- aggregate(NEI_motor$Emissions, by=list(NEI_motor$year), FUN=sum)
    colnames(data) <- c("Year","Emissions")
    ##create plot object
    g <- ggplot(data, aes(Year, Emissions))
    ##add layers and print plot
    ## print() is REQUIRED here: inside a function a ggplot object is not
    ## auto-printed, so without it nothing is rendered and dev.copy() below
    ## would capture an empty or stale device.
    print(g + geom_point(shape=2, color="steelblue") + geom_line(col="steelblue") + ggtitle("Emission of motor vehicle in Baltimore by years") + theme_bw() + scale_x_continuous(breaks=c(1999, 2002, 2005, 2008)))
    ##copy diagram to png file
    dev.copy(png, filename="plot5.png", width = 480, height = 480, units = "px")
    dev.off()
}
|
341ea021098cbc0549ac590606118727dcee8b64
|
bea0e56a7a0ff5597a21158ae4b8c566a5a6404f
|
/R/RcppExports.R
|
d651ca5b61146ed8d5217d7e9c8c73fb5dfcc0e4
|
[] |
no_license
|
EliGurarie/smoove
|
98c5c128316ad37bfc9d44916495f9f5b4d4a920
|
998e456c308d289f7428c9cf2043fd27de76f600
|
refs/heads/master
| 2022-08-08T03:17:21.322981
| 2022-07-28T20:51:17
| 2022-07-28T20:51:17
| 85,118,943
| 5
| 2
| null | 2018-06-30T15:00:18
| 2017-03-15T20:42:44
|
R
|
UTF-8
|
R
| false
| false
| 241
|
r
|
RcppExports.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# Thin R wrapper around the compiled '_smoove_Mahalanobis' routine of the
# smoove package; forwards x, center and cov unchanged via .Call().
# NOTE(review): presumably computes Mahalanobis distance(s) of x from `center`
# under covariance `cov`; the actual semantics live in the C++ source --
# confirm there before relying on details. Comments here will be lost if the
# file is regenerated.
Mahalanobis <- function(x, center, cov) {
    .Call('_smoove_Mahalanobis', PACKAGE = 'smoove', x, center, cov)
}
|
c3827ad0559a618dcf1e73f4659e84a13102437a
|
2d549b99d2f77abe9f4658ffb7ef3fef6e25c286
|
/man/create_ejMetadata.Rd
|
bb5ab8bb56301d18f78bbf73dc61ab392c42b1bb
|
[] |
no_license
|
Hackout2/repijson
|
549b538135badb358b864be5c47e26e177de74c0
|
4ad8f9d7c33cd2225e11674f651f608bff08bc91
|
refs/heads/master
| 2020-12-29T02:36:05.984596
| 2017-02-22T22:06:34
| 2017-02-22T22:06:34
| 35,152,284
| 6
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 388
|
rd
|
create_ejMetadata.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/constructors.R
\name{create_ejMetadata}
\alias{create_ejMetadata}
\title{Create metadata}
\usage{
create_ejMetadata(attributes)
}
\arguments{
\item{attributes}{list of attributes of the metadata}
}
\value{
an ejMetadata object
}
\description{
This function defines the metadata component of an EpiJSON document and returns an \code{ejMetadata} object.
}
|
f281571d5d53a79f16f7c8cf037a0ec7f7d951e1
|
1f3c33ff4573e8d00421be9c1064286c7c16bdd7
|
/old FUNCTIONS/INPUT.R
|
e2b739a6b2354727875cc8154ba5dc0fc1e46222
|
[] |
no_license
|
mccannecology/model
|
bcddb18189a49507d10a49f063c99a9d0e529e49
|
3ab691c6069b174d97230719ee40f36e6129a0bf
|
refs/heads/master
| 2021-05-28T08:39:34.460524
| 2015-03-18T15:38:43
| 2015-03-18T15:38:43
| 17,213,665
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,645
|
r
|
INPUT.R
|
#####################################################
# Accepts .csv of parameter values                  #
# Assigns values to objects in environment          #
# STILL IN DEVELOPMENT                              #
#                                                   #
# By: Michael J. McCann                             #
# Last Updated: 2/26/2014                           #
#####################################################
# NOTE(review): the argument `x` is never used. The function reads the GLOBAL
# objects `parameters` (a data frame of parameter values, one simulation per
# row) and `simulnumb` (the row index of the current simulation) directly, and
# writes every parameter into the global environment via assign(). It is
# side-effect-only; callers rely on the globals it creates, not on a return
# value. `wind_directions` is special-cased: it gathers four related columns
# (wind_directions, .1, .2, .3) into a single character vector.
INPUT <- function(x){
assign("height", parameters$height[simulnumb], envir = .GlobalEnv) # height of the grid
assign("width", parameters$width[simulnumb], envir = .GlobalEnv) # width of the grid
assign("timesteps", parameters$timesteps[simulnumb], envir = .GlobalEnv) # number of time-steps (+1, actually) in a "growing season"
assign("years", parameters$years[simulnumb], envir = .GlobalEnv) # number of years ("growing seasons") to run the model
assign("wind_prob", parameters$wind_prob[simulnumb], envir = .GlobalEnv) # proportion of time steps where wind knocks a row/col off of the grid
assign("wind_directions", c(as.character(parameters$wind_directions[simulnumb]),as.character(parameters$wind_directions.1[simulnumb]),as.character(parameters$wind_directions.2[simulnumb]),as.character(parameters$wind_directions.3[simulnumb])), envir = .GlobalEnv)
assign("buffer", parameters$buffer[simulnumb], envir = .GlobalEnv) # distance from focal cell - used to count up the number of neighbors
assign("numbspecies", parameters$numbspecies[simulnumb], envir = .GlobalEnv) # number of species in the model
assign("initial01", parameters$initial01[simulnumb], envir = .GlobalEnv) # initial number of individuals - species 01
assign("initial02", parameters$initial02[simulnumb], envir = .GlobalEnv) # initial number of individuals - species 02
assign("initial03", parameters$initial03[simulnumb], envir = .GlobalEnv) # initial number of individuals - species 03
assign("initial04", parameters$initial04[simulnumb], envir = .GlobalEnv) # initial number of individuals - species 04
assign("agedead01", parameters$agedead01[simulnumb], envir = .GlobalEnv) # average age that individuals die at - species 01
assign("agedead02", parameters$agedead02[simulnumb], envir = .GlobalEnv) # average age that individuals die at - species 02
assign("agedead03", parameters$agedead03[simulnumb], envir = .GlobalEnv) # average age that individuals die at - species 03
assign("agedead04", parameters$agedead04[simulnumb], envir = .GlobalEnv) # average age that individuals die at - species 04
assign("maxrgr01", parameters$maxrgr01[simulnumb], envir = .GlobalEnv) # maximum relative growth rate - species 01
assign("maxrgr02", parameters$maxrgr02[simulnumb], envir = .GlobalEnv) # maximum relative growth rate - species 02
assign("maxrgr03", parameters$maxrgr03[simulnumb], envir = .GlobalEnv) # maximum relative growth rate - species 03
assign("maxrgr04", parameters$maxrgr04[simulnumb], envir = .GlobalEnv) # maximum relative growth rate - species 04
assign("overwinter01", parameters$overwinter01[simulnumb], envir = .GlobalEnv) # proportion of individuals that overwinter - species 01
assign("overwinter02", parameters$overwinter02[simulnumb], envir = .GlobalEnv) # proportion of individuals that overwinter - species 02
assign("overwinter03", parameters$overwinter03[simulnumb], envir = .GlobalEnv) # proportion of individuals that overwinter - species 03
assign("overwinter04", parameters$overwinter04[simulnumb], envir = .GlobalEnv) # proportion of individuals that overwinter - species 04
}
|
27892d3c57da742286b167e85e13a6d51f94cb3d
|
49d51566b41a141a0dcad5119fcf910efcdfd82d
|
/Ex.2_11.R
|
c8da6faaee66d8295231fcc6ce9a1296acfb9bf8
|
[] |
no_license
|
himanshu6980/RSolvedProblems
|
460124c286f8d6814ae7f9ba762cb01dcb7e5cc4
|
d9928290279030543572005c84638bcab98c72ac
|
refs/heads/master
| 2020-03-06T18:42:59.866055
| 2018-03-27T16:10:59
| 2018-03-27T16:10:59
| 127,012,585
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 550
|
r
|
Ex.2_11.R
|
# Example 2.11: creating, naming, and inspecting heterogeneous lists.
# lists
Numbers <- c(2,5.1,3.9,9,6,7)
Logic <- c(TRUE, TRUE, FALSE)
strings <- c("nikita", "manoj","himanshu", "uday")
complexnumbers <- c(3.2-4.5i,12.8+2.2i)
# creating a list
items <- list(Numbers,Logic,strings,complexnumbers)
# naming list elements (this overwrites the unnamed list created above)
items <- list(first=Numbers,second=Logic,third=strings,fourth=complexnumbers)
# extracting elements: [[3]] returns the element itself (a character vector),
# then [1] indexes into it
items[[3]]
items[[3]][1]
class(items)
# The function lapply applies a specified function to each of the elements of a list.
lapply(items,length)
lapply(items,class)
# NOTE: mean() is only meaningful for the numeric and complex elements; the
# logical element is coerced (proportion of TRUEs) and the character element
# yields NA with a warning.
lapply(items,mean)
summary(items)
|
29b4857baf628d5fd97cbca43510e002f50a0f13
|
a346948645c961c79baa6c83adea72d6fa8a5093
|
/man/hidden.Rd
|
435eae0cfe7382dc5b7f50e09c039c824121357f
|
[] |
no_license
|
Chabedi/Ellipses
|
3fce410e0988ce8eca9f5eb2096e930605f13ee9
|
23731b91bcaee5b96a10699e2a98013cd6a032f0
|
refs/heads/master
| 2020-03-23T12:39:31.399917
| 2018-07-19T11:59:55
| 2018-07-19T11:59:55
| 141,572,849
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 182
|
rd
|
hidden.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hidden.R
\name{hidden}
\alias{hidden}
\title{Title}
\usage{
hidden()
}
\value{
}
\description{
Title
}
|
20128a91e03079d0bdbb31ecf7ea0c616a7ef0e1
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/landscapeR/R/landscapeR-package.R
|
2610921680a6c9fe1e59d4eb42d6b7d76bf318e0
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,483
|
r
|
landscapeR-package.R
|
#' landscapeR: A landscape simulator for R.
#'
#' This package is aimed at simulating categorical landscapes on actual geographical realms,
#' starting from either empty landscapes, or landscapes provided by the user (e.g. land use maps).
#' landscapeR allows to tweak or create landscapes while retaining a high degree of control on its features,
#' without the hassle of specifying each location attribute. In this it differs from other tools
#' which generate null or neutral landscape in a theoretical space. The basic algorithm currently
#' implemented uses a simple agent style/cellular automata growth model, with no rules
#' (apart from areas of exclusion). Outputs are raster dataset exportable to any common GIS format.
#'
#' @useDynLib landscapeR
#' @importFrom Rcpp sourceCpp
#'
#' @section landscapeR functions:
#' \itemize{
#' \item{\code{\link{makePatch}} creates a single patch in the landscape.}
#' \item{\code{\link{makeClass}} creates a group of patches belonging to the same class.}
#' \item{\code{\link{expandClass}} expands an existing class of patches.}
#' \item{\code{\link{makeLine}} creates a linear patch.}
#' \item{\code{\link{rmSingle}} removes single tones from patches and background.}
#' }
#' @details Check out the vignette illustrating the use of landscapeR.\cr
#' Also: \url{https://github.com/dariomasante/landscapeR}
#' @docType package
#' @name landscapeR-package
#' @aliases landscapeR
#' @author Dario Masante
NULL
|
968d7d3f90c64a80aa3d88a4bd1939653ee0a04f
|
abf9a60795618b176c40741696b5cd0b97ae0a5a
|
/cachematrix.R
|
04a4b6f1edc76b16838cae6f9e336236509a53b0
|
[] |
no_license
|
catods/ProgrammingAssignment2
|
48b89952dfa5c7ae730d4eb5566becd7a023c396
|
2f8d1887ae431f1c60ff78d10f6689354e46a982
|
refs/heads/master
| 2022-11-07T09:52:42.006172
| 2020-06-21T23:08:13
| 2020-06-21T23:08:13
| 273,986,808
| 0
| 0
| null | 2020-06-21T21:32:14
| 2020-06-21T21:32:13
| null |
UTF-8
|
R
| false
| false
| 1,312
|
r
|
cachematrix.R
|
## This sets of functions allow to create a data structure to manipulate
## a matrix, caching the inverse of the matrix, so it does not need to
## be re-calculated when you call the function with the same argument
## makeCacheMatrix creates a list structure of functions that allow get
## and set the matrix and the inverse of the matrix. When getting the
## inverse of the matrix, it returns the value in cache if it was previously
## calculated.
## Build a matrix wrapper that memoises its inverse.
## Returns a list of four accessor closures sharing one enclosing
## environment: set/get for the matrix itself, setsolve/getsolve for the
## cached inverse. Replacing the matrix via set() clears the cache.
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL

  # Replace the stored matrix and invalidate any cached inverse.
  set <- function(y) {
    x <<- y
    cached_inv <<- NULL
  }

  # Retrieve the stored matrix.
  get <- function() {
    x
  }

  # Store / fetch the cached inverse (NULL while not yet computed).
  setsolve <- function(new_solve_x) {
    cached_inv <<- new_solve_x
  }
  getsolve <- function() {
    cached_inv
  }

  list(
    set = set,
    get = get,
    setsolve = setsolve,
    getsolve = getsolve
  )
}
## cacheSolve calculates the inverse of the matrix passed as part of the
## object returned by the makeCacheMatrix function, returning the inverse matrix
## in cache (if any) or calculating the inverse and storing that in the cache.
## Return the inverse of the matrix held by a makeCacheMatrix() object,
## computing it at most once: the first call solves and caches the
## result; later calls announce "getting cached data" and reuse it.
## Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getsolve()

  # Fast path: an inverse was already computed for this matrix.
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }

  # Slow path: invert now and remember the result for next time.
  inverse <- solve(x$get(), ...)
  x$setsolve(inverse)
  inverse
}
|
65a1ad58a2954940b1476d6dc3018e0d10c00bd9
|
a67e2ecb5ee1d33f396d03e07a89ddbddaf36fa2
|
/ui.R
|
7acc2d1e614a23633aa54e49864ed6888a305cab
|
[] |
no_license
|
sydneykpaul/Data_Visualization_Final_Project
|
0e8b2157e984ed183c5c645f990f2adae5b13c2f
|
96421ea4a98c5bc2d4e51877a9517f8a9fa7c9d9
|
refs/heads/master
| 2020-11-24T17:50:54.915839
| 2019-12-16T01:56:32
| 2019-12-16T01:56:32
| 228,279,937
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,576
|
r
|
ui.R
|
###########################################
## Title: Data Visualization Final Project
## Authors: Sam Anderson, Sydney Paul, Kenneth Shelley
## Date Created: 12/6/2019
##
## Description:
## - ui.R file
## - Community guide to the South Bend area
###########################################
# Load necessary libraries
library(shiny)
library(shinyWidgets)
library(plotly)
library(shinythemes)
library(leaflet)
library(leaflet.extras)
library(ggthemes)
library(rgdal)
library(data.table)
library(tidyverse)
# UI definition: a navbarPage with four tabs (intro, facilities, parks,
# street lights). Fixes relative to the previous revision:
#  * a missing comma between the two column() calls in the Parks
#    fluidRow (a parse error),
#  * trailing commas that left empty arguments in tags$ul(), fluidRow()
#    and tabPanel(),
#  * the district choice list repeated '1302' and omitted '1303',
#  * typos in user-visible text ("facililites", "can aggregated",
#    "it's name").

# City Council districts used by the selectors on two tabs.
# NOTE(review): assumes the six districts are 1301-1306 -- confirm
# against the council-district dataset.
district_choices <- c("1301", "1302", "1303", "1304", "1305", "1306")

navbarPage(
    theme = shinytheme("flatly"),
    title = "South Bend Community Guide",
    id = "navbar",

    # ---------------------------------------- Welcome Tab ---------------------------------------- #
    # Author: Sydney
    tabPanel(
        title = "Introduction",
        value = "introduction_tab",

        img(src = "background_collage_wide.png", width = '100%'),
        br(),
        br(),
        h1("The City of South Bend"),
        h4("Sam Anderson, Sydney Paul, Kenneth Shelley"),
        h4("December 15th, 2019"),
        br(),
        br(),

        fluidRow(
            column(width = 3,
                   h4("Purpose:"),
                   tags$ul(
                       tags$li("Explore South Bend's public facilities, public parks, and businesses."),
                       tags$li("See how different datasets can be aggregated together to get a better understanding of the city as a whole.")
                   )
            ),
            column(width = 4,
                   h4("Data Source: City of South Bend's Open Data Portal"),
                   tags$ul(
                       tags$li("Business licenses"),
                       tags$li("Locations of street lights"),
                       tags$li("Parks"),
                       tags$li("Public facilities"),
                       tags$li("City Council districts")
                   )
            )
        )
    ), # end of welcome tab

    # ------------------------------------ Public Services Tab ------------------------------------ #
    # Author: Sam
    tabPanel(
        title = "Fun with Facilities in the Bend",
        value = "public_services_tab",
        titlePanel("Facilities"),

        fluidPage(
            fluidRow(
                column(2,
                       checkboxGroupInput("variable1",
                                          "Facilities",
                                          choiceNames = c("FIRE STATION", "LIBRARY", "POLICE STATION"),
                                          choiceValues = c("FIRE STATION", "LIBRARY", "POLICE STATION"),
                                          selected = "FIRE STATION"),
                       selectInput("variable2",
                                   "District",
                                   choices = district_choices,
                                   selected = "1301")
                )
            ),
            column(width = 10, offset = 2,
                   leafletOutput("map2"))
        )
    ), # end of public services tab

    # ----------------------------------- Public Facilities Tab ----------------------------------- #
    # Author: Ken
    tabPanel(
        title = "Parks of South Bend",
        value = "public_facilities_tab",
        titlePanel("Parks"),

        fluidPage(
            fluidRow(
                column(2,
                       selectInput("variable",
                                   "Districts",
                                   choices = district_choices,
                                   selected = "1301")
                ), # fixed: this comma was missing, which made the file unparseable
                # Map of parks
                column(width = 10, offset = 2,
                       leafletOutput("map")
                )
            )
        ),
        fluidRow(
            column(2,
                   "Park Names"
            ),
            column(10,
                   tableOutput("plot")
            )
        )
    ), # end of public facilities tab

    # ---------------------------------------- Lights Tab ----------------------------------------- #
    # Author: Sydney
    tabPanel(
        title = "Downtown is lit!",
        value = "lights_tab",

        fluidRow(
            column(width = 6,
                   h4("As we enter winter, days are growing shorter,
                       but this shouldn't stop you from enjoying everything that downtown has to offer!
                       South Bend is very well-lit, but don't just take our word for it.
                       Use the tool below to see for yourself."),
                   h4("Either select a category of businesses, or search for a specific one that you have in mind.
                       Then flip the switch and just see how lit that area is!")
            )
        ),
        fluidRow(
            column(width = 3,
                   h4("You may compare up to three categories or individual businesses.
                       Hover over a location to see its name, or click it to see its name and address.")
            ),
            column(width = 9,
                   br(),
                   br(),
                   br(),
                   materialSwitch(inputId = "light_switch",
                                  label = span(style = "font-size: 24px", "Turn on/off the sun"),
                                  status = "primary",
                                  value = TRUE,
                                  width = "100%")
            )
        ), # fixed: removed trailing comma that left an empty fluidRow argument

        hr(),

        sidebarLayout(
            sidebarPanel(width = 3,
                         radioButtons(inputId = "type_of_filter",
                                      label = "I want to select a:",
                                      choices = list("category of businesses" = "category",
                                                     "business by name" = "name"),
                                      selected = "category",
                                      width = '100%'),
                         br(),
                         selectizeInput(inputId = 'user_selected_names',
                                        label = "Available options:",
                                        choices = NULL,
                                        multiple = TRUE,
                                        width = '100%'),
                         br(),
                         checkboxInput(inputId = "time_checkbox",
                                       label = "Check this box to go back in time.",
                                       value = FALSE,
                                       width = '100%'),
                         uiOutput("slider_placeholder")
            ), # end of sidebarPanel
            mainPanel(column(width = 12, offset = 0,
                             leafletOutput("downtown_map", height = 500))
            ) # end of mainPanel
        ) # end of sidebarLayout; fixed: removed trailing comma that left an empty tabPanel argument
    ) # end of the lights tab
) # end of navbarPage
|
2dd427bfe75e51b2b9a7fc5c1b02204954b1b616
|
abd0439e4334255e89858b6ba0298254b24e3f00
|
/類別變數預測.R
|
9836650f62ebc1e384f67f3f5f3cebfcc4294422
|
[] |
no_license
|
vvsy/DM_Project4
|
c557eb12f6a4e5413b978762e85f5d791d1dc2eb
|
67fceb221f8ef0e66a025478c9c1e0086e6eed58
|
refs/heads/master
| 2020-04-13T22:38:13.798498
| 2019-01-16T19:03:14
| 2019-01-16T19:03:14
| 163,484,193
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,178
|
r
|
類別變數預測.R
|
# Categorical-outcome prediction: fit a suite of caret classifiers for
# the binary outcome `dmfail`, saving each fitted model, its confusion
# matrix (positive class "1") and its ROC curve to disk, then score a
# held-out test file with the bagging model.
# NOTE(review): paths are hard-coded to one user's machine and no RNG
# seed is set, so results are not reproducible across runs.
file <- "/Users/wongshnyau/Dropbox/mice.data.Rda"
load(file)

library(caret)
library(tidyr)
library(dplyr)
library(pROC)

# Drop the identifier column and coerce `cno` to numeric.
micedf <- micedf %>% select(-id)
micedf$cno <- as.numeric(micedf$cno)

# Stratified 70/30 train/test split.
trainIndices <- createDataPartition(micedf$dmfail, p = 0.7, list = FALSE)
df_train <- micedf %>%
  slice(trainIndices)
df_test <- micedf %>%
  slice(-trainIndices)

# 10-fold cross-validation, shared by every model below.
cv_opts10 <- trainControl(method = 'cv', number = 10)

# For each model: fit on df_train, save the fit, then compute and save
# the confusion matrix and ROC curve on df_test.

## Logistic regression
results_regreg <- train(dmfail ~ .,
                        data = df_train,
                        method = "glm",
                        trControl = cv_opts10,
                        family = binomial())
save(results_regreg, file = "~/Desktop/results_regreg.Rda")
regreg_pred <- predict(results_regreg, df_test)
cfm_regreg <- confusionMatrix(regreg_pred, df_test$dmfail, positive = '1')
save(cfm_regreg, file = "~/Desktop/cfm_regreg.Rda")
regreg_pred <- predict(results_regreg, df_test, type = "prob")
roc_regreg <- roc(df_test$dmfail, regreg_pred$`1`)
save(roc_regreg, file = "~/Desktop/roc_regreg.Rda")

## Logistic ridge regression (glmnet with alpha = 0)
regreg_opts <- expand.grid(.alpha = 0,
                           .lambda = seq(.1, .5, length = 5))
results_rigid <- train(dmfail ~ .,
                       data = df_train,
                       method = "glmnet",
                       trControl = cv_opts10,
                       tuneGrid = regreg_opts)
save(results_rigid, file = "~/Desktop/results_rigid.Rda")
rigid_pred <- predict(results_rigid, df_test)
cfm_rigid <- confusionMatrix(rigid_pred, df_test$dmfail, positive = '1')
save(cfm_rigid, file = "~/Desktop/cfm_rigid.Rda")
rigid_pred <- predict(results_rigid, df_test, type = "prob")
roc_rigid <- roc(df_test$dmfail, rigid_pred$`1`)
save(roc_rigid, file = "~/Desktop/roc_rigid.Rda")

## Logistic lasso (glmnet with alpha = 1)
regreg_opts <- expand.grid(.alpha = 1,
                           .lambda = seq(.1, .5, length = 5))
results_lasso <- train(dmfail ~ .,
                       data = df_train,
                       method = "glmnet",
                       trControl = cv_opts10,
                       tuneGrid = regreg_opts)
save(results_lasso, file = "~/Desktop/results_lasso.Rda")
lasso_pred <- predict(results_lasso, df_test)
cfm_lasso <- confusionMatrix(lasso_pred, df_test$dmfail, positive = '1')
save(cfm_lasso, file = "~/Desktop/cfm_lasso.Rda")
lasso_pred <- predict(results_lasso, df_test, type = "prob")
roc_lasso <- roc(df_test$dmfail, lasso_pred$`1`)
save(roc_lasso, file = "~/Desktop/roc_lasso.Rda")

## Generalized additive model
results_GAM <- train(dmfail ~ .,
                     data = df_train,
                     method = "gam",
                     trControl = cv_opts10)
save(results_GAM, file = "~/Desktop/results_GAM.Rda")
GAM_pred <- predict(results_GAM, df_test)
cfm_GAM <- confusionMatrix(GAM_pred, df_test$dmfail, positive = '1')
save(cfm_GAM, file = "~/Desktop/cfm_GAM.Rda")
GAM_pred <- predict(results_GAM, df_test, type = "prob")
roc_GAM <- roc(df_test$dmfail, GAM_pred$`1`)
save(roc_GAM, file = "~/Desktop/roc_GAM.Rda")

## Decision tree (CART)
results_dtree <- train(dmfail ~ .,
                       data = df_train,
                       method = 'rpart',
                       trControl = cv_opts10)
save(results_dtree, file = "~/Desktop/results_dtree.Rda")
dtree_pred <- predict(results_dtree, df_test)
cfm_dtree <- confusionMatrix(dtree_pred, df_test$dmfail, positive = '1')
save(cfm_dtree, file = "~/Desktop/cfm_dtree.Rda")
dtree_pred <- predict(results_dtree, df_test, type = "prob")
roc_dtree <- roc(df_test$dmfail, dtree_pred$`1`)
save(roc_dtree, file = "~/Desktop/roc_dtree.Rda")

## k-nearest neighbours (odd k from 3 to 11)
knn_opts <- data.frame(k = c(seq(3, 11, 2)))
results_knn <- train(dmfail ~ .,
                     data = df_train,
                     method = 'knn',
                     trControl = cv_opts10,
                     tuneGrid = knn_opts)
save(results_knn, file = "~/Desktop/results_knn.Rda")
knn_pred <- predict(results_knn, df_test)
cfm_knn <- confusionMatrix(knn_pred, df_test$dmfail, positive = '1')
save(cfm_knn, file = "~/Desktop/cfm_knn.Rda")
knn_pred <- predict(results_knn, df_test, type = "prob")
roc_knn <- roc(df_test$dmfail, knn_pred$`1`)
save(roc_knn, file = "~/Desktop/roc_knn.Rda")

## Random forest (100 trees)
results_rf <- train(dmfail ~ .,
                    data = df_train,
                    method = 'rf',
                    ntree = 100,
                    trControl = cv_opts10)
save(results_rf, file = "~/Desktop/results_rf.Rda")
rf_pred <- predict(results_rf, df_test)
cfm_rf <- confusionMatrix(rf_pred, df_test$dmfail, positive = '1')
save(cfm_rf, file = "~/Desktop/cfm_rf.Rda")
rf_pred <- predict(results_rf, df_test, type = "prob")
roc_rf <- roc(df_test$dmfail, rf_pred$`1`)
save(roc_rf, file = "~/Desktop/roc_rf.Rda")

## Model-averaged neural network
results_nnet <- train(dmfail ~ .,
                      data = df_train,
                      method = 'avNNet',
                      trControl = cv_opts10,
                      trace = FALSE)
save(results_nnet, file = "~/Desktop/results_nnet.Rda")
nnet_pred <- predict(results_nnet, df_test)
cfm_nnet <- confusionMatrix(nnet_pred, df_test$dmfail, positive = '1')
save(cfm_nnet, file = "~/Desktop/cfm_nnet.Rda")
nnet_pred <- predict(results_nnet, df_test, type = "prob")
roc_nnet <- roc(df_test$dmfail, nnet_pred$`1`)
save(roc_nnet, file = "~/Desktop/roc_nnet.Rda")

## Bagged trees
results_bagg <- train(dmfail ~ .,
                      data = df_train,
                      method = 'treebag',
                      trControl = cv_opts10)
save(results_bagg, file = "~/Desktop/results_bagg.Rda")
bagg_pred <- predict(results_bagg, df_test)
cfm_bagg <- confusionMatrix(bagg_pred, df_test$dmfail, positive = '1')
save(cfm_bagg, file = "~/Desktop/cfm_bagg.Rda")
bagg_pred <- predict(results_bagg, df_test, type = "prob")
roc_bagg <- roc(df_test$dmfail, bagg_pred$`1`)
save(roc_bagg, file = "~/Desktop/roc_bagg.Rda")

### Score the held-out test file with the bagging model
testfile <- "/Users/wongshnyau/Dropbox/mice.testdf.Rda"
load(testfile)
bagg_pred <- predict(results_bagg, micetestdf)
bagg_pred
bagg_prob <- predict(results_bagg, micetestdf, type = "prob")
bagg_prob

### Write the predicted class and probability into the answer sheet
ans <- read.csv("/Users/wongshnyau/Dropbox/dmclass2018predict.csv")
ans$dmfailpredyesno <- bagg_pred
ans$dmfailpredprob <- bagg_prob$`1`
save(ans, file = "~/Dropbox/710661118dmclass2018predict.csv.Rda")
|
c7bd927f7cd3ec4f1b5a7fb4d1c71fd4a33c468b
|
add550bc664a83f3ec02597d0d3116d38cb6e774
|
/man/Q.RW1.Rd
|
12c556f3a6dac778db42bf52767cf6e167443c0c
|
[] |
no_license
|
nmmarquez/ar.matrix
|
f39f61eac1761b8ea42b26eaacb4a89d80e45a65
|
c8f75efb08d5ea926aee5817cce22a940f03efa0
|
refs/heads/master
| 2021-01-11T18:04:46.181292
| 2019-02-16T22:49:58
| 2019-02-16T22:49:58
| 79,485,817
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,278
|
rd
|
Q.RW1.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Q.RW1.R, R/r.RW1.R
\name{Q.RW1}
\alias{Q.RW1}
\alias{r.RW1}
\title{Precision matrix for an RW1 process}
\usage{
Q.RW1(M, sigma, sparse=TRUE)
r.RW1(n, M, sigma)
}
\arguments{
\item{M}{int > 0, number of elements in the RW1 process.}
\item{sigma}{float > 0, pairwise observation standard deviation.}
\item{sparse}{bool Should the matrix be of class 'dsCMatrix'}
\item{n}{int > 0, number of observations to simulate from the GMRF.}
}
\value{
Q.RW1 returns a precision matrix with a RW1 structure.
r.RW1 returns a matrix with n rows which are the n observations of an
Intrinsic Gaussian Markov random field RW1 process.
}
\description{
Functions for creating precision matrices and observations of
an RW1 process
}
\examples{
require("ggplot2")
# simulate RW1 GMRF
obs <- r.RW1(100, M=30, sigma=1)
# resulting matrix is n x M
dim(obs)
# subtract off the first time point to more easily observe correlation
obs_adj <- obs - obs[,1]
# move objects to a data frame
rw1_df <- data.frame(obs=c(t(obs_adj)), realization=rep(1:100, each=30),
time=rep(1:30, 100))
# plot each realization
ggplot(data=rw1_df, aes(time, obs, group=realization, color=realization)) +
geom_line()
}
|
e6e9d4a9c9bfd1c27262890990885cd995805bda
|
1be1c1a52dcdc8b63e19b430f4fba64f8403b7b8
|
/man/get_PHDistSubtreeCorrelation.Rd
|
ec3b711857de4a88bc5de4d2cb3fe15330938f98
|
[] |
no_license
|
JanEngelstaedter/cophy
|
5af999cd1aed5261a1ab5052e1de3348698e146c
|
81853cd5b56502e6f9ab5595606feeb8a37b9bb4
|
refs/heads/master
| 2023-08-16T20:28:45.084296
| 2023-08-14T06:18:45
| 2023-08-14T06:18:45
| 134,203,070
| 3
| 3
| null | 2021-02-18T06:29:43
| 2018-05-21T01:36:30
|
R
|
UTF-8
|
R
| false
| true
| 815
|
rd
|
get_PHDistSubtreeCorrelation.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stats.R
\name{get_PHDistSubtreeCorrelation}
\alias{get_PHDistSubtreeCorrelation}
\title{Correlation between pairwise distances of associated hosts and parasites by
host subtree.}
\usage{
get_PHDistSubtreeCorrelation(cophy, h = NULL, k = NULL)
}
\arguments{
\item{cophy}{a cophylogeny (object of class "cophylogeny") containing one
host and one parasite tree.}
\item{h}{numeric scalar or vector with heights where the tree should be cut.}
\item{k}{an integer scalar or vector with the desired number of groups}
}
\description{
Calculating the correlation between the distance matrices of parasites and
their associated hosts within subtrees specified by particular height.
Requires at least three living parasites.
}
\keyword{internal}
|
877798c5831db60c514df95bb323e5b65be2e8cb
|
f65baf1d2762504d6d341d1a14466c0d519f697c
|
/R/ror.R
|
f527856277c878972503e5650ebc1904b22d6601
|
[] |
no_license
|
gchung05/mdsstat
|
da9815609491cbe391dca971100c504a1714bb02
|
199e4355a3283a8d11ec4b73cd308a89b18553c8
|
refs/heads/master
| 2021-06-02T23:10:44.804219
| 2020-03-09T01:38:34
| 2020-03-09T01:38:34
| 132,389,664
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,954
|
r
|
ror.R
|
#' Reporting Odds Ratio
#'
#' Test on device-events using the reporting odds ratio (ROR). From
#' the family of disproportionality analyses (DPA) used to generate signals of
#' disproportionate reporting (SDRs).
#'
#' For parameter \code{ts_event}, in the uncommon case where the
#' device-event count (Cell A) variable is not \code{"nA"}, the name of the
#' variable may be specified here. Note that the remaining 3 cells of the 2x2
#' contingency table (Cells B, C, D) must be the variables \code{"nB"},
#' \code{"nC"}, and \code{"nD"} respectively in \code{df}. A named character
#' vector may be used where the name is the English description of what was
#' analyzed. Note that if the parameter \code{analysis_of} is specified, it will
#' override this name. Example: \code{ts_event=c("Count of Bone Cement
#' Leakages"="event_count")}
#'
#' @param df Required input data frame of class \code{mds_ts} or, for generic
#' usage, any data frame with the following columns:
#' \describe{
#' \item{time}{Unique times of class \code{Date}}
#' \item{nA}{Cell A count (class \code{numeric}) of the 2x2 table:
#' device/event of interest.}
#' \item{nB}{Cell B count (class \code{numeric}) of the 2x2 table:
#' device/non-event of interest.}
#' \item{nC}{Cell C count (class \code{numeric}) of the 2x2 table:
#' non-device/event of interest.}
#' \item{nD}{Cell D count (class \code{numeric}) of the 2x2 table:
#' non-device/non-event of interest.}
#' }
#' @param ts_event Required if \code{df} is of class \code{mds_ts}. Named string
#' indicating the variable corresponding to the event count (cell A in the 2x2
#' contingency table). In most cases, the default is the appropriate setting.
#' See details for alternative options.
#'
#' Default: \code{c("Count"="nA")} corresponding to the event count column in
#' \code{mds_ts} objects. Name is generated from \code{mds_ts} metadata.
#'
#' @param analysis_of Optional string indicating the English description of what
#' was analyzed. If specified, this will override the name of the
#' \code{ts_event} string parameter.
#'
#' Default: \code{NA} indicates no English description for plain \code{df}
#' data frames, or \code{ts_event} English description for \code{df} data frames
#' of class \code{mds_ts}.
#'
#' Example: \code{"Count of bone cement leakages"}
#'
#' @param eval_period Required positive integer indicating the number of unique
#' times counting in reverse chronological order to sum over to create the 2x2
#' contingency table.
#'
#' Default: \code{1} considers only the most recent time in \code{df}.
#'
#' Example: \code{12} sums over the last 12 time periods to create the 2x2
#' contingency table.
#'
#' @param null_ratio Numeric ROR value representing the null hypothesis, used
#' with \code{alpha} to establish the signal status and the p-value.
#'
#' Default: \code{1} indicates a null hypothesis of ROR=1 and tests if the
#' actual ROR is greater than 1.
#'
#' @param alpha Numeric value representing the statistical alpha used to
#' establish the signal status.
#'
#' Default: \code{0.05} corresponds to the standard alpha value of 5\%.
#'
#' @param cont_adj Numeric value 0 or greater representing the continuity
#' adjustment to be added to each cell of the 2x2 contingency table. A value
#' greater than 0 allows for contingency tables with 0 cells to run the
#' algorithm. A typical non-zero value is 0.5.
#'
#' Default: \code{0} adds zero to each cell, thus an unadjusted table. If any
#' cell of the 2x2 is 0, the algorithm will not run.
#'
#' @param ... Further arguments passed onto \code{ror} methods
#'
#' @return A named list of class \code{mdsstat_test} object, as follows:
#' \describe{
#' \item{test_name}{Name of the test run}
#' \item{analysis_of}{English description of what was analyzed}
#' \item{status}{Named boolean of whether the test was run. The name contains
#' the run status.}
#' \item{result}{A standardized list of test run results: \code{statistic}
#' for the test statistic, \code{lcl} and \code{ucl} for the set
#' confidence bounds, \code{p} for the p-value, \code{signal} status, and
#' \code{signal_threshold}.}
#' \item{params}{The test parameters}
#' \item{data}{The data on which the test was run}
#' }
#'
#' @examples
#' # Basic Example
#' data <- data.frame(time=c(1:25),
#' nA=as.integer(stats::rnorm(25, 25, 5)),
#' nB=as.integer(stats::rnorm(25, 50, 5)),
#' nC=as.integer(stats::rnorm(25, 100, 25)),
#' nD=as.integer(stats::rnorm(25, 200, 25)))
#' a1 <- ror(data)
#' # Example using an mds_ts object
#' a2 <- ror(mds_ts[[3]])
#'
#' @references
#' Stricker BH, Tijssen JG. Serum sickness-like reactions to cefaclor. J Clin Epidemiol. 1992;45(10):1177-84.
#'
#' Bohm R, Klein H.-J. (v2018-10-16). Primer on Disproportionality Analysis. OpenVigil http://openvigil.sourceforge.net/doc/DPA.pdf
#'
#' @export
# S3 generic for the Reporting Odds Ratio test. Dispatch is on the class
# of `df`: mds_ts objects go to ror.mds_ts, plain data frames to
# ror.default.
ror <- function(df, ...) {
  UseMethod("ror", df)
}
#' @describeIn ror ROR on mds_ts data
#' @export
# Adapter method: unpack an mds_ts object into the plain 2x2-cell data
# frame expected by ror.default, then delegate to it.
ror.mds_ts <- function(
  df,
  ts_event = c("Count" = "nA"),
  analysis_of = NA,
  ...
){
  # `ts_event` must name exactly one column of `df` and carry a display name.
  input_param_checker(ts_event, check_names = df, max_length = 1)
  if (is.null(names(ts_event))) stop("ts_event must be named")

  # Missing counts in the default cell-A column are treated as zero events.
  df$nA <- ifelse(is.na(df$nA), 0, df$nA)

  # English description of the analysis: caller-supplied, or derived from
  # the ts_event display name plus the mds_ts metadata.
  if (is.na(analysis_of)) {
    name <- paste(names(ts_event), "of", attributes(df)$dpa_detail$nA)
  } else {
    name <- analysis_of
  }

  # The mds_ts object must carry disproportionality-analysis columns.
  if (!attributes(df)$dpa) {
    stop("Input mds_ts df does not contain data for disproportionality analysis.")
  }
  out <- data.frame(
    time = df$time,
    nA = df[[ts_event]],
    nB = df$nB,
    nC = df$nC,
    nD = df$nD,
    stringsAsFactors = TRUE
  )
  ror.default(out, analysis_of = name, ...)
}
#' @describeIn ror ROR on general data
#' @export
ror.default <- function(
  df,
  analysis_of=NA,
  eval_period=1,
  null_ratio=1,
  alpha=0.05,
  cont_adj=0,
  ...
){
  # Names of the 2x2 contingency-table cells expected as columns of `df`
  # (A = device/event, B = device/non-event, C = non-device/event,
  #  D = non-device/non-event).
  c2x2 <- c("nA", "nB", "nC", "nD")
  # Validate inputs: `df` must be a data frame with `time` plus the four
  # cell columns; the numeric tuning parameters must be in range.
  input_param_checker(df, "data.frame")
  input_param_checker(c("time", c2x2), check_names=df)
  input_param_checker(null_ratio, "numeric")
  input_param_checker(alpha, "numeric")
  input_param_checker(cont_adj, "numeric")
  input_param_checker(eval_period, "numeric", null_ok=F, max_length=1)
  if (eval_period %% 1 != 0) stop("eval_period must be an integer")
  if (null_ratio < 1) stop("null_ratio must be 1 or greater")
  if (alpha <= 0 | alpha >= 1) stop("alpha must be in range (0, 1)")
  if (cont_adj < 0) stop("cont_adj must be 0 or greater")
  # Order by time
  df <- df[order(df$time), ]
  # Restrict to eval_period: keep only the most recent eval_period rows
  # and collapse them into a single 2x2 table by summing each cell.
  # (eval_period cannot actually be NULL here -- null_ok=F above -- so
  # this branch is always taken and `timeRange` is always defined.)
  if (!is.null(eval_period)){
    if (eval_period > nrow(df)){
      stop("eval_period cannot be greater than df rows")
    } else if (eval_period < 1){
      stop("eval_period must be greater than 0")
    } else{
      df <- df[c((nrow(df) - eval_period + 1):nrow(df)), ]
      # Sum over eval_period
      timeRange <- range(df$time)
      df <- cbind(data.frame(time_start=timeRange[1],
                             time_end=timeRange[2], stringsAsFactors=T),
                  data.frame(t(colSums(df[, c2x2], na.rm=T)),
                             stringsAsFactors=T))
      # Apply continuity adjustment: added to every cell so tables with
      # zero counts can still be analyzed when cont_adj > 0
      df[, c2x2] <- df[, c2x2] + cont_adj
    }
  }
  # Return data
  tlen <- nrow(df)  # NOTE(review): computed but unused below
  rd <- list(reference_time=timeRange,
             data=df)
  # Check for non-runnable conditions: the ROR and its log-scale standard
  # error are undefined when any cell of the 2x2 table is zero.
  hyp <- "Not run"
  if(any(df[, c2x2] == 0)){
    rr <- NA
    rs <- stats::setNames(F, "contingency table has zero counts")
  } else{
    # If all conditions are met, run ROR test
    # Calculate ROR = (A/B) / (C/D); `s` is the standard error of
    # log(ROR), sqrt(1/A + 1/B + 1/C + 1/D)
    stat <- (df$nA / df$nB) / (df$nC / df$nD)
    s <- sqrt((1 / df$nA) + (1 / df$nB) + (1 / df$nC) + (1 / df$nD))
    # Establish confidence limits on the natural scale via the normal
    # approximation of log(ROR)
    z <- stats::qnorm(1 - (alpha / 2))
    cl <- c(exp(log(stat) - z * s), exp(log(stat) + z * s))
    # Two-sided p-value against the null ratio, capped at 1
    p <- min(stats::pnorm((log(null_ratio) - log(stat)) / s) * 2, 1)
    # Determine signal & hypothesis
    sig <- p <= alpha
    hyp <- paste0("Two-sided test at alpha=", alpha, " of ROR > ", null_ratio)
    rr <- list(statistic=stats::setNames(stat, "ROR"),
               lcl=cl[1],
               ucl=cl[2],
               p=p,
               signal=sig,
               signal_threshold=stats::setNames(alpha, "critical p-value"),
               sigma=exp(s))
    rs <- stats::setNames(T, "Success")
  }
  # Return test: the standardized mdsstat_test list structure described
  # in the roxygen block above
  out <- list(test_name="Reporting Odds Ratio",
              analysis_of=analysis_of,
              status=rs,
              result=rr,
              params=list(test_hyp=hyp,
                          eval_period=eval_period,
                          null_ratio=null_ratio,
                          alpha=alpha,
                          cont_adj=cont_adj),
              data=rd)
  class(out) <- append(class(out), "mdsstat_test")
  return(out)
}
|
d0995abb96169ca87ab8b1199086a1bec891a77b
|
fc7336e0c1114304ef0eca71527f362a61641125
|
/R/summary.R
|
8d7986ae8847b4467178503d023c0a5043c931e5
|
[] |
no_license
|
anne-laureferchaud/stackr
|
ce8df87f6a572ca787c9f7aa94fa00b84f0839bd
|
3b88196acbd43cecb940db89e88934a6727850fe
|
refs/heads/master
| 2020-12-24T12:13:50.244896
| 2016-03-03T20:34:52
| 2016-03-03T20:34:52
| 42,137,160
| 0
| 0
| null | 2015-09-08T20:26:25
| 2015-09-08T20:26:25
| null |
UTF-8
|
R
| false
| false
| 41,024
|
r
|
summary.R
|
## Summary and tables
#' @title Haplotypes file summary v2
#' @description STACKS batch_x.haplotypes.tsv file summary.
#' Output summary table for populations with putative paralogs,
#' consensus, monomorphic and polymorphic loci. Detected paralogs are removed while estimating the subsequent statistics.
#' The haplotypes statistics for the observed and expected homozygosity and
#' heterozygosity. Wright’s inbreeding coefficient (Fis), and a proxy measure of
#' the realized proportion of the genome that is identical by descent (IBDG),
#' the FH measure based on the excess in the observed number of homozygous
#' genotypes within an individual relative to the mean number of homozygous
#' genotypes expected under random mating
#' (Keller et al., 2011; Kardos et al., 2015).
#' The nucleotide diversity (Pi) is also given. Pi measured here consider the
#' consensus loci in the catalog (no variation between population sequences).
#' @param haplotypes.file The 'batch_x.haplotypes.tsv' created by STACKS.
#' @param whitelist.loci (optional) A whitelist of loci and
#' a column header 'LOCUS'.
#' The whitelist is in the directory (e.g. "whitelist.txt").
#' @param blacklist.id (optional) A blacklist with individual ID and
#' a column header 'INDIVIDUALS'. The blacklist is in the directory
#' (e.g. "blacklist.txt").
#' @param pop.id.start The start of your population id
#' in the name of your individual sample.
#' @param pop.id.end The end of your population id
#' in the name of your individual sample.
#' @param pop.levels An optional character string with your populations ordered.
#' @param read.length The length in nucleotide of your reads (e.g. 80 or 100).
#' @import stringdist
#' @return The function returns a list with the summary, the paralogs and
#' consensus loci by populations and unique loci and 3 plots (use $ to access each
#' components).
#' Write 3 files in the working directory:
#' blacklist of unique putative paralogs and unique consensus loci
#' and a summary of the haplotypes file by population.
#' @details If the object for the function is 'haplotype.file.summary' then:
#'
#' haplo.summary <- haplotype.file.summary$summary
#'
#' paralogs.pop <- haplotype.file.summary$paralogs.pop
#'
#' paralogs.loci <- haplotype.file.summary$paralogs.loci
#'
#' consensus.pop <- haplotype.file.summary$consensus.pop
#'
#' consensus.loci <- haplotype.file.summary$consensus.loci
#'
#' scatter.plot <- haplotype.file.summary$scatter.plot
#'
#' boxplot.pi <- haplotype.file.summary$boxplot.pi
#'
#' boxplot.fh <- haplotype.file.summary$boxplot.fh
#' @rdname summary_haplotypes_v2
#' @export
#' @references Keller MC, Visscher PM, Goddard ME (2011)
#' Quantification of inbreeding due to distant ancestors and its detection
#' using dense single nucleotide polymorphism data. Genetics, 189, 237–249.
#' @references Kardos M, Luikart G, Allendorf FW (2015)
#' Measuring individual inbreeding in the age of genomics: marker-based
#' measures are better than pedigrees. Heredity, 115, 63–72.
#' @references Nei M, Li WH (1979)
#' Mathematical model for studying genetic variation in terms of
#' restriction endonucleases.
#' Proceedings of the National Academy of Sciences of
#' the United States of America, 76, 5269–5273.
#' @author Thierry Gosselin \email{thierrygosselin@@icloud.com} and
#' Anne-Laure Ferchaud \email{annelaureferchaud@@gmail.com}
summary_haplotypes <- function(haplotypes.file,
whitelist.loci = NULL,
blacklist.id = NULL,
pop.id.start,
pop.id.end,
pop.levels,
read.length) {
POP_ID <- NULL
POLYMORPHISM <- NULL
POLYMORPHISM_MAX <- NULL
PARALOGS <- NULL
CONSENSUS <- NULL
CONSENSUS_MAX <- NULL
ALLELES_COUNT <- NULL
ALLELES_COUNT_SUM <- NULL
POP_LEVEL_POLYMORPHISM <- NULL
MONOMORPHIC <- NULL
POLYMORPHIC <- NULL
TOTAL <- NULL
IND_LEVEL_POLYMORPHISM <- NULL
N_GENOT <- NULL
ALLELE_GROUP <- NULL
ALLELES <- NULL
HOM_O <- NULL
HOM_E <- NULL
HET_O <- NULL
HET_E <- NULL
FH <- NULL
PI <- NULL
HOM <- NULL
HET <- NULL
DIPLO <- NULL
FREQ_ALLELES <- NULL
# Import haplotype file ------------------------------------------------------
haplotype <- read_tsv(haplotypes.file, col_names = T) %>%
rename(LOCUS =`Catalog ID`) %>%
gather(INDIVIDUALS, HAPLOTYPES, -c(LOCUS, Cnt)) %>%
mutate(
POP_ID = str_sub(INDIVIDUALS, pop.id.start, pop.id.end),
POP_ID = factor(POP_ID, levels = pop.levels, ordered = T)
)
# Whitelist loci -------------------------------------------------------------
if (missing(whitelist.loci) == "FALSE" & is.vector(whitelist.loci) == "TRUE") {
message("Whitelist of loci: from the directory")
whitelist <- read_tsv(whitelist.loci, col_names = T)
} else if (missing(whitelist.loci) == "FALSE" & is.vector(whitelist.loci) == "FALSE") {
message("Whitelist of loci: from your global environment")
whitelist <- whitelist.loci
} else {
message("Whitelist of loci: no")
whitelist <- NULL
}
# Blacklisted individuals ----------------------------------------------------
if (missing(blacklist.id) == "FALSE" & is.vector(blacklist.id) == "TRUE") {
message("Blacklisted id: file from the directory")
blacklist.id <- read_tsv(blacklist.id, col_names = T)
} else if (missing(blacklist.id) == "FALSE" & is.vector(blacklist.id) == "FALSE") {
message("Blacklisted id: object from your global environment")
blacklist.id <- blacklist.id
} else {
message("Blacklisted id: no")
blacklist.id <- NULL
}
if (is.null(whitelist.loci) == TRUE & is.null(blacklist.id) == TRUE) {
# Combination 1: No whitelist and No blacklist -----------------------------
haplotype <- haplotype
} else if (is.null(whitelist.loci) == FALSE & is.null(blacklist.id) == TRUE) {
# Combination 2: Using whitelist, but No blacklist -------------------------
haplotype <- haplotype %>%
semi_join(whitelist, by = "LOCUS") %>%
arrange(LOCUS)
} else if (is.null(whitelist.loci) == TRUE & is.null(blacklist.id) == FALSE) {
# Combination 3: Using a blacklist of id, but No whitelist -----------------
haplotype <- haplotype %>%
mutate(INDIVIDUALS = as.character(INDIVIDUALS)) %>%
anti_join(blacklist.id, by = "INDIVIDUALS") %>%
arrange(LOCUS)
} else {
# Combination 4: Using a whitelist and blacklist---------------------------
haplotype <- haplotype %>%
semi_join(whitelist, by = "LOCUS") %>%
mutate(INDIVIDUALS = as.character(INDIVIDUALS)) %>%
anti_join(blacklist.id, by = "INDIVIDUALS") %>%
arrange(LOCUS)
}
# dump unused object
whitelist <- NULL
blacklist.id <- NULL
# Paralogs... Locus with > 2 alleles by individuals --------------------------
# Create a blacklist of catalog loci with paralogs
message("Looking for paralogs...")
paralogs.pop <- haplotype %>%
mutate(POLYMORPHISM = stri_count_fixed(HAPLOTYPES, "/")) %>%
group_by(LOCUS, POP_ID) %>%
summarise(POLYMORPHISM_MAX = max(POLYMORPHISM)) %>%
filter(POLYMORPHISM_MAX > 1) %>%
mutate(PARALOGS = rep("paralogs", times = n())) %>%
select(LOCUS, POP_ID, PARALOGS)
blacklist.loci.paralogs <- paralogs.pop %>%
group_by(LOCUS) %>%
select (LOCUS) %>%
distinct(LOCUS) %>%
arrange(LOCUS)
write.table(blacklist.loci.paralogs,
"blacklist.loci.paralogs.txt",
sep = "\t", row.names = F, col.names = T, quote = F)
paralogs.pop
# Haplo filtered paralogs
haplo.filtered.paralogs <- haplotype %>%
filter(!LOCUS %in% blacklist.loci.paralogs$LOCUS)
# Locus with concensus alleles-----------------------------------------------
message("Looking for consensus...")
consensus.pop <- haplotype %>%
mutate(CONSENSUS = stri_count_fixed(HAPLOTYPES, "consensus")) %>%
group_by(LOCUS, POP_ID) %>%
summarise(CONSENSUS_MAX = max(CONSENSUS)) %>%
filter(CONSENSUS_MAX > 0) %>%
mutate(CONSENSUS = rep("consensus", times = n())) %>%
select(LOCUS, POP_ID, CONSENSUS)
blacklist.loci.consensus <- consensus.pop %>%
group_by(LOCUS) %>%
select (LOCUS) %>%
distinct(LOCUS) %>%
arrange(LOCUS)
write.table(blacklist.loci.consensus,
"blacklist.loci.consensus.txt",
sep = "\t", row.names = F, col.names = T, quote = F)
consensus.pop
# Haplo filtered for consensus
haplo.filtered.consensus <- haplotype %>%
filter(!LOCUS %in% consensus.pop$LOCUS)
# Haplo filtered for consensus and paralogs
haplo.filtered.consensus.paralogs <- haplotype %>%
filter(!LOCUS %in% consensus.pop$LOCUS)%>%
filter(!LOCUS %in% blacklist.loci.paralogs$LOCUS)
# Summary dataframe by individual---------------------------------------------
message("Genome-Wide Identity-By-Descent calculations (FH)...")
summary.ind <- haplo.filtered.consensus.paralogs %>%
mutate(ALLELES_COUNT = stri_count_fixed(HAPLOTYPES, "/")) %>%
mutate(
IND_LEVEL_POLYMORPHISM = ifelse(HAPLOTYPES == "-", "missing",
ifelse(ALLELES_COUNT == 0 & HAPLOTYPES != "-", "hom", "het"))
) %>%
group_by(INDIVIDUALS) %>%
summarise(
HOM = length(IND_LEVEL_POLYMORPHISM[IND_LEVEL_POLYMORPHISM == "hom"]),
HET = length(IND_LEVEL_POLYMORPHISM[IND_LEVEL_POLYMORPHISM == "het"]),
MISSING = length(IND_LEVEL_POLYMORPHISM[IND_LEVEL_POLYMORPHISM == "missing"]),
N_GENOT = HOM + HET,
HOM_O = HOM/N_GENOT,
HET_O = HET/N_GENOT
) %>%
mutate(POP_ID = factor(str_sub(INDIVIDUALS, pop.id.start, pop.id.end),
levels = pop.levels, ordered = T)) %>%
arrange(POP_ID, INDIVIDUALS)
freq.alleles.loci.pop <- haplo.filtered.consensus.paralogs %>%
filter(HAPLOTYPES != "-") %>%
group_by(LOCUS, POP_ID) %>%
mutate(DIPLO= length(INDIVIDUALS) *2) %>%
separate(
col = HAPLOTYPES, into = c("ALLELE1", "ALLELE2"),
sep = "/", extra = "drop", remove = F
) %>%
mutate(ALLELE2 = ifelse(is.na(ALLELE2), ALLELE1, ALLELE2)) %>%
select(-Cnt, -HAPLOTYPES, -INDIVIDUALS) %>%
gather(ALLELE_GROUP, ALLELES, -c(LOCUS, POP_ID, DIPLO)) %>%
group_by(LOCUS, POP_ID, ALLELES) %>%
summarise(
FREQ_ALLELES = length(ALLELES)/mean(DIPLO),
HOM_E = FREQ_ALLELES * FREQ_ALLELES
) %>%
select(-FREQ_ALLELES)
freq.loci.pop<- freq.alleles.loci.pop %>%
group_by(LOCUS, POP_ID) %>%
summarise(HOM_E = sum(HOM_E))
freq.pop <- freq.loci.pop %>%
group_by(POP_ID) %>%
summarise(HOM_E = mean(HOM_E))
# IBDg with FH ---------------------------------------------------------------
fh.i <- summary.ind %>%
full_join(freq.pop, by = "POP_ID") %>%
mutate(FH = ((HOM_O - HOM_E)/(N_GENOT - HOM_E)))
fh.pop <- fh.i %>%
group_by(POP_ID) %>%
summarise(
HOM_O = round(mean(HOM_O), 6),
HOM_E = round(mean(HOM_E), 6),
HET_O = round(mean(1-HOM_O), 6),
HET_E = round(mean(1-HOM_E), 6),
FIS = ifelse(HET_O == 0, 0, round (((HET_E - HET_O) / HET_E), 6)),
FH = mean(FH)
)
fh.tot <- fh.i %>%
summarise(
HOM_O = round(mean(HOM_O), 6),
HOM_E = round(mean(HOM_E), 6),
HET_O = round(mean(1-HOM_O), 6),
HET_E = round(mean(1-HOM_E), 6),
FIS = ifelse(HET_O == 0, 0, round (((HET_E - HET_O) / HET_E), 6)),
FH = mean(FH)
)
fh.tot <- data_frame(POP_ID="OVERALL") %>%
bind_cols(fh.tot)
fh.res <- bind_rows(fh.pop, fh.tot) %>% select(-POP_ID)
# fh.i <- NULL
freq.pop <- NULL
summary.ind <- NULL
fh.tot <- NULL
fh.pop <- NULL
# Nei & Li 1979 Nucleotide Diversity -----------------------------------------
message("Nucleotide diversity (Pi) calculations")
pi.data <- haplo.filtered.paralogs %>%
select(-Cnt) %>%
filter(HAPLOTYPES != "-") %>%
separate(
col = HAPLOTYPES, into = c("ALLELE1", "ALLELE2"),
sep = "/", extra = "drop", remove = T
) %>%
mutate(ALLELE2 = ifelse(is.na(ALLELE2), ALLELE1, ALLELE2))
# Pi: by individuals----------------------------------------------------------
message("Pi calculations by individuals...")
pi.data.i <- pi.data %>%
mutate(
PI = (stringdist::stringdist(a = ALLELE1, b = ALLELE2, method = "hamming"))/read.length
) %>%
group_by(INDIVIDUALS) %>%
summarise(PI = mean(PI))
# Pi: by pop------------------------------------------------------------------
message("Pi calculations by populations, take a break...")
pi.data.pop <- pi.data %>%
gather(ALLELE_GROUP, ALLELES, -c(LOCUS, INDIVIDUALS, POP_ID))
df.split.pop <- split(x = pi.data.pop, f = pi.data.pop$POP_ID) # slip data frame by population
pop.list <- names(df.split.pop) # list the pop
pi.res <-list() # create empty list
for (i in pop.list) {
# message of progress for pi calculation by population
pop.pi.calculations <- paste("Pi calculations for pop ", i, sep = "")
message(pop.pi.calculations)
pi <- function(y, read.length) {
if(length(unique(y)) <= 1){
PI <- 0
PI <- data.frame(PI)
} else{
#1 Get all pairwise comparison
allele_pairwise <- combn(unique(y), 2)
# allele_pairwise
#2 Calculate pairwise nucleotide mismatches
pairwise_mismatches <- apply(allele_pairwise, 2, function(z) {
stringdist::stringdist(a = z[1], b = z[2], method = "hamming")
})
# pairwise_mismatches
#3 Calculate allele frequency
allele_freq <- table(y)/length(y)
# allele_freq
#4 Calculate nucleotide diversity from pairwise mismatches and allele frequency
pi.prep <- apply(allele_pairwise, 2, function(y) allele_freq[y[1]] * allele_freq[y[2]])
# pi.prep
# read.length <- 80
PI <- sum(pi.prep*pairwise_mismatches)/read.length
PI <- data.frame(PI)
}
return(PI)
}
pop.data <- df.split.pop[[i]]
pi.pop.data <- pop.data %>%
group_by(LOCUS, POP_ID) %>%
do(., pi(y = .$ALLELES, read.length = read.length))
pi.res[[i]] <- pi.pop.data
}
pi.res <- as.data.frame(bind_rows(pi.res))
pi.res <- pi.res %>% group_by(POP_ID) %>% summarise(PI_NEI = mean(PI))
df.split.pop <- NULL
pop.list <- NULL
# Pi: overall ---------------------------------------------------------------
message("Calculating Pi overall")
pi.overall <- pi.data.pop %>%
group_by(LOCUS) %>%
do(., pi(y = .$ALLELES, read.length = read.length)) %>%
ungroup() %>%
summarise(PI_NEI = mean(PI))
pi.tot <- data_frame(POP_ID="OVERALL") %>% bind_cols(pi.overall)
# Combine the pop and overall data
pi.res <- bind_rows(pi.res, pi.tot) %>% select(-POP_ID)
# Summary dataframe by pop ---------------------------------------------------
message("Working on the summary table")
summary.prep <- haplo.filtered.consensus %>%
filter(HAPLOTYPES != "-") %>%
select(-Cnt, -INDIVIDUALS) %>%
separate(
col = HAPLOTYPES, into = c("ALLELE1", "ALLELE2"),
sep = "/", extra = "drop", remove = T
) %>%
mutate(ALLELE2 = ifelse(is.na(ALLELE2), ALLELE1, ALLELE2)) %>%
gather(ALLELE_GROUP, ALLELES, -c(LOCUS, POP_ID))
summary.pop <- summary.prep %>%
group_by(LOCUS, POP_ID) %>%
distinct(ALLELES) %>%
summarise(ALLELES_COUNT = length(ALLELES)) %>%
group_by(POP_ID) %>%
summarise(
MONOMORPHIC = length(ALLELES_COUNT[ALLELES_COUNT == 1]),
POLYMORPHIC = length(ALLELES_COUNT[ALLELES_COUNT >= 2])
) %>%
full_join(
consensus.pop %>%
group_by(POP_ID) %>%
summarise(CONSENSUS = n_distinct(LOCUS)),
by = "POP_ID"
) %>%
group_by(POP_ID) %>%
mutate(TOTAL = MONOMORPHIC + POLYMORPHIC + CONSENSUS) %>%
full_join(
paralogs.pop %>%
group_by(POP_ID) %>%
summarise(PARALOGS = n_distinct(LOCUS)),
by = "POP_ID"
) %>%
mutate(
MONOMORPHIC_PROP = round(MONOMORPHIC/TOTAL, 4),
POLYMORPHIC_PROP = round(POLYMORPHIC/TOTAL, 4),
PARALOG_PROP = round(PARALOGS/TOTAL, 4)
)
total <- summary.prep %>%
group_by(LOCUS) %>%
distinct(ALLELES) %>%
summarise(ALLELES_COUNT = length(ALLELES)) %>%
summarise(
MONOMORPHIC = length(ALLELES_COUNT[ALLELES_COUNT == 1]),
POLYMORPHIC = length(ALLELES_COUNT[ALLELES_COUNT >= 2])
) %>%
bind_cols(
blacklist.loci.consensus %>%
ungroup %>%
summarise(CONSENSUS = n()) %>%
select(CONSENSUS)
) %>%
mutate(TOTAL = MONOMORPHIC + POLYMORPHIC + CONSENSUS) %>%
bind_cols(
blacklist.loci.paralogs %>%
ungroup %>%
summarise(PARALOGS = n()) %>%
select(PARALOGS)
) %>%
mutate(
MONOMORPHIC_PROP = round(MONOMORPHIC/TOTAL, 4),
POLYMORPHIC_PROP = round(POLYMORPHIC/TOTAL, 4),
PARALOG_PROP = round(PARALOGS/TOTAL, 4)
)
total.res <- data_frame(POP_ID="OVERALL") %>%
bind_cols(total)
summary <- bind_rows(summary.pop, total.res)
summary <- bind_cols(summary, fh.res, pi.res)
if (missing(whitelist.loci) == "FALSE") {
write.table(summary, "haplotype.catalog.loci.whitelist.summary.pop.tsv",
sep = "\t", row.names = F, col.names = T, quote = F)
filename.sum <- "haplotype.catalog.loci.whitelist.summary.pop.tsv"
} else {
write.table(summary, "haplotype.catalog.loci.summary.pop.tsv",
sep = "\t", row.names = F, col.names = T, quote = F)
filename.sum <- "haplotype.catalog.loci.summary.pop.tsv"
}
# Figures --------------------------------------------------------------------
fh.pi <- pi.data.i %>%
full_join(
fh.i %>% select(INDIVIDUALS, POP_ID, FH)
, by = "INDIVIDUALS")
scatter.plot <- ggplot(fh.pi, aes(x = FH, y = PI)) +
geom_point(aes(colour = POP_ID)) +
stat_smooth(method = lm, level = 0.95, fullrange = F, na.rm = T)+
labs(x = "Individual IBDg (FH)") +
labs(y = "Individual nucleotide diversity (Pi)") +
theme(
axis.title.x = element_text(size = 10, family = "Helvetica", face = "bold"),
axis.title.y = element_text(size = 10, family = "Helvetica", face = "bold"),
legend.title = element_text(size = 10, family = "Helvetica", face = "bold"),
legend.text = element_text(size = 10, family = "Helvetica", face = "bold"),
strip.text.y = element_text(angle = 0, size = 10, family = "Helvetica", face = "bold"),
strip.text.x = element_text(size = 10, family = "Helvetica", face = "bold")
)
boxplot.pi <- ggplot(fh.pi, aes(x = factor(POP_ID), y = PI, na.rm = T))+
geom_violin(trim = F)+
geom_boxplot(width = 0.1, fill = "black", outlier.colour = NA)+
stat_summary(fun.y = "mean", geom = "point", shape = 21, size = 2.5, fill = "white")+
labs(x = "Sampling sites")+
labs(y = "Individual nucleotide diversity (Pi)")+
theme(
legend.position = "none",
axis.title.x = element_text(size = 10, family = "Helvetica", face = "bold"),
axis.title.y = element_text(size = 10, family = "Helvetica", face = "bold"),
legend.title = element_text(size = 10, family = "Helvetica", face = "bold"),
legend.text = element_text(size = 10, family = "Helvetica", face = "bold"),
strip.text.x = element_text(size = 10, family = "Helvetica", face = "bold")
)
boxplot.fh <- ggplot(fh.pi, aes(x = factor(POP_ID), y = FH, na.rm = T))+
geom_violin(trim = F)+
geom_boxplot(width = 0.1, fill = "black", outlier.colour = NA)+
stat_summary(fun.y = "mean", geom = "point", shape = 21, size = 2.5, fill = "white")+
labs(x = "Sampling sites")+
labs(y = "Individual IBDg (FH)")+
theme(
legend.position = "none",
axis.title.x = element_text(size = 10, family = "Helvetica", face = "bold"),
axis.title.y = element_text(size = 10, family = "Helvetica", face = "bold"),
legend.title = element_text(size = 10, family = "Helvetica", face = "bold"),
legend.text = element_text(size = 10, family = "Helvetica", face = "bold"),
strip.text.x = element_text(size = 10, family = "Helvetica", face = "bold")
)
invisible(cat(sprintf(
"The number of loci in the catalog = %s LOCI
The number of putative paralogs loci in the catalog (> 2 alleles) = %s LOCI
The number of loci in the catalog with consensus alleles = %s LOCI
3 files were written in this directory: %s
1. blacklist.loci.paralogs.txt
2. blacklist.loci.consensus.txt
3. %s",
n_distinct(haplotype$LOCUS),
n_distinct(paralogs.pop$LOCUS),
n_distinct(consensus.pop$LOCUS),
getwd(),
filename.sum
)))
# Results
results <- list()
results$summary <- summary
results$paralogs.pop <- paralogs.pop
results$paralogs.loci <- blacklist.loci.paralogs
results$consensus.pop <- consensus.pop
results$consensus.loci <- blacklist.loci.consensus
results$scatter.plot <- scatter.plot
results$boxplot.pi <- boxplot.pi
results$boxplot.fh <- boxplot.fh
return(results)
}
#' @title Import and summarise the batch_x.hapstats.tsv file
#' @description Import and summarise the batch_x.hapstats.tsv file.
#' Necessary preparation for density distribution and box plot figures.
#' @param data The 'batch_x.hapstats.tsv' created by STACKS.
#' @param pop.num The number of populations analysed.
#' @param pop.col.types \code{"integer"} or \code{"character"} used in STACKS populations module?
#' @param pop.integer.equi When Integer was used for your population id,
#' give the character equivalence
#' @param pop.levels A character string with your populations in order.
#' @rdname summary_hapstats
#' @export
summary_hapstats <- function(data, pop.num, pop.col.types, pop.integer.equi, pop.levels) {
  # Declared to silence R CMD check notes on non-standard-evaluation names.
  POP_ID <- NULL
  # The STACKS hapstats file starts with a header line plus one comment line
  # per population before the data.
  skip.lines <- pop.num + 1
  # BUG FIX: the original used two independent `if` statements, so when
  # pop.col.types == "integer" the second statement's `else` branch reset
  # col.types back to NULL, silently discarding the integer column spec.
  if (pop.col.types == "integer") {
    col.types <- "iiciiiiddddddc"
  } else if (pop.col.types == "character") {
    col.types <- "iiciciiddddddc"
  } else {
    col.types <- NULL
  }
  hapstats <- read_tsv(data,
                       na = "NA",
                       skip = skip.lines,
                       progress = interactive(),
                       col_names = c("BATCH_ID", "LOCUS", "CHR", "BP", "POP_ID", "N", "HAPLOTYPE_CNT", "GENE_DIVERSITY", "SMOOTHED_GENE_DIVERSITY", "SMOOTHED_GENE_DIVERSITY_PVALUE", "HAPLOTYPE_DIVERSITY", "SMOOTHED_HAPLOTYPE_DIVERSITY", "SMOOTHED_HAPLOTYPE_DIVERSITY_PVALUE", "HAPLOTYPES"),
                       col_types = col.types) %>%
    mutate (
      # Replace integer population ids by their character equivalents, then
      # order the factor according to pop.levels.
      POP_ID = stri_replace_all_fixed(POP_ID, seq(from = 1, to = pop.num, by = 1), pop.integer.equi, vectorize_all=F),
      POP_ID = factor(POP_ID, levels = pop.levels, ordered = T)
    ) %>%
    arrange(LOCUS, POP_ID)
  # separate(HAPLOTYPES, c("ALLELE_P", "ALLELE_Q"), sep = "/", extra = "error", remove = F) %>%
  # Return visibly (the original ended on an assignment, which returns the
  # value invisibly).
  hapstats
}
## VCF
#' @title Summary statistics of a tidy VCF by population and markers
#' @description Summarise and prepare the tidy VCF.
#' Summary, by population and markers (SNP), of frequency of the REF
#' and the ALT alleles, the observed and the expected heterozygosity
#' and the inbreeding coefficient. The Global MAF of Loci,
#' with STACKS GBS/RAD loci = read or de novo haplotypes,
#' is included and repeated over SNP.
#' @param filename (optional) Name of the file written to the working directory.
#' @param data The tidy VCF file created with \link{read_stacks_vcf}.
#' @rdname summary_stats_vcf_tidy
#' @export
summary_stats_vcf_tidy <- function(data, filename) {
  # Declared to silence R CMD check notes on non-standard-evaluation names.
  GT <- NULL
  GL <- NULL
  INDIVIDUALS <- NULL
  POP_ID <- NULL
  N <- NULL
  HET_O <- NULL
  HOM_O <- NULL
  HET_E <- NULL
  HOM_E <- NULL
  FREQ_ALT <- NULL
  FREQ_REF <- NULL
  GLOBAL_MAF <- NULL
  PP <- NULL
  PQ <- NULL
  QQ <- NULL
  # Genotype counts per SNP (POS) per population, missing genotypes "./."
  # excluded: PP = homozygote REF, PQ = heterozygote, QQ = homozygote ALT.
  vcf.summary <- data %>%
    filter(GT != "./.") %>%
    group_by(LOCUS, POS, POP_ID) %>%
    summarise(
      N = as.numeric(n()),
      PP = as.numeric(length(GT[GT == "0/0"])),
      PQ = as.numeric(length(GT[GT == "1/0" | GT == "0/1"])),
      QQ = as.numeric(length(GT[GT == "1/1"]))
    ) %>%
    mutate(
      FREQ_REF = ((PP*2) + PQ)/(2*N),
      FREQ_ALT = ((QQ*2) + PQ)/(2*N),
      HET_O = PQ/N,
      HET_E = 2 * FREQ_REF * FREQ_ALT,
      # Inbreeding coefficient; guarded against division by zero when there
      # are no heterozygotes.
      FIS = ifelse(HET_O == 0, 0, round (((HET_E - HET_O) / HET_E), 6))
    )
  # Global MAF per SNP, pooled over all populations.
  # NOTE(review): summarise_each_() is deprecated in recent dplyr; kept for
  # consistency with the rest of this file.
  global.maf <- vcf.summary %>%
    group_by(LOCUS, POS) %>%
    summarise_each_(funs(sum), vars = c("N", "PP", "PQ", "QQ")) %>%
    mutate(GLOBAL_MAF = (PQ + (2 * QQ)) / (2*N)) %>%
    select(LOCUS, POS, GLOBAL_MAF)
  vcf.prep <- global.maf %>%
    left_join(vcf.summary, by = c("LOCUS", "POS"))
  # Reorder columns for a user-friendly layout.
  vcf.prep <- vcf.prep[c("LOCUS", "POS", "POP_ID", "N", "PP", "PQ", "QQ", "FREQ_REF", "FREQ_ALT", "GLOBAL_MAF", "HET_O", "HET_E", "FIS")]
  # FIX: the original tested `missing(filename) == "FALSE"`, comparing a
  # logical against a string; use the logical directly. The unused local
  # `saving` was also removed.
  if (!missing(filename)) {
    message("Saving the file in your working directory...")
    write_tsv(vcf.prep, filename, append = FALSE, col_names = TRUE)
  }
  return(vcf.prep)
}
#' @title Summary statistics of a tidy VCF by population
#' @description Summarise the tidy VCF.
#' The populations summary on : frequency of the REF
#' and the ALT alleles, the observed and the expected heterozygosity
#' and the inbreeding coefficient. The Global MAF of Loci,
#' with STACKS GBS/RAD loci = read or de novo haplotypes,
#' is included and repeated over SNP.
#' @param filename (optional) Name of the file written to the working directory.
#' @param data The tidy VCF file created with read_stacks_vcf.
#' @rdname summary_stats_pop
#' @export
summary_stats_pop <- function(data, filename) {
  # Declared to silence R CMD check notes on non-standard-evaluation names.
  POP_ID <- NULL
  N <- NULL
  HET_O <- NULL
  HET_E <- NULL
  FREQ_REF <- NULL
  FIS <- NULL
  SNP <- NULL
  LOCUS <- NULL
  # Population-level means over the per-SNP summary produced by
  # summary_stats_vcf_tidy().
  vcf.summary <- data %>%
    group_by(POP_ID) %>%
    summarise(
      SNP = length(unique(POS)),
      LOCUS = length(unique(LOCUS)),
      N = max(N, na.rm = TRUE),
      FREQ_REF = mean(FREQ_REF, na.rm = TRUE),
      HET_O = mean(HET_O, na.rm = TRUE),
      HET_E = mean(HET_E, na.rm = TRUE),
      FIS = mean(FIS, na.rm = TRUE)
    ) %>%
    select(POP_ID, N, SNP, LOCUS, FREQ_REF, HET_O, HET_E, FIS)
  # FIX: the original tested `missing(filename) == "FALSE"`, comparing a
  # logical against a string; use the logical directly. The unused local
  # `saving` was also removed.
  if (!missing(filename)) {
    message("Saving the file in your working directory...")
    write_tsv(vcf.summary, filename, append = FALSE, col_names = TRUE)
  }
  return(vcf.summary)
}
## Coverage
#' @title Coverage summary
#' @description This function create a table summary of the important
#' coverage statistics from the tidy vcf created with read_stacks_vcf.
#' @param tidy.vcf.file The tidy VCF file created with read_stacks_vcf.
#' @param pop.levels Character string defining your ordered populations.
#' @param filename Name of the file saved to the working directory.
#' @details The tables contains summary statistics (mean, median, min, max)
#' of read, ref and alt allele coverage. To access
#' the two tables, use $. The table that summarise by populations was created
#' using average nested: loci -> individuals -> populations.
#' The long format is used for creating figures.
#' @return A list with 2 tables: the long format of loci and populations
#' coverage statistics and the short format by populations.
#' The short-format is more user-friendly and
#' is written to the working directory.
#' @rdname summary_coverage
#' @export
summary_coverage <- function (tidy.vcf.file, pop.levels, filename) {
  # Declared to silence R CMD check notes on non-standard-evaluation names.
  POP_ID <- NULL
  READ_DEPTH <- NULL
  ALLELE_REF_DEPTH <- NULL
  ALLELE_ALT_DEPTH <- NULL
  INDIVIDUALS <- NULL
  # Accept either a path on disk or a data frame from the global environment.
  # FIX: use the logical returned by is.vector() directly instead of
  # comparing it to the string "TRUE".
  if (is.vector(tidy.vcf.file)) {
    data <- read_tsv(tidy.vcf.file, col_names = T, col_types = "iiiiccddcdccddddc")
    message("Using the file in your directory")
  } else {
    data <- tidy.vcf.file
    message("Using the file from your global environment")
  }
  # Coverage statistics (mean/median/min/max of read, REF and ALT allele
  # depth) by locus and population, reshaped to long format for figures.
  coverage.sum.loci <- data %>%
    group_by(LOCUS, POP_ID) %>%
    summarise(
      READ_MEAN = mean(READ_DEPTH, na.rm = T),
      READ_MEDIAN = median(READ_DEPTH, na.rm = T),
      READ_MIN = min(READ_DEPTH, na.rm = T),
      READ_MAX = max(READ_DEPTH, na.rm = T),
      REF_MEAN = mean(ALLELE_REF_DEPTH, na.rm = T),
      REF_MEDIAN = median(ALLELE_REF_DEPTH, na.rm = T),
      REF_MIN = min(ALLELE_REF_DEPTH, na.rm = T),
      REF_MAX = max(ALLELE_REF_DEPTH, na.rm = T),
      ALT_MEAN = mean(ALLELE_ALT_DEPTH, na.rm = T),
      ALT_MEDIAN = median(ALLELE_ALT_DEPTH, na.rm = T),
      ALT_MIN = min(ALLELE_ALT_DEPTH, na.rm = T),
      ALT_MAX = max(ALLELE_ALT_DEPTH, na.rm = T)
    ) %>%
    melt(
      id.vars = c("LOCUS", "POP_ID"),
      variable.name = "COVERAGE_GROUP",
      value.name = "VALUE"
    )
  # FIX: use missing() directly (the original compared it to "TRUE").
  if (missing(pop.levels)) {
    coverage <- coverage.sum.loci
  } else {
    coverage <- coverage.sum.loci %>%
      mutate(POP_ID = factor(POP_ID, levels = pop.levels, ordered = T))
  }
  # By population: average nested loci -> individuals -> populations.
  coverage.sum.pop <- data %>%
    group_by(POP_ID, INDIVIDUALS) %>%
    summarise(
      READ_DEPTH_MEAN = mean(READ_DEPTH, na.rm = T),
      READ_DEPTH_MEDIAN = median(READ_DEPTH, na.rm = T),
      READ_DEPTH_MIN = min(READ_DEPTH, na.rm = T),
      READ_DEPTH_MAX = max(READ_DEPTH, na.rm = T),
      ALLELE_REF_DEPTH_MEAN = mean(ALLELE_REF_DEPTH, na.rm = T),
      ALLELE_REF_DEPTH_MEDIAN = median(ALLELE_REF_DEPTH, na.rm = T),
      ALLELE_REF_DEPTH_MIN = min(ALLELE_REF_DEPTH, na.rm = T),
      ALLELE_REF_DEPTH_MAX = max(ALLELE_REF_DEPTH, na.rm = T),
      ALLELE_ALT_DEPTH_MEAN = mean(ALLELE_ALT_DEPTH, na.rm = T),
      ALLELE_ALT_DEPTH_MEDIAN = median(ALLELE_ALT_DEPTH, na.rm = T),
      ALLELE_ALT_DEPTH_MIN = min(ALLELE_ALT_DEPTH, na.rm = T),
      ALLELE_ALT_DEPTH_MAX = max(ALLELE_ALT_DEPTH, na.rm = T)
    ) %>%
    group_by(POP_ID) %>%
    summarise_each_(funs(mean), vars = c("READ_DEPTH_MEAN", "READ_DEPTH_MEDIAN", "READ_DEPTH_MIN", "READ_DEPTH_MAX", "ALLELE_REF_DEPTH_MEAN", "ALLELE_REF_DEPTH_MEDIAN", "ALLELE_REF_DEPTH_MIN", "ALLELE_REF_DEPTH_MAX", "ALLELE_ALT_DEPTH_MEAN", "ALLELE_ALT_DEPTH_MEDIAN", "ALLELE_ALT_DEPTH_MIN", "ALLELE_ALT_DEPTH_MAX")) %>%
    melt(
      id.vars = c("POP_ID"),
      # NOTE(review): this name looks copy-pasted from the genotype
      # likelihood function; kept as-is because callers may rely on it.
      variable.name = "GENOTYPE_LIKELIHOOD_GROUP",
      value.name = "VALUE"
    )
  # Overall row appended as "TOTAL".
  coverage.summary.total <- coverage.sum.pop %>%
    summarise_each(funs(mean))
  coverage.summary.total[1,1] <- "TOTAL"
  if (missing(pop.levels)) {
    coverage.summary.pop.total <- coverage.sum.pop %>%
      rbind(coverage.summary.total)
  } else {
    coverage.summary.pop.total <- coverage.sum.pop %>%
      rbind(coverage.summary.total) %>%
      mutate(POP_ID = factor(POP_ID, levels = c(pop.levels, "TOTAL"), ordered = T)) %>%
      arrange(POP_ID)
  }
  write.table(coverage.summary.pop.total, filename, sep = "\t", row.names = F, col.names = T, quote = F)
  invisible(cat(sprintf(
    "Filename: %s
Written in this directory: %s",
    filename,
    getwd()
  )))
  # results
  results <- list()
  # BUG FIX: the original returned coverage.sum.loci here, silently
  # discarding the pop.levels ordering computed into `coverage` above.
  results$coverage.summary.long <- coverage
  results$coverage.summary.pop <- coverage.summary.pop.total
  return(results)
}
#' @title Table of low coverage genotypes
#' @description This function create a table summary of the genotypes
#' below a user-define threshold.
#' coverage statistics by populations.
#' @param tidy.vcf.file The tidy VCF file created with read_stacks_vcf.
#' @param pop.levels Character string defining your ordered populations.
#' @param read.depth.threshold The read depth threshold to evaluate.
#' @param filename.low.coverage Filename of the low coverage table written
#' in the working directory.
#' @param filename.low.coverage.imbalance Filename of ...
#' @return a list of 2 tables (accessed with $). The values in the tables
#' represent percentage of samples.
#' @details work in progress....
#' Table 1: low coverage summary $low.coverage.summary (homo- and
#' hetero- zygotes genotypes).
#' Table 2: summary of coverage imbalance between alleles in the heterozygotes.
#' 0/0 : homozygote REF allele.
#' 1/1 : homozygote ALT allele.
#' 0/1 : heterozygote with coverage REF > ALT allele.
#' 1/0 : heterozygote with coverage REF < ALT allele.
#' @rdname table_low_coverage_summary
#' @export
table_low_coverage_summary <- function(tidy.vcf.file,
                                       pop.levels,
                                       read.depth.threshold,
                                       filename.low.coverage,
                                       filename.low.coverage.imbalance) {
  # Declared to silence R CMD check notes on non-standard-evaluation names.
  INDIVIDUALS <- NULL
  POP_ID <- NULL
  GT <- NULL
  READ_DEPTH <- NULL
  ALLELE_COVERAGE_RATIO <- NULL
  SAMPLES_NUMBER <- NULL
  TOTAL_NUMBER <- NULL
  IMBALANCE_NUMBER <- NULL
  # Accept either a path on disk or a data frame from the global environment.
  # FIX: use the logical from is.vector() directly instead of comparing it
  # to the string "TRUE".
  if (is.vector(tidy.vcf.file)) {
    data <- read_tsv(tidy.vcf.file,
                     col_names = T,
                     col_types = "diidccddccccdddddc") %>%
      mutate(INDIVIDUALS = factor(INDIVIDUALS))
    message("Using the file in your directory")
  } else {
    data <- tidy.vcf.file
    message("Using the file from your global environment")
  }
  # BUG FIX: the original re-assigned `data <- tidy.vcf.file` in this step,
  # which clobbered the freshly imported data frame with the file *path*
  # whenever a filename was supplied above. Operate on `data` instead.
  if (!missing(pop.levels)) {
    data <- data %>%
      mutate(POP_ID = factor(POP_ID, levels = pop.levels, ordered = T))
  }
  # Per genotype class and population: total genotypes, genotypes below the
  # read-depth threshold, and low-coverage heterozygotes with allele
  # coverage imbalance.
  low.coverage.summary <- data %>%
    filter(GT != "./.") %>%
    select(GT, POP_ID) %>%
    group_by(GT, POP_ID) %>%
    summarise(
      TOTAL_NUMBER = n()
    ) %>%
    full_join(
      data %>%
        filter(READ_DEPTH < read.depth.threshold & GT != "./.") %>%
        group_by(GT, POP_ID) %>%
        summarise(
          SAMPLES_NUMBER = n()
        ),
      by = c("GT", "POP_ID")
    ) %>%
    full_join(
      data %>%
        filter(READ_DEPTH < read.depth.threshold & GT != "./.") %>%
        filter(ALLELE_COVERAGE_RATIO != "NA" & ALLELE_COVERAGE_RATIO != 0 ) %>%
        group_by(GT, POP_ID) %>%
        summarise(
          IMBALANCE_NUMBER = n()
        ),
      by = c("GT", "POP_ID")
    ) %>%
    mutate(
      LOW_COVERAGE_PERCENT = round(SAMPLES_NUMBER / TOTAL_NUMBER * 100, 2),
      IMBALANCE_PERCENT = round(IMBALANCE_NUMBER / TOTAL_NUMBER * 100, 2)
    )
  # Wide table: one column per genotype class, values = % of samples.
  low.coverage.summary.table <- low.coverage.summary %>%
    dcast(POP_ID ~ GT, value.var = "LOW_COVERAGE_PERCENT")
  write.table(low.coverage.summary.table, filename.low.coverage, sep = "\t", row.names = F, col.names = T, quote = F)
  # Imbalance table restricted to heterozygotes.
  low.coverage.imbalance.summary.table <- low.coverage.summary %>%
    filter(GT != "0/0" & GT != "1/1") %>%
    dcast(POP_ID ~ GT, value.var = "IMBALANCE_PERCENT")
  write.table(low.coverage.imbalance.summary.table, filename.low.coverage.imbalance, sep = "\t", row.names = F, col.names = T, quote = F)
  invisible(
    cat(
      sprintf(
        "2 files:
%s
%s\n
Written in the directory:
%s",
        filename.low.coverage, filename.low.coverage.imbalance, getwd()
      )))
  res <- list()
  res$low.coverage.summary <- low.coverage.summary.table
  res$heterozygote.imbalance <- low.coverage.imbalance.summary.table
  return(res)
}
## Genotype likelihood ###
#' @title Genotype likelihood summary
#' @description This function create 2 tables summary of the important
#' genotype likelihood statistics from the tidy vcf created
#' with \link{read_stacks_vcf}.
#' @param tidy.vcf.file The tidy VCF file created with read_stacks_vcf.
#' @param pop.levels Character string defining your ordered populations.
#' @param filename The name of the file written in the directory.
#' @return A list with 2 tables: the long format of loci ($gl.summary.long)
#' and populations genotype likelihood statistics
#' and the short format by populations ($gl.summary.pop).
#' The short-format is more user-friendly and
#' is written to the working directory.
#' @details The table contains summary statistics: mean, median, min, max and
#' diff (max-min), of genotype likelihood by locus and populations. To access
#' the two tables, use $. The table that summarise by populations was created
#' using average nested: loci -> individuals -> populations.
#' @rdname summary_genotype_likelihood
#' @export
summary_genotype_likelihood <- function(tidy.vcf.file, pop.levels, filename){
  # Declared to silence R CMD check notes on non-standard-evaluation names.
  POP_ID <- NULL
  GL <- NULL
  GL_MAX <- NULL
  GL_MIN <- NULL
  INDIVIDUALS <- NULL
  # Accept either a path on disk or a data frame from the global environment.
  # FIX: use the logical from is.vector() directly instead of comparing it
  # to the string "TRUE".
  if (is.vector(tidy.vcf.file)) {
    data <- read_tsv(tidy.vcf.file, col_names = T, col_types = "iiiiccddcdccddddc")
    message("Using the file in your directory")
  } else {
    data <- tidy.vcf.file
    message("Using the file from your global environment")
  }
  # Genotype-likelihood stats (mean/median/min/max/diff) by locus and
  # population, reshaped to long format for figures.
  GL.loci.pop <- data %>%
    group_by(LOCUS, POP_ID) %>%
    summarise(
      GL_MEAN = mean(GL, na.rm = T),
      GL_MEDIAN = median(GL, na.rm = T),
      GL_MIN = min(GL, na.rm = T),
      GL_MAX = max(GL, na.rm = T),
      GL_DIFF = GL_MAX - GL_MIN
    ) %>%
    melt(
      id.vars = c("LOCUS", "POP_ID"),
      variable.name = "GENOTYPE_LIKELIHOOD_GROUP",
      value.name = "VALUE"
    )
  # FIX: use missing() directly (the original compared it to "TRUE").
  if (missing(pop.levels)) {
    GL.loci.pop.summary <- GL.loci.pop
  } else {
    GL.loci.pop.summary <- GL.loci.pop %>%
      mutate(POP_ID = factor(POP_ID, levels = pop.levels, ordered = T)) %>%
      arrange(POP_ID)
  }
  # By population: average nested loci -> individuals -> populations.
  GL.pop <- data %>%
    group_by(POP_ID, INDIVIDUALS) %>%
    summarise(
      GL_MEAN = mean(GL, na.rm = T),
      GL_MEDIAN = median(GL, na.rm = T),
      GL_MIN = min(GL, na.rm = T),
      GL_MAX = max(GL, na.rm = T),
      GL_DIFF = GL_MAX - GL_MIN
    ) %>%
    group_by(POP_ID) %>%
    summarise_each_(funs(mean), vars = c("GL_MEAN", "GL_MEDIAN", "GL_MIN", "GL_MAX", "GL_DIFF")) %>%
    melt(
      id.vars = c("POP_ID"),
      variable.name = "GENOTYPE_LIKELIHOOD_GROUP",
      value.name = "VALUE"
    )
  if (missing(pop.levels)) {
    GL.pop.summary <- GL.pop
  } else {
    GL.pop.summary <- GL.pop %>%
      mutate(POP_ID = factor(POP_ID, levels = pop.levels, ordered = T)) %>%
      arrange(POP_ID)
  }
  # Wide, user-friendly table written to the working directory.
  GL.pop.summary.table <- GL.pop.summary %>%
    dcast(POP_ID ~ GENOTYPE_LIKELIHOOD_GROUP, value.var = "VALUE")
  message("Saving the summary table by pop in your working directory")
  write.table(GL.pop.summary.table,
              filename,
              sep = "\t",
              row.names = F,
              col.names = T,
              quote = F
  )
  invisible(cat(sprintf(
    "Filename:
%s
Written in the directory:
%s",
    filename, getwd()
  )))
  # results
  results <- list()
  results$gl.summary.long <- GL.loci.pop.summary
  results$gl.summary.pop <- GL.pop.summary.table
  return(results)
}
#' @title Import and summarise the batch_x.phistats.tsv file
#' @description Import and summarise the batch_x.phistats.tsv file.
#' Necessary preparation for density distribution and box plot figures.
#' @param data The 'batch_x.phistats.tsv' created by STACKS.
#' @param skip.lines The number of line without the header
#' to start reading the data.
#' @rdname summary_phistats
#' @export
summary_phistats <- function(data, skip.lines) {
  # Declared to silence R CMD check notes on the column names used below via
  # non-standard evaluation.
  BATCH_ID <- NULL
  LOCUS <- NULL
  CHR <- NULL
  BP <- NULL
  POP_COUNT <- NULL
  PHI_ST <- NULL
  SMOOTHED_PHI_ST <- NULL
  SMOOTHED_PHI_ST_P_VALUE <- NULL
  PHI_CT <- NULL
  SMOOTHED_PHI_CT <- NULL
  SMOOTHED_PHI_CT_P_VALUE <- NULL
  PHI_SC <- NULL
  SMOOTHED_PHI_SC <- NULL
  SMOOTHED_PHI_SC_P_VALUE <- NULL
  FST_PRIME <- NULL
  SMOOTHED_FST_PRIME <- NULL
  SMOOTHED_FST_PRIME_P_VALUE <- NULL
  D_EST <- NULL
  SMOOTHED_D_EST <- NULL
  SMOOTHED_D_EST_P_VALUE <- NULL
  # Import the STACKS phistats file: skip.lines skips the leading comment
  # lines, col_types pins every column type, and col_names replaces the
  # STACKS header with stable names.
  phistats <- read_tsv(data,
                       na = "NA",
                       skip = skip.lines,
                       progress = interactive(),
                       col_types = "iiciiddddddddddddddd",
                       col_names = c("BATCH_ID", "LOCUS", "CHR", "BP", "POP_COUNT", "PHI_ST", "SMOOTHED_PHI_ST", "SMOOTHED_PHI_ST_P_VALUE", "PHI_CT", "SMOOTHED_PHI_CT", "SMOOTHED_PHI_CT_P_VALUE", "PHI_SC", "SMOOTHED_PHI_SC", "SMOOTHED_PHI_SC_P_VALUE", "FST_PRIME", "SMOOTHED_FST_PRIME", "SMOOTHED_FST_PRIME_P_VALUE", "D_EST", "SMOOTHED_D_EST", "SMOOTHED_D_EST_P_VALUE")
  ) %>%
    # Drop the kernel-smoothed columns and their p-values; keep only the raw
    # per-locus statistics.
    select(-c(BATCH_ID, CHR, SMOOTHED_PHI_ST, SMOOTHED_PHI_ST_P_VALUE, SMOOTHED_PHI_CT, SMOOTHED_PHI_CT_P_VALUE, SMOOTHED_PHI_SC, SMOOTHED_PHI_SC_P_VALUE, SMOOTHED_FST_PRIME, SMOOTHED_FST_PRIME_P_VALUE, SMOOTHED_D_EST, SMOOTHED_D_EST_P_VALUE)) %>%
    # Reshape to long format for density/box-plot figures.
    # NOTE(review): reshape2::melt() expects a single string for
    # variable.name; passing a length-3 vector here looks wrong — confirm
    # the intended column name for the gathered statistics.
    melt(
      id.vars = c("LOCUS","BP","POP_COUNT"),
      variable.name = c("PHI_ST", "FST_PRIME", "D_EST"),
      value.name = "VALUE"
    )
  # NOTE(review): the function ends on an assignment, so the result is
  # returned invisibly.
}
|
abcbf5a5a58d80f4e0a0bd8d5ac77ee62048b938
|
5e8eb93d9c1ece167da266e1d3eb4920453b6579
|
/survival_boosting_d7.R
|
4bf7beb2c68d6864aaabf19fcabc79bb6034746c
|
[] |
no_license
|
CodeYueXiong/Prediction_Challenge_T2E_ML
|
d5496ae233ab5cfae08c4ba4775ee9d9e2c9eb84
|
e5e9d12e26e786e0dffd472cf96d2bd563d42091
|
refs/heads/master
| 2023-03-17T12:11:16.374182
| 2021-02-24T08:58:26
| 2021-02-24T08:58:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 32,429
|
r
|
survival_boosting_d7.R
|
##################################################
##################################################
########## Boosting in COX Regression ############
################## dataframe1 ####################
##################################################
##################################################
#==========================================================================================
# clear all workspace
# NOTE(review): rm(list = ls()) only clears the global environment; it does not
# detach packages or reset options, and is discouraged inside scripts.
rm(list=ls())
# install the necessary packages
library(mlr3)
library(mlr3proba)
library(mlr3learners)
library(mlr3extralearners)
library(data.table)
library(mlr3viz)
library(mlr3tuning)
library(mlr3pipelines)
library(purrr)
library(mboost)
library(CoxBoost)
library(survival)
library("paradox")
library("ggrepel")
library(precrec)
library(ggplot2)
library("ggpubr")
## library(xgboost)
# set or change R working directory
# NOTE(review): hard-coded user-specific absolute path -- fails on any other
# machine; the rstudioapi call below also only works inside an RStudio session.
setwd("/Users/echo/Desktop/1_WS20/1_T2E_ML/2_Prediction_Challenge/")
wd = dirname(rstudioapi::getActiveDocumentContext()$path)
##############################################################
##########   Step 1: training data for dataframe d7 ##########
##############################################################
# read in the raw data (train_data.Rds is a list of data.frames keyed d1..d9)
train_data_original <- readRDS("Data-20210107/train_data.Rds")
test_data <- readRDS("Data-20210107/test_list_x.Rds")
length(train_data_original)
## get the corresponding dataframe d7 and drop incomplete rows
train_data_d7 <- train_data_original$d7
train_data_d7 <- na.omit(train_data_d7)
head(train_data_d7)
summary(train_data_d7$V10)
summary(train_data_d7$V1)
summary(train_data_d7$V2) # problem with V2, more than 53 categories
summary(train_data_d7$V5)
summary(train_data_d7$V7)
######### missing value: data imputation ##########
### ---- use missForest ---- #####
## Nonparametric missing value imputation on mixed-type data
library(missForest)
## Impute missing values providing the complete matrix for
## illustration. Use 'verbose' to see what happens between iterations:
## NOTE(review): missForest cannot handle factors with > 53 levels, so this
## call may fail on V2 for d7 -- confirm before relying on the imputation.
d7.imp <- missForest(train_data_d7, verbose = TRUE) # Can not handle categorical predictors with more than 53 categories
## Here are the final results
## FIX: the template referred to `iris.imp` (the example object from the
## missForest manual); the object created above is `d7.imp`.
d7.imp
## As can be seen here it still has the factor column
str(d7.imp$ximp)
## create the corresponding survival task for dataframe d7
tsks_train_d7 <- TaskSurv$new("df7", backend = train_data_d7, time = "time", event = "status")
# tsks_train_list <- list(
#   TaskSurv$new("df1", backend = train_data_original[[1]], time = "time", event = "status"),
#   TaskSurv$new("df2", backend = train_data_original[[2]], time = "time", event = "status"),
#   TaskSurv$new("df3", backend = train_data_original[[3]], time = "time", event = "status"),
#   TaskSurv$new("df4", backend = train_data_original[[4]], time = "time", event = "status"),
#   TaskSurv$new("df5", backend = train_data_original[[5]], time = "time", event = "status"),
#   TaskSurv$new("df7", backend = train_data_original[[6]], time = "time", event = "status"),
#   TaskSurv$new("df8", backend = train_data_original[[7]], time = "time", event = "status"),
#   TaskSurv$new("df9", backend = train_data_original[[8]], time = "time", event = "status")
# )
## check the task
tsks_train_d7
#---------------------------Fine Tuning--------------------------------------------------------------------------
## Step 1: Boosting with an accelerated failure time (AFT) family
## Step 1_1: benchmark surv.mboost with default hyperparameters
design <- benchmark_grid(
  tasks = tsks_train_d7,
  learners = lrn("surv.mboost", baselearner = 'bols', family = "weibull"), # cannot compute ‘bbs’ for non-numeric variables; used ‘bols’ instead.
  resamplings = rsmp("cv", folds = 3L)
)
# run a benchmark with a fixed seed so repeated runs are comparable
run_benchmark <- function(design){
  set.seed(2021)
  benchmark(design, store_models = TRUE)
}
## run benchmark and save the results
aft_bmr <- run_benchmark(design)
# FIX: only `train_data_d7` exists in this script; the template's
# `train_data_d1` was an undefined name here.
# (`time` / `quantile` are currently unused downstream.)
time = train_data_d7$time[train_data_d7$status == 1]
quantile = quantile(time, probs = 0.5)
# set the global evaluation metric (Harrell's C-index)
all_measures <- msr("surv.cindex")
aft_results <- aft_bmr$aggregate(measures = all_measures)
aft_results
# plot the fold-aggregated performance per learner
ggplot(data = aft_results, aes(x = learner_id, y = surv.harrell_c, label = round(surv.harrell_c, 4))) +
  geom_point() + geom_text_repel() +
  ggtitle("step1_1 aft with a default parameter setting")
## Step_1_2: tune the "baselearner" hyperparameter
# load the learner with the AFT (weibull) family
lrn_aft <- lrn("surv.mboost", baselearner = 'bols', family = "weibull")
lrn("surv.mboost", baselearner = 'bols')$param_set
# candidate base learners
base_learner_type <- c("bols", "btree")
# set the search space
param_aft_bl <- ParamSet$new(params = list(
  ParamFct$new("baselearner", levels = base_learner_type)
))
# inner resampling (used inside the AutoTuner)
inner_rsmp <- rsmp("cv", folds = 4L)
# create the AutoTuner
aft_bl <- AutoTuner$new(
  learner = lrn_aft, resampling = inner_rsmp,
  measure = msr("surv.cindex"), search_space = param_aft_bl,
  terminator = trm("none"), tuner = tnr("grid_search", resolution = 5)
)
# set the outer resampling
outer_rsmp <- rsmp("cv", folds = 3L)
# design the nested-resampling benchmark
# FIX: use the d7 task defined above; `tsks_train_d1` is undefined in this script.
design_aft_bl <- benchmark_grid(
  tasks = tsks_train_d7,
  learners = aft_bl,
  resamplings = outer_rsmp
)
bmr_aft_bl <- run_benchmark(design_aft_bl)
# aggregate results of the tuned model
bmr_aft_bl_results <- bmr_aft_bl$aggregate(measures = msr("surv.cindex"))
# helper: plot the tuning archive of outer fold k (C-index vs. column `xvar`)
plot_tuning_path <- function(bmr, k, xvar) {
  path <- bmr$data$learners()[[2]][[k]]$model$tuning_instance$archive$data()
  ggplot(path, aes(x = .data[[xvar]], y = surv.harrell_c,
                   label = round(surv.harrell_c, 4))) +
    geom_point(size = 3, color = "red") +
    geom_line() +
    geom_text_repel()
}
# one tuning-path plot per outer fold, arranged side by side
aft_bl_plots <- lapply(1:3, function(k) plot_tuning_path(bmr_aft_bl, k, "baselearner"))
ggarrange(plotlist = aft_bl_plots, common.legend = TRUE, legend = "bottom")
## Step_1_3: tune the "family" hyperparameter
# load the learner with the AFT family
lrn_aft <- lrn("surv.mboost", baselearner = 'bols', family = "weibull")
# candidate AFT distribution families
family_type <- c("weibull", "loglog", "lognormal")
# set the search space
param_aft_fam <- ParamSet$new(params = list(
  ParamFct$new("family", levels = family_type)
))
# create the AutoTuner (inner/outer resamplings reused from Step_1_2)
aft_fam <- AutoTuner$new(
  learner = lrn_aft, resampling = inner_rsmp,
  measure = msr("surv.cindex"), search_space = param_aft_fam,
  terminator = trm("none"), tuner = tnr("grid_search", resolution = 5)
)
# design the benchmark
# FIX: use the d7 task; `tsks_train_d1` is undefined in this script.
design_aft_fam <- benchmark_grid(
  tasks = tsks_train_d7,
  learners = aft_fam,
  resamplings = outer_rsmp
)
bmr_aft_fam <- run_benchmark(design_aft_fam)
# aggregate results of the tuned model
bmr_aft_fam_results <- bmr_aft_fam$aggregate(measures = msr("surv.cindex"))
# helper: plot the tuning archive of outer fold k (C-index vs. column `xvar`)
plot_tuning_path <- function(bmr, k, xvar) {
  path <- bmr$data$learners()[[2]][[k]]$model$tuning_instance$archive$data()
  ggplot(path, aes(x = .data[[xvar]], y = surv.harrell_c,
                   label = round(surv.harrell_c, 4))) +
    geom_point(size = 3, color = "red") +
    geom_line() +
    geom_text_repel()
}
aft_fam_plots <- lapply(1:3, function(k) plot_tuning_path(bmr_aft_fam, k, "family"))
ggarrange(plotlist = aft_fam_plots, common.legend = TRUE, legend = "bottom")
## Step_1_3_2: exclude "lognormal" and re-tune between "weibull" and "loglog"
# load the learner with the AFT family
lrn_aft <- lrn("surv.mboost", baselearner = 'bols', family = "weibull")
# candidate families (lognormal dropped after Step_1_3)
family_type <- c("weibull", "loglog")
# set the search space
param_aft_fam <- ParamSet$new(params = list(
  ParamFct$new("family", levels = family_type)
))
# create the AutoTuner
aft_fam <- AutoTuner$new(
  learner = lrn_aft, resampling = inner_rsmp,
  measure = msr("surv.cindex"), search_space = param_aft_fam,
  terminator = trm("none"), tuner = tnr("grid_search", resolution = 5)
)
# design the benchmark with fam
# FIX: use the d7 task; `tsks_train_d1` is undefined in this script.
design_aft_fam <- benchmark_grid(
  tasks = tsks_train_d7,
  learners = aft_fam,
  resamplings = outer_rsmp
)
bmr_aft_fam <- run_benchmark(design_aft_fam)
# aggregate results of the tuned model
bmr_aft_fam_results <- bmr_aft_fam$aggregate(measures = msr("surv.cindex"))
# helper: plot the tuning archive of outer fold k (C-index vs. column `xvar`)
plot_tuning_path <- function(bmr, k, xvar) {
  path <- bmr$data$learners()[[2]][[k]]$model$tuning_instance$archive$data()
  ggplot(path, aes(x = .data[[xvar]], y = surv.harrell_c,
                   label = round(surv.harrell_c, 4))) +
    geom_point(size = 3, color = "red") +
    geom_line() +
    geom_text_repel()
}
aft_fam_plots <- lapply(1:3, function(k) plot_tuning_path(bmr_aft_fam, k, "family"))
ggarrange(plotlist = aft_fam_plots, common.legend = TRUE, legend = "bottom")
# conclusion: go with "weibull", as it is reaching the highest performance "0.847", compared with the highest "loglog" ones, 0.844..
## Step_1_4: tune "mstop" (number of boosting iterations / early stopping)
# search ranges tried so far: 10-250, 50-250, 50-125, 50-90, 65-85, 65-75
# load the learner with the AFT family
lrn_aft <- lrn("surv.mboost", baselearner = 'bols', family = "weibull")
# set the search space
param_aft_mstop <- ParamSet$new(params = list(
  ParamInt$new("mstop", lower = 65, upper = 75)
))
# create the AutoTuner
aft_mstop <- AutoTuner$new(
  learner = lrn_aft, resampling = inner_rsmp,
  measure = msr("surv.cindex"), search_space = param_aft_mstop,
  terminator = trm("none"), tuner = tnr("grid_search", resolution = 10)
)
# design the benchmark
# FIX: use the d7 task; `tsks_train_d1` is undefined in this script.
design_aft_mstop <- benchmark_grid(
  tasks = tsks_train_d7,
  learners = aft_mstop,
  resamplings = outer_rsmp
)
bmr_aft_mstop <- run_benchmark(design_aft_mstop)
# aggregate results of the tuned model
bmr_aft_mstop_results <- bmr_aft_mstop$aggregate(measures = msr("surv.cindex"))
# helper: plot the tuning archive of outer fold k (C-index vs. column `xvar`)
plot_tuning_path <- function(bmr, k, xvar) {
  path <- bmr$data$learners()[[2]][[k]]$model$tuning_instance$archive$data()
  ggplot(path, aes(x = .data[[xvar]], y = surv.harrell_c,
                   label = round(surv.harrell_c, 4))) +
    geom_point(size = 3, color = "red") +
    geom_line() +
    geom_text_repel()
}
aft_mstop_plots <- lapply(1:3, function(k) plot_tuning_path(bmr_aft_mstop, k, "mstop"))
ggarrange(plotlist = aft_mstop_plots, common.legend = TRUE, legend = "bottom")
# conclusion: set mstop to be 65
## Step_1_5_1: tune "nu" (boosting step size / learning rate)
# search ranges tried so far: 0.00-1.00, 0.1-1.0, 0.1-0.4, 0.1-0.2
# load the learner with the AFT family and the mstop chosen in Step_1_4
lrn_aft <- lrn("surv.mboost", baselearner = 'bols', family = "weibull", mstop = 65)
# set the search space
param_aft_nu <- ParamSet$new(params = list(
  ParamDbl$new("nu", lower = 0.1, upper = 0.2)
))
# create the AutoTuner
aft_nu <- AutoTuner$new(
  learner = lrn_aft, resampling = inner_rsmp,
  measure = msr("surv.cindex"), search_space = param_aft_nu,
  terminator = trm("none"), tuner = tnr("grid_search", resolution = 5)
)
# design the benchmark
# FIX: use the d7 task; `tsks_train_d1` is undefined in this script.
design_aft_nu <- benchmark_grid(
  tasks = tsks_train_d7,
  learners = aft_nu,
  resamplings = outer_rsmp
)
bmr_aft_nu <- run_benchmark(design_aft_nu)
# aggregate results of the tuned model
bmr_aft_nu_results <- bmr_aft_nu$aggregate(measures = msr("surv.cindex"))
# helper: plot the tuning archive of outer fold k (C-index vs. column `xvar`)
plot_tuning_path <- function(bmr, k, xvar) {
  path <- bmr$data$learners()[[2]][[k]]$model$tuning_instance$archive$data()
  ggplot(path, aes(x = .data[[xvar]], y = surv.harrell_c,
                   label = round(surv.harrell_c, 4))) +
    geom_point(size = 3, color = "red") +
    geom_line() +
    geom_text_repel()
}
aft_nu_plots <- lapply(1:3, function(k) plot_tuning_path(bmr_aft_nu, k, "nu"))
ggarrange(plotlist = aft_nu_plots, common.legend = TRUE, legend = "bottom")
# conclusion: nu 0.2
## final tuned AFT learner (weibull, mstop = 65, nu = 0.2)
lrn_aft <- lrn("surv.mboost", baselearner = 'bols', family = "weibull", mstop = 65, nu = 0.2)
# FIX: use the d7 task; `tsks_train_d1` is undefined in this script.
design_aft <- benchmark_grid(
  tasks = tsks_train_d7,
  learners = lrn_aft,
  resamplings = rsmp("cv", folds = 3L)
)
# run a benchmark with a fixed seed so repeated runs are comparable
run_benchmark <- function(design_aft){
  set.seed(2021)
  benchmark(design_aft, store_models = TRUE)
}
## run benchmark and save the results
aft_bmr <- run_benchmark(design_aft)
# set the global evaluation metric (Harrell's C-index)
all_measures <- msr("surv.cindex")
aft_results <- aft_bmr$aggregate(measures = all_measures)
aft_results$surv.harrell_c # 0.839
## --- Step 2: boosting with the Cox PH family (surv.mboost)
## Step 2_1: benchmark with default hyperparameters
design <- benchmark_grid(
  tasks = tsks_train_d7, # FIX: `tsks_train_d1` is undefined in this script
  learners = lrn("surv.mboost", baselearner = 'bols', family = "coxph"), # cannot compute ‘bbs’ for non-numeric variables; used ‘bols’ instead.
  resamplings = rsmp("cv", folds = 3L)
)
# run a benchmark with a fixed seed so repeated runs are comparable
run_benchmark <- function(design){
  set.seed(2021)
  benchmark(design, store_models = TRUE)
}
## run benchmark and save the results
aft_bmr <- run_benchmark(design)
# FIX: d7, not the undefined d1 (`time` / `quantile` are unused downstream)
time = train_data_d7$time[train_data_d7$status == 1]
quantile = quantile(time, probs = 0.5)
# set the global evaluation metric (Harrell's C-index)
all_measures <- msr("surv.cindex")
aft_results <- aft_bmr$aggregate(measures = all_measures)
aft_results$surv.harrell_c ## 0.839
## Step_2_2: jointly tune "baselearner" and "family"
# load the learner with the Cox PH family
lrn_mb <- lrn("surv.mboost", baselearner = 'bols', family = "coxph")
# candidate base learners and families
base_learner_type <- c("bols", "btree")
family_type <- c("coxph", "cindex")
# set the search space
param_mb_bl <- ParamSet$new(params = list(
  ParamFct$new("baselearner", levels = base_learner_type),
  ParamFct$new("family", levels = family_type)
))
# inner resampling (used inside the AutoTuner)
inner_rsmp <- rsmp("cv", folds = 4L)
# create the AutoTuner
mb_bl <- AutoTuner$new(
  learner = lrn_mb, resampling = inner_rsmp,
  measure = msr("surv.cindex"), search_space = param_mb_bl,
  terminator = trm("none"), tuner = tnr("grid_search", resolution = 5)
)
# set the outer resampling
outer_rsmp <- rsmp("cv", folds = 3L)
# design the benchmark
# FIX: use the d7 task; `tsks_train_d1` is undefined in this script.
design_mb_bl <- benchmark_grid(
  tasks = tsks_train_d7,
  learners = mb_bl,
  resamplings = outer_rsmp
)
bmr_mb_bl <- run_benchmark(design_mb_bl)
# aggregate results of the tuned model
bmr_mb_bl_results <- bmr_mb_bl$aggregate(measures = msr("surv.cindex"))
# helper: fold-k tuning path (C-index vs. `xvar`), colored by family
plot_tuning_path_fam <- function(bmr, k, xvar) {
  path <- bmr$data$learners()[[2]][[k]]$model$tuning_instance$archive$data()
  ggplot(path, aes(x = .data[[xvar]], y = surv.harrell_c, col = factor(family),
                   label = round(surv.harrell_c, 4))) +
    geom_point(size = 3, color = "red") +
    geom_line() +
    geom_text_repel()
}
mb_bl_plots <- lapply(1:3, function(k) plot_tuning_path_fam(bmr_mb_bl, k, "baselearner"))
ggarrange(plotlist = mb_bl_plots, common.legend = TRUE, legend = "bottom")
# conclusion: go with "coxph", and "bols".
## Step_2_3_1: tune "mstop" (early stopping) for the Cox PH family
# search ranges tried so far: 10-300(default), 100-300, 150-300, 150-200,
# 155-185, 160-180
# load the learner with the Cox PH family
lrn_aft <- lrn("surv.mboost", baselearner = 'bols', family = "coxph")
# set the search space
param_aft_mstop <- ParamSet$new(params = list(
  ParamInt$new("mstop", lower = 160, upper = 180)
))
# create the AutoTuner
aft_mstop <- AutoTuner$new(
  learner = lrn_aft, resampling = inner_rsmp,
  measure = msr("surv.cindex"), search_space = param_aft_mstop,
  terminator = trm("none"), tuner = tnr("grid_search", resolution = 10)
)
# design the benchmark
# FIX: use the d7 task; `tsks_train_d1` is undefined in this script.
design_aft_mstop <- benchmark_grid(
  tasks = tsks_train_d7,
  learners = aft_mstop,
  resamplings = outer_rsmp
)
bmr_aft_mstop <- run_benchmark(design_aft_mstop)
# aggregate results of the tuned model
bmr_aft_mstop_results <- bmr_aft_mstop$aggregate(measures = msr("surv.cindex"))
# helper: plot the tuning archive of outer fold k (C-index vs. column `xvar`)
plot_tuning_path <- function(bmr, k, xvar) {
  path <- bmr$data$learners()[[2]][[k]]$model$tuning_instance$archive$data()
  ggplot(path, aes(x = .data[[xvar]], y = surv.harrell_c,
                   label = round(surv.harrell_c, 4))) +
    geom_point(size = 3, color = "red") +
    geom_line() +
    geom_text_repel()
}
aft_mstop_plots <- lapply(1:3, function(k) plot_tuning_path(bmr_aft_mstop, k, "mstop"))
ggarrange(plotlist = aft_mstop_plots, common.legend = TRUE, legend = "bottom")
# conclusion: mstop = 164
## Step_2_4: tune "nu" for the Cox PH family
## (header mislabeled "Step_1_5_1" in the template)
# search ranges tried so far: 0.1-1, 0.1-0.4, 0.15-0.3
# load the learner with the Cox PH family and the mstop chosen above
lrn_aft <- lrn("surv.mboost", baselearner = 'bols', family = "coxph", mstop = 164)
# set the search space
param_aft_nu <- ParamSet$new(params = list(
  ParamDbl$new("nu", lower = 0.16, upper = 0.3)
))
# create the AutoTuner
aft_nu <- AutoTuner$new(
  learner = lrn_aft, resampling = inner_rsmp,
  measure = msr("surv.cindex"), search_space = param_aft_nu,
  terminator = trm("none"), tuner = tnr("grid_search", resolution = 15)
)
# design the benchmark
# FIX: use the d7 task; `tsks_train_d1` is undefined in this script.
design_aft_nu <- benchmark_grid(
  tasks = tsks_train_d7,
  learners = aft_nu,
  resamplings = outer_rsmp
)
bmr_aft_nu <- run_benchmark(design_aft_nu)
# aggregate results of the tuned model
bmr_aft_nu_results <- bmr_aft_nu$aggregate(measures = msr("surv.cindex"))
# helper: plot the tuning archive of outer fold k (C-index vs. column `xvar`)
plot_tuning_path <- function(bmr, k, xvar) {
  path <- bmr$data$learners()[[2]][[k]]$model$tuning_instance$archive$data()
  ggplot(path, aes(x = .data[[xvar]], y = surv.harrell_c,
                   label = round(surv.harrell_c, 4))) +
    geom_point(size = 3, color = "red") +
    geom_line() +
    geom_text_repel()
}
aft_nu_plots <- lapply(1:3, function(k) plot_tuning_path(bmr_aft_nu, k, "nu"))
ggarrange(plotlist = aft_nu_plots, common.legend = TRUE, legend = "bottom")
# conclusion nu = 0.25
## final tuned Cox PH boosting learner (coxph, mstop = 164, nu = 0.25)
lrn_aft <- lrn("surv.mboost", baselearner = 'bols', family = "coxph", mstop = 164, nu = 0.25)
# FIX: use the d7 task; `tsks_train_d1` is undefined in this script.
design_aft <- benchmark_grid(
  tasks = tsks_train_d7,
  learners = lrn_aft,
  resamplings = rsmp("cv", folds = 3L)
)
# run a benchmark with a fixed seed so repeated runs are comparable
run_benchmark <- function(design_aft){
  set.seed(2021)
  benchmark(design_aft, store_models = TRUE)
}
## run benchmark and save the results
aft_bmr <- run_benchmark(design_aft)
# set the global evaluation metric (Harrell's C-index)
all_measures <- msr("surv.cindex")
aft_results <- aft_bmr$aggregate(measures = all_measures)
aft_results$surv.harrell_c # 0.8321
## ---- Step 3: Boosting with CoxBoost
## load the learner for the CoxBoost boosting and design the benchmark
# NOTE(review): this whole CoxBoost section re-reads the raw data and models
# dataframe d1, although the script is named for d7. This looks like a
# leftover from the d1 template (the one-hot column "V4" below is d1-specific)
# -- confirm the intended dataset before reusing these results.
# read in the raw data
train_data_original <- readRDS("Data-20210107/train_data.Rds")
test_data <- readRDS("Data-20210107/test_list_x.Rds")
## get the corresponding dataframe1
train_data_d1 <- train_data_original$d1
# only V4 needs to be one-hot-encoded
## Step 3_1: change the factor type to one-hot encoded
## Data preparation using one hot encoder
library("dataPreparation")
# Compute encoding
train_onehot_d1 <- train_data_d1
encoding <- build_encoding(train_onehot_d1, cols = c("V4"), verbose = TRUE)
# Apply one hot encoding
train_onehot_d1 <- one_hot_encoder(train_onehot_d1, encoding = encoding, drop = TRUE)
str(train_onehot_d1)
## create the corresponding task as for dataframe 1 after one-hot-encoding
tsks_oh_d1 <- TaskSurv$new("df1", backend = train_onehot_d1, time = "time", event = "status")
tsks_oh_d1
## Step 3_1: Try with a default setting
design <- benchmark_grid(
  tasks = tsks_oh_d1,
  learners = lrn("surv.coxboost", stepno=100, penalty=100, criterion="hpscore"),
  resampling = rsmp("cv", folds = 3L)
)
# lrn("surv.coxboost")$param_set
# define function to start benchmark with fixed seed
run_benchmark <- function(design){
  set.seed(2021)
  bmr <- benchmark(design, store_models = TRUE)
  # NOTE(review): this assignment is a no-op naming quirk; the function simply
  # returns `bmr` (the value of the last expression).
  run_benchmark <- bmr
}
## run benchmark and save the results
coxboost_bmr <- run_benchmark(design)
# set the global evaluation metric
all_measures <- msr("surv.cindex")
coxboost_results <- coxboost_bmr$aggregate(measures = all_measures)
coxboost_results # 0.827
# plot the corresponding the performance
autoplot(coxboost_bmr)
## Step_3_2: tune "stepno" (number of CoxBoost boosting steps)
# NOTE(review): this CoxBoost section still operates on dataframe d1 via
# `tsks_oh_d1` (see the Step 3 setup) although the script is named for d7 --
# confirm the intended dataset.
# search ranges tried so far: 50-250, 100-300, 150-250
lrn_cb <- lrn("surv.coxboost", criterion="hpscore")
# set the search space
param_cb_stepno <- ParamSet$new(params = list(
  ParamInt$new("stepno", lower = 150, upper = 250)
))
# inner resampling (used inside the AutoTuner)
inner_rsmp <- rsmp("cv", folds = 4L)
# create the AutoTuner
cb_stepno <- AutoTuner$new(
  learner = lrn_cb, resampling = inner_rsmp,
  measure = msr("surv.cindex"), search_space = param_cb_stepno,
  terminator = trm("none"), tuner = tnr("grid_search", resolution = 11)
)
# set the outer resampling
outer_rsmp <- rsmp("cv", folds = 3L)
# design the benchmark
design_cb_stepno <- benchmark_grid(
  tasks = tsks_oh_d1,
  learners = cb_stepno,
  resamplings = outer_rsmp
)
bmr_cb_stepno <- run_benchmark(design_cb_stepno)
# aggregate results of the tuned model
bmr_cb_stepno_results <- bmr_cb_stepno$aggregate(measures = msr("surv.cindex"))
# helper: plot the tuning archive of outer fold k (C-index vs. column `xvar`)
plot_tuning_path <- function(bmr, k, xvar) {
  path <- bmr$data$learners()[[2]][[k]]$model$tuning_instance$archive$data()
  ggplot(path, aes(x = .data[[xvar]], y = surv.harrell_c,
                   label = round(surv.harrell_c, 4))) +
    geom_point(size = 3, color = "red") +
    geom_line() +
    geom_text_repel()
}
cb_stepno_plots <- lapply(1:3, function(k) plot_tuning_path(bmr_cb_stepno, k, "stepno"))
ggarrange(plotlist = cb_stepno_plots, common.legend = TRUE, legend = "bottom")
# conclusion: set stepno to be 150
## Step_3_3: tune "penalty" (CoxBoost ridge penalty)
# NOTE(review): still operating on dataframe d1 via `tsks_oh_d1` -- confirm
# the intended dataset (script is named for d7).
# search ranges tried so far: 100-500, 450-500, 450-490, 450-480
lrn_cb <- lrn("surv.coxboost", criterion = "hpscore", stepno = 150)
# set the search space
param_cb_penal <- ParamSet$new(params = list(
  ParamDbl$new("penalty", lower = 450, upper = 480)
))
# inner resampling (used inside the AutoTuner)
inner_rsmp <- rsmp("cv", folds = 4L)
# create the AutoTuner
cb_penal <- AutoTuner$new(
  learner = lrn_cb, resampling = inner_rsmp,
  measure = msr("surv.cindex"), search_space = param_cb_penal,
  terminator = trm("none"), tuner = tnr("grid_search", resolution = 7)
)
# set the outer resampling
outer_rsmp <- rsmp("cv", folds = 3L)
# design the benchmark
design_cb_penal <- benchmark_grid(
  tasks = tsks_oh_d1,
  learners = cb_penal,
  resamplings = outer_rsmp
)
bmr_cb_penal <- run_benchmark(design_cb_penal)
# aggregate results of the tuned model
bmr_cb_penal_results <- bmr_cb_penal$aggregate(measures = msr("surv.cindex"))
# helper: plot the tuning archive of outer fold k (C-index vs. column `xvar`)
plot_tuning_path <- function(bmr, k, xvar) {
  path <- bmr$data$learners()[[2]][[k]]$model$tuning_instance$archive$data()
  ggplot(path, aes(x = .data[[xvar]], y = surv.harrell_c,
                   label = round(surv.harrell_c, 4))) +
    geom_point(size = 3, color = "red") +
    geom_line() +
    geom_text_repel()
}
cb_penal_plots <- lapply(1:3, function(k) plot_tuning_path(bmr_cb_penal, k, "penalty"))
ggarrange(plotlist = cb_penal_plots, common.legend = TRUE, legend = "bottom")
# conclusion: go with penalty = 470
## Step_3_4: tune "stepsize.factor" (per-step penalty update factor)
# NOTE(review): still operating on dataframe d1 via `tsks_oh_d1` -- confirm
# the intended dataset (script is named for d7).
# search ranges tried so far: 0.1-1, 1-10
lrn_cb <- lrn("surv.coxboost", criterion = "hpscore", stepno = 150, penalty = 470)
# set the search space
param_cb_sf <- ParamSet$new(params = list(
  ParamDbl$new("stepsize.factor", lower = 0.1, upper = 1)
))
# inner resampling (used inside the AutoTuner)
inner_rsmp <- rsmp("cv", folds = 4L)
# create the AutoTuner
cb_sf <- AutoTuner$new(
  learner = lrn_cb, resampling = inner_rsmp,
  measure = msr("surv.cindex"), search_space = param_cb_sf,
  terminator = trm("none"), tuner = tnr("grid_search", resolution = 10)
)
# set the outer resampling
outer_rsmp <- rsmp("cv", folds = 3L)
# design the benchmark
design_cb_sf <- benchmark_grid(
  tasks = tsks_oh_d1,
  learners = cb_sf,
  resamplings = outer_rsmp
)
bmr_cb_sf <- run_benchmark(design_cb_sf)
# aggregate results of the tuned model
bmr_cb_sf_results <- bmr_cb_sf$aggregate(measures = msr("surv.cindex"))
# helper: plot the tuning archive of outer fold k (C-index vs. column `xvar`)
plot_tuning_path <- function(bmr, k, xvar) {
  path <- bmr$data$learners()[[2]][[k]]$model$tuning_instance$archive$data()
  ggplot(path, aes(x = .data[[xvar]], y = surv.harrell_c,
                   label = round(surv.harrell_c, 4))) +
    geom_point(size = 3, color = "red") +
    geom_line() +
    geom_text_repel()
}
cb_sf_plots <- lapply(1:3, function(k) plot_tuning_path(bmr_cb_sf, k, "stepsize.factor"))
ggarrange(plotlist = cb_sf_plots, common.legend = TRUE, legend = "bottom")
# conclusion: go with sf=1
## final learner with CoxBoost model
## Evaluate the fully-tuned CoxBoost learner (stepno, penalty, stepsize.factor
## from the tuning steps above) with 3-fold CV on the one-hot-encoded d1 task.
lrn_cb <- lrn("surv.coxboost", criterion = "hpscore", stepno = 150, penalty = 470, stepsize.factor = 1)
design_cb <- benchmark_grid(
  tasks = tsks_oh_d1,
  learners = lrn_cb,
  resampling = rsmp("cv", folds = 3L)
)
# define function to start benchmark with fixed seed
run_benchmark <- function(design_cb){
  set.seed(2021)
  bmr <- benchmark(design_cb, store_models = TRUE)
  # NOTE(review): no-op local assignment; the function returns `bmr`.
  run_benchmark <- bmr
}
## run benchmark and save the results
cb_bmr <- run_benchmark(design_cb)
# set the global evaluation metric (Harrell's C-index)
all_measures <- msr("surv.cindex")
cb_results <- cb_bmr$aggregate(measures = all_measures)
cb_results$surv.harrell_c # 0.839
|
7d32eb8a18d5c2935954c81dcbda785448ede369
|
1c4caaa37a9aaeac8c0fa73a417503d6ed05ef53
|
/inst/scripts/md_postprocessing.R
|
6d41776006f847a5aa77b663b2a2e26fb474cd7d
|
[] |
no_license
|
metamaden/recountmethylation_manuscript_supplement
|
08996bf66fa0ad920b35107105d27b94663fb275
|
4a254783b8933411a80032e33549ae94c1544ad6
|
refs/heads/master
| 2020-12-27T09:22:39.405834
| 2020-10-09T06:27:08
| 2020-10-09T06:27:08
| 237,849,928
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 46,329
|
r
|
md_postprocessing.R
|
#!/usr/bin/env/R
# Describes postprocessing from coerced GEO GSM metadata.
# Notes on rules for regex matching:
# 1. var states having phrases separated by spaces, to be separated by underscores
# 2. variable with more than one state have states separated by semicolons
# 3. all var states tolower (lowercase only)
# 4. auto-populate redundant variable states
# 5. add negative match check for disease status (e.g. excludes "non-cancer" from "cancer" search)
# Load dependencies and input tables, then initialize the postprocessed
# metadata table `mdpost` from the coerced GSM metadata.
library(data.table)
load("md-preprocess.rda") # coerced, partially annotated metadata
load("mdmap-gsm_35k.rda") # MetaSRA-pipeline, mapped and predicted labels
# NOTE(review): header = T uses the reassignable alias of TRUE; prefer header = TRUE.
ccf = fread('ccformat.txt', sep = ' ', header = T) # formatted Cellosaurus records
load("prepmd.rda") # storage procedure annotations
md = md.preprocess
# start the postprocessed table from the first three columns of md
mdpost = md[,c(1, 2, 3)]
nfn = "md-postprocess.rda" # new file name
# NOTE(review): these are the literal string "NA", not the missing value NA --
# presumably intentional placeholders overwritten by later regex annotation; confirm.
mdpost$sampletype = mdpost$tissue = mdpost$disease = "NA"
# composite array identifier: <array_id>_<sentrix_id>
mdpost$arrayid_full = paste0(md$array_id, "_", md$sentrix_id)
mdpost$basename = md$basename
#-----------------
# helper functions
#-----------------
# Build a case-variant regex from one or more search terms.
#
# For each term in `v` the pattern includes the raw term plus every
# "progressive capitalization" variant (first word capitalized, first two
# words capitalized, ...). Multi-word terms additionally get space-,
# underscore- and dash-separated forms. Each variant is wrapped in ".*" and
# all variants are joined with "|" into one regex string for grepl().
#
# Changes vs. the original: removed the unused local `uvstr`, renamed the
# local `c` (which shadowed base::c), and replaced 1:length() loops with
# seq_along(). Output is unchanged.
#
# @param v Character vector of search terms (words separated by spaces).
# @return A single regex string.
get_pstr = function(v){
  rs <- ""
  for(ci in seq_along(v)){
    term <- v[ci]
    # seed the pattern with the raw term
    if(ci == 1){
      rs <- paste0(".*", term, ".*")
    } else{
      rs <- paste(c(rs, paste0(".*", term, ".*")), collapse = "|")
    }
    uv <- unlist(strsplit(term, " ")) # space-separated word units
    # uvl collects capitalization variants; element 1 is the raw term
    uvl <- list(uv)
    for(i in seq_along(uv)){
      # capitalize the first i words, leave the rest unchanged
      uvi <- c()
      for(ui in seq_along(uv)){
        chari <- uv[ui]
        if(ui <= i){
          # works for single-character words too: substr(x, 2, 1) is ""
          ssi <- paste0(toupper(substr(chari, 1, 1)),
                        substr(chari, 2, nchar(chari)))
        } else{
          ssi <- chari
        }
        uvi <- c(uvi, ssi)
      }
      uvl[[length(uvl) + 1]] <- uvi
    }
    # append the variants to the pattern string
    for(si in seq_along(uvl)){
      s <- uvl[[si]]
      if(length(uv) > 1){
        if(si != 1){
          # space-separated form (the raw si == 1 form was added above)
          rs <- paste(c(rs, paste0(".*", paste(s, collapse = " "), ".*")), collapse = "|")
        }
        # underscore- and dash-separated forms
        rs <- paste(c(rs, paste0(".*", paste(s, collapse = "_"), ".*")), collapse = "|")
        rs <- paste(c(rs, paste0(".*", paste(s, collapse = "-"), ".*")), collapse = "|")
      } else{
        if(si != 1){
          rs <- paste(c(rs, paste0(".*", s, ".*")), collapse = "|")
        }
      }
    }
  }
  return(rs)
}
# Build the negative-lookup companion of a get_pstr() pattern.
#
# Recovers the individual terms from `pstr` (a get_pstr() output) and, for
# each term X, emits the exclusion variants "non-X", "Non-X", "non X",
# "Non X", "not X", "Not X", "X-free", "X-Free", "X free" and "X Free",
# each wrapped in ".*" and all joined with "|".
#
# @param pstr Regex string produced by get_pstr().
# @return A single regex matching negated/excluded phrasings; "" when no
#   terms can be recovered (the original errored in that case).
get_pstr_neg = function(pstr){
  # strip the ".*" wrappers and "|" separators to recover the bare terms
  pstrg <- gsub("\\.\\*", "_", pstr)
  pstrg <- gsub("\\|", "", pstrg)
  uv <- unlist(strsplit(pstrg, "_"))
  uv <- uv[!uv == ""]
  # the ten exclusion variants for a single term, joined with "|"
  neg_one <- function(s){
    paste(paste0(".*non-", s, ".*"), paste0(".*Non-", s, ".*"), paste0(".*non ", s, ".*"),
          paste0(".*Non ", s, ".*"), paste0(".*not ", s, ".*"), paste0(".*Not ", s, ".*"),
          paste0(".*", s, "-free.*"), paste0(".*", s, "-Free.*"), paste0(".*", s, " free.*"),
          paste0(".*", s, " Free.*"), sep = "|")
  }
  # vapply + collapse replaces the original's duplicated first-iteration /
  # later-iteration paste branches; output is identical for nonempty uv
  return(paste(vapply(uv, neg_one, character(1)), collapse = "|"))
}
# Row filter: which rows of `m` match regex `v` in the columns `varl`.
#
# @param v Regex string, typically the output of get_pstr().
# @param filtrel "|" (row matches in ANY listed column) or "&" (row must
#   match in EVERY listed column).
# @param varl Column names of `m` to search.
# @param nfilt Logical; if TRUE, exclude rows matching the negative
#   patterns from get_pstr_neg(v) (e.g. "non-cancer", "cancer-free").
# @param ntfilt Term(s) whose get_pstr() pattern excludes rows on match.
# @param ptfilt Term(s) whose get_pstr() pattern must ALSO match in the
#   first column (positive co-filter).
# @param m Data frame to search (defaults to the global `md`).
# @return Logical vector, one element per row of `m`; NULL when `filtrel`
#   is invalid.
get_filt = function(v, filtrel = "|",
                    varl = c("gsm_title", "sample_type", "disease_state", "anatomic_location", "misc"),
                    nfilt = FALSE, ntfilt = "", ptfilt = "", m = md){
  if(!filtrel %in% c("|", "&")){
    message("Please provide a valid filter relation symbol.")
    return(NULL)
  }
  # main pattern match on the first column
  filtl <- grepl(v, m[, colnames(m) == varl[1]])
  if(!ptfilt == ""){
    # positive co-filter. The original called a non-existent get_pst() and
    # then discarded that result (the next line overwrote filtl), so this
    # branch always errored; require both patterns, as apparently intended.
    filtl <- filtl & grepl(get_pstr(ptfilt), m[, colnames(m) == varl[1]])
  }
  # negative match filter
  if(nfilt){
    message("Using negative lookup/exclusion filter...")
    nfiltv <- get_pstr_neg(v)
    filtl <- filtl & !grepl(nfiltv, m[, colnames(m) == varl[1]])
  }
  # term filter
  if(!ntfilt == ""){
    message("Using term lookup filter...")
    filtl <- filtl & !grepl(get_pstr(ntfilt), m[, colnames(m) == varl[1]])
  }
  # fold in the remaining columns
  if(length(varl) > 1){
    for(vi in varl[2:length(varl)]){
      hit <- grepl(v, m[, colnames(m) == vi])
      if(filtrel == "|"){
        filtl <- filtl | hit
      } else{
        # BUG FIX: the original used `|` here too, so "&" behaved like "|"
        filtl <- filtl & hit
      }
      if(nfilt){
        filtl <- filtl & !grepl(nfiltv, m[, colnames(m) == vi])
      }
    }
  }
  return(filtl)
}
# Append the label `val` to column `var` of `m` for rows where `filtv` is
# TRUE. Placeholder "NA" strings are replaced outright; otherwise the label
# is appended with a ";" separator. Rows that already carry `val` as a
# complete ";"-delimited token are left untouched.
#
# @param var Column name in `m` to update.
# @param val Label string to append.
# @param filtv Logical vector selecting the rows to tag.
# @param m Data frame holding the column (defaults to the global mdpost).
# @return The updated column as a character vector.
appendvar = function(var, val, filtv, m = mdpost){
  column <- m[, colnames(m) == var]
  # rows that already contain `val` as a whole semicolon-delimited token
  already_tagged <- grepl(paste0("(^|;)", val, "(;|$)"), column)
  to_update <- filtv & !already_tagged
  if(!any(to_update)){
    message("No unique values to append. Returning var unchanged.")
    return(column)
  }
  # replace the "NA" placeholder outright, otherwise append with ";"
  column[to_update] <- ifelse(column[to_update] == "NA",
                              val,
                              paste(column[to_update], val, sep = ";"))
  message("Appended n = ", sum(to_update), " values")
  return(column)
}
#---------------
# disease status
#---------------
# Mine disease/study-group labels into mdpost$disease via regex matching on
# the GSM annotation columns listed in whichvar. nfilt = TRUE additionally
# excludes negated phrasings (e.g. non-cancer, not cancer, cancer-free).
whichvar = c("gsm_title", "tissuevar", "disease_state", "anatomic_location") # colnames of variables to search in md
{
# general terms
{
# chronic
ssv = c("chronic"); sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = TRUE, varl = whichvar)
mdpost$disease = appendvar("disease", "chronic", dfilt)
# acute
ssv = c("acute"); sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = TRUE, varl = whichvar)
mdpost$disease = appendvar("disease", "acute", dfilt)
# syndrome
ssv = c("syndrome"); sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = TRUE, varl = whichvar)
mdpost$disease = appendvar("disease", "syndrome", dfilt)
# disorder
ssv = c("disorder"); sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = TRUE, varl = whichvar)
mdpost$disease = appendvar("disease", "disorder", dfilt)
# inflammation
ssv = c("inflam"); sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = TRUE, varl = whichvar)
mdpost$disease = appendvar("disease", "inflammation", dfilt)
# NOTE(review): the original repeated the identical "disorder" lookup a
# second time here; it was a no-op (appendvar never re-appends an existing
# label) and has been removed.
}
# cancer, major terms
ssv = c("cancer")
sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = T, varl = whichvar)
mdpost$disease = appendvar("disease", "cancer", dfilt)
# study groups, inc. control, case, healthy, etc.
{
# case
ssv = c("case"); sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = T, varl = whichvar)
mdpost$disease = appendvar("disease", "case", dfilt)
# normal
ssv = c("normal"); sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = T, varl = whichvar)
mdpost$disease = appendvar("disease", "normal", dfilt)
# healthy
ssv = c("healthy"); sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = T, varl = whichvar)
mdpost$disease = appendvar("disease", "healthy", dfilt)
# control
ssv = c("control"); sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = T, varl = whichvar)
mdpost$disease = appendvar("disease", "control", dfilt)
}
# Psychiatric and neurodegenerative
{
# umbrella label for the whole category. The original appended the label
# "alzheimers" here (copy-paste slip), which collided with the specific
# Alzheimer's label assigned just below.
ssv = c("alzheimer's", "alzheimers", "dementia", "anxi", "depression", "attention deficit", "ADHD")
sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = TRUE, varl = whichvar)
mdpost$disease = appendvar("disease", "psychiatric_disorder", dfilt)
# alzheimer's
ssv = c("alzheimer's", "alzheimers"); sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = TRUE, varl = whichvar)
mdpost$disease = appendvar("disease", "alzheimers", dfilt)
# dementia
ssv = c("dementia"); sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = TRUE, varl = whichvar)
mdpost$disease = appendvar("disease", "dementia", dfilt)
# anxiety
ssv = c("anxi"); sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = TRUE, varl = whichvar)
mdpost$disease = appendvar("disease", "anxiety", dfilt)
# depression
ssv = c("depression"); sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = TRUE, varl = whichvar)
mdpost$disease = appendvar("disease", "depression", dfilt)
# attention deficit
ssv = c("attention deficit", "ADHD"); sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = TRUE, varl = whichvar)
mdpost$disease = appendvar("disease", "attention_deficit", dfilt)
}
# Arthritis, inc. fibromyalgia, gout, etc.
{
# arthritis (umbrella label)
ssv = c("arthritis", "rheumatoid", "psoriatic", "fibromyalgia", "gout")
sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = TRUE, varl = whichvar)
mdpost$disease = appendvar("disease", "arthritis", dfilt)
# rheumatoid arthritis — stored label fixed from the original misspelling
# "rhematoid_arthritis"
ssv = c("rheumatoid arthritis"); sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = TRUE, varl = whichvar)
mdpost$disease = appendvar("disease", "rheumatoid_arthritis", dfilt)
# osteoarthritis
ssv = c("osteoarthritis")
sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = TRUE, varl = whichvar)
mdpost$disease = appendvar("disease", "osteoarthritis", dfilt)
# psoriatic arthritis
ssv = c("psoriatic")
sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = TRUE, varl = whichvar)
mdpost$disease = appendvar("disease", "psoriatic_arthritis", dfilt)
# fibromyalgia
ssv = c("fibromyalgia")
sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = TRUE, varl = whichvar)
mdpost$disease = appendvar("disease", "fibromyalgia", dfilt)
# gout
ssv = c("gout")
sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = TRUE, varl = whichvar)
mdpost$disease = appendvar("disease", "gout", dfilt)
}
# Chromosomal abnormalities
# NOTE(review): "XXY" appears twice in the umbrella term list below — a
# harmless duplicate (the regex OR just repeats the alternative).
{
ssv = c("trisomy", "monosomy", "triploidy",
"chromosomal duplication", "chromosomal deletion",
"down syndrome", "cri du chat",
"balanced translocation", "unbalanced translocation",
"pallister killian", "ring chromosome",
"deletion syndrome", "klinefelter", "XXY", "turner", "mosaicism", "XXY")
sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = T, varl = whichvar)
mdpost$disease = appendvar("disease", "chromosomal_abnormality", dfilt)
# Down syndrome
ssv = c("down syndrome", "trisomy 21", "trisomy twentyone", "trisomy twenty one")
sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = T, varl = whichvar)
mdpost$disease = appendvar("disease", "down_syndrome", dfilt)
# Klinefelter syndrome
ssv = c("klinefelter", "XXY")
sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = T, varl = whichvar)
mdpost$disease = appendvar("disease", "klinefelter_syndrome", dfilt)
}
# Genetic disorder
{
# umbrella term list. Correctly spelled variants ("crohn", "hemophilia",
# "haemophilia") added alongside the original misspellings ("chrons",
# "hemophelia", "haemophelia") so records using standard spellings are
# also captured.
ssv = c("fragile x", "cystic fibrosis", "duane", "polycystic", "chrons", "crohn",
"hemophelia", "haemophelia", "hemophilia", "haemophilia", "hemochromatosis",
"huntington's", "huntingtons",
"thalassemia", "tay sachs", "tay sach", "parkinson's", "parkinsons",
"sickle cell", "marfan")
sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = TRUE, varl = whichvar)
mdpost$disease = appendvar("disease", "genetic_disorder", dfilt)
# Fragile X
ssv = c("fragile x")
sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = TRUE, varl = whichvar)
mdpost$disease = appendvar("disease", "fragile_x", dfilt)
# Cystic Fibrosis
ssv = c("cystic fibrosis")
sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = TRUE, varl = whichvar)
mdpost$disease = appendvar("disease", "cystic_fibrosis", dfilt)
# Thalassemia
ssv = c("thalassemia")
sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = TRUE, varl = whichvar)
mdpost$disease = appendvar("disease", "thalassemia", dfilt)
# Tay Sachs
ssv = c("tay sachs", "tay sach")
sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = TRUE, varl = whichvar)
mdpost$disease = appendvar("disease", "tay_sachs", dfilt)
# Parkinson's
ssv = c("parkinson's", "parkinsons")
sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = TRUE, varl = whichvar)
mdpost$disease = appendvar("disease", "parkinsons_disease", dfilt)
# Huntington's
ssv = c("huntington's", "huntingtons")
sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = TRUE, varl = whichvar)
mdpost$disease = appendvar("disease", "huntingtons_disease", dfilt)
# Sickle Cell Anemia
ssv = c("sickle cell")
sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = TRUE, varl = whichvar)
mdpost$disease = appendvar("disease", "sickle_cell_anemia", dfilt)
# Marfan
ssv = c("marfan")
sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = TRUE, varl = whichvar)
mdpost$disease = appendvar("disease", "marfan_syndrome", dfilt)
# Hemophilia — correct spellings added; stored label fixed from the
# original misspelling "hemophelia"
ssv = c("hemophelia", "haemophelia", "hemophilia", "haemophilia")
sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = TRUE, varl = whichvar)
mdpost$disease = appendvar("disease", "hemophilia", dfilt)
}
# Other common conditions
{
# Anemia
ssv = c("anemi")
sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = T, varl = whichvar)
mdpost$disease = appendvar("disease", "anemia", dfilt)
# Atrophy
ssv = c("atroph")
sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = T, varl = whichvar)
mdpost$disease = appendvar("disease", "atrophy", dfilt)
# Scoliosis
ssv = c("scoliosis")
sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = T, varl = whichvar)
mdpost$disease = appendvar("disease", "scoliosis", dfilt)
# Obesity
ssv = c("obese"); sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = T, varl = whichvar)
mdpost$disease = appendvar("disease", "obese", dfilt)
# Asthma
ssv = c("asthma"); sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, nfilt = T, varl = whichvar)
mdpost$disease = appendvar("disease", "asthma", dfilt)
}
}
# checkpoint: write the partially annotated table to disk
save(mdpost, file = nfn)
#------------------------------------
# tissue and disease, cancer subtypes
#------------------------------------
# Note: borrows heavily from types studied in TCGA
# Note: sped up by excluding 'nfilt = T' (less likely issue for specific-subtype details)
whichvar = c("gsm_title", "tissuevar", "disease_state", "anatomic_location")
{
# Cancer tissue types
# For each morphology term, tag mdpost$tissue with the morphology label and
# flag the record as cancer in mdpost$disease. One filter (with negative-
# match exclusion) is computed per term set and reused for both appends; the
# original computed the disease filter WITHOUT the exclusion, so phrasings
# like "non-tumor" were still flagged as cancer. The original also appended
# the copy-paste tissue label "matched" for carcinoma/sarcoma/neoplasm/
# adenocarcinoma; the correct morphology labels are used here.
{
# tumor
ssv = c("tumor", "tumour")
cfilt = get_filt(get_pstr(ssv), nfilt = TRUE, varl = whichvar)
mdpost$tissue = appendvar("tissue", "tumor", cfilt)
mdpost$disease = appendvar("disease", "cancer", cfilt)
# metastasis
ssv = c("metasta")
cfilt = get_filt(get_pstr(ssv), nfilt = TRUE, varl = whichvar)
mdpost$tissue = appendvar("tissue", "metastasis", cfilt)
mdpost$disease = appendvar("disease", "cancer", cfilt)
# carcinoma
ssv = c("carcinoma")
cfilt = get_filt(get_pstr(ssv), nfilt = TRUE, varl = whichvar)
mdpost$tissue = appendvar("tissue", "carcinoma", cfilt)
mdpost$disease = appendvar("disease", "cancer", cfilt)
# sarcoma
ssv = c("sarcoma")
cfilt = get_filt(get_pstr(ssv), nfilt = TRUE, varl = whichvar)
mdpost$tissue = appendvar("tissue", "sarcoma", cfilt)
mdpost$disease = appendvar("disease", "cancer", cfilt)
# neoplasm
ssv = c("neoplas")
cfilt = get_filt(get_pstr(ssv), nfilt = TRUE, varl = whichvar)
mdpost$tissue = appendvar("tissue", "neoplasm", cfilt)
mdpost$disease = appendvar("disease", "cancer", cfilt)
# adenocarcinoma
ssv = c("adenocarcinoma")
cfilt = get_filt(get_pstr(ssv), nfilt = TRUE, varl = whichvar)
mdpost$tissue = appendvar("tissue", "adenocarcinoma", cfilt)
mdpost$disease = appendvar("disease", "cancer", cfilt)
}
# Cancer, disease subtypes
# Behaviorally identical to the original per-subtype copy/paste blocks: for
# every subtype, matching rows get the specific disease label plus the
# umbrella label "cancer". The label -> search-term mapping is held in a
# named list and applied in the original order (get_filt defaults: no
# negative-match exclusion, columns restricted to whichvar).
{
subtype_terms = list(
mesothelioma = c("mesothelioma"),
skin_cancer = c("melanoma", "skin cancer"),
brain_cancer = c("glioblastoma", "glioma", "astrocytoma", "brain cancer"),
breast_cancer = c("breast lobular carcinoma", "breast ductal carcinoma", "breast cancer"),
colorectal_cancer = c("colorectal adeno", "colon cancer", "colorectal cancer", "rectal cancer"),
stomach_cancer = c("stomach adeno", "stomach cancer", "gastric cancer", "gastric adeno"),
esophageal_cancer = c("esophageal carcinoma", "esophageal adeno", "esophageal squamous cell carcinoma","oesophageal carcinoma", "oesophageal adeno", "oesophageal squamous cell carcinoma", "esophageal cancer", "oesophageal cancer"),
nerve_cell_cancer = c("paraganglioma", "nerve cancer", "nerve cell cancer"),
bile_duct_cancer = c("cholangiocarcinoma", "bile duct cancer"),
ovarian_cancer = c("ovarian serous carcinoma", "ovarian epithelial cancer", "ovarian cancer"),
uterine_cancer = c("uterine carcinosarcoma", "uterine corpus endometrial carcinoma", "endometrial carcinoma", "uterine cancer"),
cervical_cancer = c("cervical squamous cell carcinoma", "cervical squamous cell adenocarcinoma", "cervical cancer"),
head_and_neck_cancer = c("head and neck squamous cell carcinoma", "head and neck cancer"),
thyroid_cancer = c("thyroid carcinoma", "thyroid cancer"),
lung_cancer = c("lung adenocarcinoma", "lung squamous cell carcinoma", "lung cancer"),
kidney_cancer = c("clear cell renal cell carcinoma", "chromophobe renal cell carcinoma", "renal cancer", "kidney papillary carcinoma", "kidney cancer"),
bladder_cancer = c("invasive urothelial bladder cancer", "bladder cancer"),
prostate_cancer = c("prostate adenocarcinoma", "prostate cancer"),
adrenal_gland_cancer = c("adrenocortical carcinoma", "pheochromocytoma", "adrenal cancer", "adrenal gland cancer"),
liver_cancer = c("liver hepatocellular carcinoma", "hepatoblastoma", "cholangiocarcinoma", "liver angiosarcoma", "liver cancer"),
pancreatic_cancer = c("pancreatic ductal adenocarcinoma", "pancreatic cancer"),
eye_cancer = c("uveal melanoma", "uveal lymphoma", "intraocular cancer", "retinoblastoma", "retinal cancer"),
thymus_gland_cancer = c("thymoma", "thymus cancer", "thymus gland cancer", "thymic cancer"),
testicular_cancer = c("testicular germ cell cancer", "testicular cancer")
)
for(lbl in names(subtype_terms)){
stfilt = get_filt(get_pstr(subtype_terms[[lbl]]), varl = whichvar)
mdpost$disease = appendvar("disease", lbl, stfilt)
mdpost$disease = appendvar("disease", "cancer", stfilt)
}
}
# Leukemias and subtypes
{
# note, use sample type and anatomic loc vars only, avoid non-cancers from cancer patients
# NOTE(review): short tokens like "ALL"/"aml"/"cml" are substring-matched by
# get_pstr, so they can over-match unrelated words (e.g. "ALL" inside
# longer uppercase strings) — confirm against the matched records.
# leukemia
ssv <- c("leukemia", "chronic leuk", "chronic myelo",
"acute leuk", "acute lympho", "acute myel",
"cml", "CML", "aml", "AML", "ALL")
mdpost$disease = appendvar("disease", "leukemia", get_filt(get_pstr(ssv), nfilt = T, varl = whichvar))
mdpost$disease = appendvar("disease", "cancer", get_filt(get_pstr(ssv), nfilt = T, varl = whichvar))
# chronic leukemias
ssv <- c("chronic leuk", "chronic leuk", "chronic myelo", "chronic lympho", "cml", "cll", "CML", "CLL")
mdpost$disease = appendvar("disease", "chronic_leukemia", get_filt(get_pstr(ssv), nfilt = T, varl = whichvar))
# CML
ssv <- c("chronic myelo", "cml", "CML")
mdpost$disease = appendvar("disease", "chronic_myeloid_leukemia", get_filt(get_pstr(ssv), nfilt = T, varl = whichvar))
# CLL
ssv <- c("chronic lympho", "cll", "CLL")
mdpost$disease = appendvar("disease", "chronic_lymphoblastic_leukemia", get_filt(get_pstr(ssv), nfilt = T, varl = whichvar))
# acute leukemias
ssv <- c("acute leuk","acute lympho", "acute myel", "aml", "AML", "ALL")
mdpost$disease = appendvar("disease", "acute_leukemia", get_filt(get_pstr(ssv), nfilt = T, varl = whichvar))
# AML
ssv <- c("acute myel", "aml", "AML")
mdpost$disease = appendvar("disease", "acute_myeloid_leukemia", get_filt(get_pstr(ssv), nfilt = T, varl = whichvar))
# ALL
ssv <- c("acute lympho", "ALL")
mdpost$disease = appendvar("disease", "acute_lymphoblastic_leukemia", get_filt(get_pstr(ssv), nfilt = T, varl = whichvar))
}
}
# checkpoint: write progress to disk
save(mdpost, file = nfn)
#-------------------
# tissue annotations
#-------------------
# Mine tissue/anatomy labels into mdpost$tissue. Unless stated otherwise
# these calls use get_filt() defaults (all annotation columns, no negative
# match filter).
{
# Tissue position, relation
{
# adjacent
ssv = c("adjacent")
mdpost$tissue = appendvar("tissue", "adjacent", get_filt(get_pstr(ssv), nfilt = T, varl = whichvar))
# matched
ssv = c("match")
mdpost$tissue = appendvar("tissue", "matched", get_filt(get_pstr(ssv), nfilt = T, varl = whichvar))
# proximal
ssv = c("proximal")
mdpost$tissue = appendvar("tissue", "proximal", get_filt(get_pstr(ssv), nfilt = T, varl = whichvar))
# distal
ssv = c("distal")
mdpost$tissue = appendvar("tissue", "distal", get_filt(get_pstr(ssv), nfilt = T, varl = whichvar))
}
# GI tract, esophagus to rectum
{
# esophageal
ssv = c("esophag", "oesophag")
mdpost$tissue = appendvar("tissue", "esophagus", get_filt(get_pstr(ssv)))
# stomach
ssv = c("stomach", "gastric")
mdpost$tissue = appendvar("tissue", "stomach", get_filt(get_pstr(ssv)))
# small intestine (also tagged with the broader "intestine" label)
ssv = c("small intestine", "small bowel"); filti = get_filt(get_pstr(ssv))
mdpost$tissue = appendvar("tissue", "small_intestine", filti)
mdpost$tissue = appendvar("tissue", "intestine", filti)
# colorectal
ssv <- c("colorect"); filti = get_filt(get_pstr(ssv))
mdpost$tissue = appendvar("tissue", "colorectal", filti)
mdpost$tissue = appendvar("tissue", "intestine", filti)
# colon
ssv <- c("colon", "colorec", "large intestine", "cecum"); filti = get_filt(get_pstr(ssv))
mdpost$tissue = appendvar("tissue", "colon", filti)
mdpost$tissue = appendvar("tissue", "intestine", filti)
# rectum
ssv = c("colorec", "rectal", "rectum", "anus")
mdpost$tissue = appendvar("tissue", "rectum", get_filt(get_pstr(ssv)))
}
# Respiratory system
{
# respiratory system
ssv = c("lung", "bronchi", "alveol", "interstiti", "pleura",
"trachea", "windpipe", "wind pipe", "bronchi", "airway")
mdpost$tissue = appendvar("tissue", "respiratory_system", get_filt(get_pstr(ssv)))
# lung
ssv = c("lung", "bronchi", "alveol", "interstiti", "pleura")
mdpost$tissue = appendvar("tissue", "lung", get_filt(get_pstr(ssv)))
# windpipe
ssv = c("trachea", "windpipe", "wind pipe", "bronchi", "airway")
mdpost$tissue = appendvar("tissue", "windpipe", get_filt(get_pstr(ssv)))
}
# Nervous system (excluding glial and neuronal categories)
{
# glial / supporting cell types tag the broad nervous_system label
ssv = c("astrocyte", "oligodendrocyte", "ependymal",
"schwann", "satellite cell")
mdpost$tissue = appendvar("tissue", "nervous_system", get_filt(get_pstr(ssv)))
}
# Chest
ssv = c("thorax", "chest")
mdpost$tissue = appendvar("tissue", "chest", get_filt(get_pstr(ssv)))
# Kidney
ssv <- c("kidney","renal", "chromophobe", "glomerul", "podocyt", "henle", "glomeruli",
"nephron", "nephrit", "calyx")
mdpost$tissue = appendvar("tissue", "kidney", get_filt(get_pstr(ssv)))
# Liver
ssv <- c("liver", "hepato", "kupff")
mdpost$tissue = appendvar("tissue", "liver", get_filt(get_pstr(ssv)))
# Bladder
ssv <- c("bladder", "urothel")
mdpost$tissue = appendvar("tissue", "bladder", get_filt(get_pstr(ssv)))
# Brain and brain region
{
ssv <- c("brain", "cerebel", "dorsolat", "medull", "lobe", "prefront", "occipital",
"falx", "meningeal", "supratentorial", "fossa", "sellar",
"grey matter", "gray matter", "white matter")
mdpost$tissue = appendvar("tissue", "brain", get_filt(get_pstr(ssv)))
# cerebellum
ssv <- c("cerebel")
mdpost$tissue = appendvar("tissue", "cerebellum", get_filt(get_pstr(ssv)))
# prefrontal lobe
ssv <- c("prefront")
mdpost$tissue = appendvar("tissue", "prefrontal_lobe", get_filt(get_pstr(ssv)))
# grey matter
ssv <- c("grey matter", "gray matter")
mdpost$tissue = appendvar("tissue", "gray_matter", get_filt(get_pstr(ssv)))
# white matter
ssv <- c("white matter")
mdpost$tissue = appendvar("tissue", "white_matter", get_filt(get_pstr(ssv)))
}
# Embryonic, prenatal tissues
{
# placenta
ssv = c("chorion", "villus", "placent")
mdpost$tissue = appendvar("tissue", "placenta", get_filt(get_pstr(ssv)))
# umbilical
ssv = c("umbilical", "cord")
mdpost$tissue = appendvar("tissue", "umbilical_cord", get_filt(get_pstr(ssv)))
}
# Gametes, gonads, sex-specific, reproductive, inc. cancers
{
# uterus
ssv <- c("uterus", "uteri", "endometr")
mdpost$tissue = appendvar("tissue", "uterus", get_filt(get_pstr(ssv)))
# cervix
ssv <- c("cervix")
mdpost$tissue = appendvar("tissue", "cervix", get_filt(get_pstr(ssv)))
# ovary
ssv <- c("ovary", "ovari")
mdpost$tissue = appendvar("tissue", "ovary", get_filt(get_pstr(ssv)))
# vagina
ssv <- c("vagin")
mdpost$tissue = appendvar("tissue", "vagina", get_filt(get_pstr(ssv)))
# labia
ssv <- c("labia")
mdpost$tissue = appendvar("tissue", "labia", get_filt(get_pstr(ssv)))
# fallopian tube
ssv <- c("fallop")
mdpost$tissue = appendvar("tissue", "fallopian_tube", get_filt(get_pstr(ssv)))
# penis
ssv <- c("penis", "penile")
mdpost$tissue = appendvar("tissue", "penis", get_filt(get_pstr(ssv)))
# scrotum
ssv <- c("scrotum")
mdpost$tissue = appendvar("tissue", "scrotum", get_filt(get_pstr(ssv)))
# epididymis
ssv <- c("epididym")
mdpost$tissue = appendvar("tissue", "epididymis", get_filt(get_pstr(ssv)))
# vas deferens
ssv <- c("vas deferens")
mdpost$tissue = appendvar("tissue", "vas_deferens", get_filt(get_pstr(ssv)))
# seminal vesicle
ssv <- c("seminal vesicle")
mdpost$tissue = appendvar("tissue", "seminal_vesicle", get_filt(get_pstr(ssv)))
# prostate
ssv <- c("prostate")
mdpost$tissue = appendvar("tissue", "prostate", get_filt(get_pstr(ssv)))
# testes
# NOTE(review): the stored label is the stem "testic" (not "testes");
# confirm downstream consumers expect this spelling before changing it.
ssv <- c("testic", "teste")
mdpost$tissue = appendvar("tissue", "testic", get_filt(get_pstr(ssv)))
# urethra
ssv <- c("urethra")
mdpost$tissue = appendvar("tissue", "urethra", get_filt(get_pstr(ssv)))
# egg
ssv <- c("egg")
mdpost$tissue = appendvar("tissue", "egg", get_filt(get_pstr(ssv)))
# sperm
ssv <- c("sperm", "spermat")
mdpost$tissue = appendvar("tissue", "sperm", get_filt(get_pstr(ssv)))
}
# Neck, inc. thyroid
{
# neck
ssv = c("neck", "thyroid")
mdpost$tissue = appendvar("tissue", "neck", get_filt(get_pstr(ssv)))
# thyroid gland
ssv = c("thyroid")
mdpost$tissue = appendvar("tissue", "thyroid_gland", get_filt(get_pstr(ssv)))
}
# Eye/optical
{
# eye — NOTE(review): short stems like "cone"/"rod" substring-match and can
# over-match unrelated words; confirm against the matched records.
ssv = c("eye", "uvea", "optic nerve", "cone", "rod", "retin")
mdpost$tissue = appendvar("tissue", "eye", get_filt(get_pstr(ssv)))
# optic nerve
ssv = c("optic nerve")
mdpost$tissue = appendvar("tissue", "optic_nerve", get_filt(get_pstr(ssv)))
# cone
ssv = c("cone")
mdpost$tissue = appendvar("tissue", "cone", get_filt(get_pstr(ssv)))
# rod
ssv = c("rod")
mdpost$tissue = appendvar("tissue", "rod", get_filt(get_pstr(ssv)))
# cancers: record cancer status in the disease variable. The original
# appended the label "cancer" to mdpost$tissue here, unlike every other
# cancer annotation in this script, which uses mdpost$disease.
ssv = c("uveal cancer", "uveal melanoma", "retinoblastoma")
mdpost$disease = appendvar("disease", "cancer", get_filt(get_pstr(ssv)))
}
# endocrine glands, inc. pancreas and cancers
{
# endocrine
ssv = c("endocrine", "pineal", "pituitary", "pancreas", "pancreat", "adren", "thyroid", "hypothalamus",
"adrenal cortex", "adreno", "paraganglioma", "paraganglioma", "pheochromocytoma",
"zona", "glomerulosa", "fasciculata", "reticularis",
"ovary", "ovari", "testic", "teste")
mdpost$tissue = appendvar("tissue", "endocrine_system", get_filt(get_pstr(ssv)))
# pancreas
ssv = c("pancreas", "pancreat")
mdpost$tissue = appendvar("tissue", "pancreas", get_filt(get_pstr(ssv)))
}
# Skin
{
# skin
ssv <- c("skin", "epidermis", "keratinocyt")
mdpost$tissue = appendvar("tissue", "skin", get_filt(get_pstr(ssv)))
# keratinocyte
ssv <- c("keratinocyt")
mdpost$tissue = appendvar("tissue", "keratinocyte", get_filt(get_pstr(ssv)))
}
# Breast and adipose
{
ssv = c("breast")
mdpost$tissue = appendvar("tissue", "breast", get_filt(get_pstr(ssv)))
# adipose
ssv = c("adip", "fat")
mdpost$tissue = appendvar("tissue", "adipose", get_filt(get_pstr(ssv)))
}
# Lymphatic, inc. spleen and thymus
{
# lymphatic
ssv = c("lymph", "spleen", "thymus")
mdpost$tissue = appendvar("tissue", "lymphatic_system", get_filt(get_pstr(ssv)))
# thymus
ssv = c("thymus")
mdpost$tissue = appendvar("tissue", "thymus", get_filt(get_pstr(ssv)))
# spleen
ssv = c("spleen")
mdpost$tissue = appendvar("tissue", "spleen", get_filt(get_pstr(ssv)))
}
# Blood, inc. primary cells
{
# blood
# NOTE(review): very short stems here ("nk", "mast", "plasma", "cd4") are
# substring-matched and will over-match unrelated words; confirm against
# the matched records.
ssv = c("blood", "hematopoiet", "haematopoiet",
"lymphoid", "myeloid", "natural killer", "nk", "NK",
"erythrocyte", "mast", "myeloblast", "plasma",
"monocyte", "lymphocyte", "eosinophil", "neutrophil", "basophil",
"macrophage", "megakaryocyte", "thrombocyte",
"wbc", "WBC", "rbc", "RBC",
"bcell", "b cell", "tcell", "t cell",
"cd4", "cd5", "cd8", "cytotoxic", "helper")
mdpost$tissue = appendvar("tissue", "blood", get_filt(get_pstr(ssv)))
# whole blood
ssv = c("whole blood")
mdpost$tissue = appendvar("tissue", "whole_blood", get_filt(get_pstr(ssv)))
# peripheral blood
ssv = c("peripheral blood")
mdpost$tissue = appendvar("tissue", "peripheral_blood", get_filt(get_pstr(ssv)))
# cord blood
ssv = c("cord blood")
mdpost$tissue = appendvar("tissue", "cord_blood", get_filt(get_pstr(ssv)))
# blood spot
ssv = c("blood spot")
mdpost$tissue = appendvar("tissue", "blood_spot", get_filt(get_pstr(ssv)))
# white blood cells
ssv = c("wbc", "WBC", "white blood cell",
"monocyte", "lymphocyte", "eosinophil", "neutrophil", "basophil",
"bcell", "b cell", "tcell", "t cell",
"cd4", "cd5", "cd8", "cytotoxic", "helper")
mdpost$tissue = appendvar("tissue", "white_blood_cell", get_filt(get_pstr(ssv)))
# Tcells
ssv = c("tcell", "t cell", "cd4", "cd5", "cd8", "cytotoxic", "helper")
mdpost$tissue = appendvar("tissue", "t_cell", get_filt(get_pstr(ssv)))
}
# Oral, inc. throat
{
# oral
ssv = c("oral", "buccal", "labial", "tongue", "lingual", "throat", "masticatory")
mdpost$tissue = appendvar("tissue", "oral", get_filt(get_pstr(ssv)))
# buccal
ssv = c("buccal")
mdpost$tissue = appendvar("tissue", "buccal", get_filt(get_pstr(ssv)))
# throat — the original searched for "buccal" here (copy-paste slip), so
# the "throat" label was applied to buccal records; search for "throat".
ssv = c("throat")
mdpost$tissue = appendvar("tissue", "throat", get_filt(get_pstr(ssv)))
# tongue
ssv = c("tongue")
mdpost$tissue = appendvar("tissue", "tongue", get_filt(get_pstr(ssv)))
}
# Nasal
ssv = c("nasal", "septum")
mdpost$tissue = appendvar("tissue", "nasal", get_filt(get_pstr(ssv)))
# Cell types, tissue layers, organ substructures, etc.
{
# neurons
ssv <- c("neur", "nerve", "dendrite", "axon")
mdpost$tissue = appendvar("tissue", "neuron", get_filt(get_pstr(ssv)))
# glial cells
ssv <- c("glia")
mdpost$tissue = appendvar("tissue", "glia", get_filt(get_pstr(ssv)))
# epithelial
ssv = c("epithel")
mdpost$tissue = appendvar("tissue", "epithelial", get_filt(get_pstr(ssv)))
# endothelium
ssv = c("endothel")
mdpost$tissue = appendvar("tissue", "endothelium", get_filt(get_pstr(ssv)))
# stem cells
ssv = c("stem cell", "pluripot", "ipsc", "iPSC")
mdpost$tissue = appendvar("tissue", "stem_cell", get_filt(get_pstr(ssv)))
# fibroblast
ssv = c("fibroblast")
mdpost$tissue = appendvar("tissue", "fibroblast", get_filt(get_pstr(ssv)))
# primary cells
ssv = c("primary cells")
mdpost$tissue = appendvar("tissue", "primary_cells", get_filt(get_pstr(ssv)))
# crypt
ssv = c("crypt")
mdpost$tissue = appendvar("tissue", "crypt", get_filt(get_pstr(ssv)))
# muscularis
ssv = c("muscularis")
mdpost$tissue = appendvar("tissue", "muscularis", get_filt(get_pstr(ssv)))
# lamina propria
ssv = c("lamina propria", "lamina_propria")
mdpost$tissue = appendvar("tissue", "lamina_propria", get_filt(get_pstr(ssv)))
# squamous
ssv = c("squamous")
mdpost$tissue = appendvar("tissue", "squamous", get_filt(get_pstr(ssv)))
# ectoderm
ssv = c("ectoderm")
mdpost$tissue = appendvar("tissue", "ectoderm", get_filt(get_pstr(ssv)))
# endoderm
ssv = c("endoderm")
mdpost$tissue = appendvar("tissue", "endoderm", get_filt(get_pstr(ssv)))
# mesoderm
ssv = c("mesoderm")
mdpost$tissue = appendvar("tissue", "mesoderm", get_filt(get_pstr(ssv)))
# melanocyte
ssv = c("melanocyte")
mdpost$tissue = appendvar("tissue", "melanocyte", get_filt(get_pstr(ssv)))
# mucosa
ssv = c("mucosa")
mdpost$tissue = appendvar("tissue", "mucosa", get_filt(get_pstr(ssv)))
# subcutaneous
ssv = c("subcutaneous")
mdpost$tissue = appendvar("tissue", "subcutaneous", get_filt(get_pstr(ssv)))
# visceral
ssv = c("visceral")
mdpost$tissue = appendvar("tissue", "visceral", get_filt(get_pstr(ssv)))
}
}
# checkpoint: tissue annotations complete
save(mdpost, file = nfn)
#-----------------------------------
# sample type, metasra-pipeline pred
#-----------------------------------
# append high-confidence meta-sra pipeline sample type predictions
{
gsmid = mdmap$gsmid
# pull 'sample type' and its confidence out of the flattened JSON string
stype = gsub("'", "",
gsub(";.*", "",
gsub("^.*'sample type':", "", mdmap$msrap_flatjson)))
spred = as.numeric(gsub("'", "",
gsub(";.*", "",
gsub("^.*'sample-type confidence':", "", mdmap$msrap_flatjson))))
dfmd = data.frame(gsm = gsmid,
type = stype,
pred = round(as.numeric(spred), digits = 3),
stringsAsFactors = FALSE)
# restrict to GSMs present in mdpost, then pad the missing GSMs with "NA"
dfmd = dfmd[dfmd$gsm %in% mdpost$gsm,]
dfmd = rbind(dfmd, data.frame(gsm = mdpost[!mdpost$gsm %in% dfmd$gsm,]$gsm,
type = rep("NA", nrow(mdpost[!mdpost$gsm %in% dfmd$gsm,])),
pred = rep("NA", nrow(mdpost[!mdpost$gsm %in% dfmd$gsm,])),
stringsAsFactors = FALSE))
# align to mdpost row order
dfmd = dfmd[order(match(dfmd$gsm, mdpost$gsm)),]
# fail fast on misalignment; the original computed this check but discarded
# the result, so a mismatch would have silently scrambled the column
stopifnot(identical(dfmd$gsm, mdpost$gsm))
mdpost$sampletype = paste(paste("msraptype", dfmd$type, sep =":"),
paste("msrapconf", dfmd$pred, sep =":"),
sep = ";")
}
#----------------------------------------
# sample type, cellosaurus cell line info
#----------------------------------------
# Note: references files described/generated from 'md_cell-lines.R'
# Note: include the mined cell line name with tag "namem:"
{
# get the most common cell line types from 'misc'
mdst = rep("NA", nrow(md))
miscdat = md$misc;
cll = ifelse(grepl(".*cell_line.*", miscdat) & !grepl(".*cell_line:NA.*", miscdat),
gsub("(;|$).*", "", gsub("(^|;).*cell_line: ", "", miscdat)),
"NA")
cll = cll[!cll=="NA"]
cllf = cll[cll %in% ccf$ID]
# length(cllf) # [1] 1062
# filt cellosaurus
ccff = ccf[ccf$ID %in% cllf,]
ccff$CA = tolower(substr(ccff$CA, 4, nchar(ccff$CA))) # fix group
# assign info
for(r in 1:nrow(ccff)){
dati = as.character(ccff[r,])
cf = get_filt(get_pstr(dati[1]))
if(length(cf[cf]) > 0){
mdpost$sampletype = appendvar("sampletype", "cell_line", cf)
mdpost$sampletype = appendvar("sampletype", paste0("ccid:", dati[1]), cf)
mdpost$sampletype = appendvar("sampletype", paste0("ccacc:", dati[2]), cf)
mdpost$sampletype = appendvar("sampletype", paste0("cccat:", dati[5]), cf)
}
message(r)
}
}
save(mdpost, file = nfn)
#----------------
# age, inc. units
#----------------
{
# mine age info from mdpre
xt = as.data.frame(table(unlist(strsplit(md$misc,";")))); xt = xt[rev(order(xt[,2])),]
aiterms = xt[grepl(".*age_info.*", xt[,1]), 1]
agedat = md$age
# format mined age
{
af = md$age # original value
aqvar = "age_info"
af = gsub("\\..*", "", gsub(" ", "", gsub(aqvar, "", af))) # rm units, spaces, decimals
af = ifelse(nchar(af) > 2 | nchar(af) == 0, "NA", af) # filter invalid entries
}
# age units
{
miscdat = md$misc; mdst = rep("", nrow(md))
# get mined ids and filt na
whichaa = which(grepl(".*age_info.*", miscdat))
# filt terms
ayr = c("year", "yr", "y")
ady = "day"
amo = c("month")
awk = c("week", "wk")
aqvar = get_pstr(c(ayr, ady, amo, awk))
aqyr = get_pstr(ayr); aqdy = get_pstr(ady); aqmo = get_pstr(amo); aqwk = get_pstr(awk)
for(i in whichaa){
aii = "NA"
aui = "NA"
sai = gsub(";.*", "", gsub(".*(|^;)age_info:", "", miscdat[i]))
if(grepl(aqvar, sai)){
auval = ifelse(grepl(aqyr, sai), "years",
ifelse(grepl(aqdy, sai), "days",
ifelse(grepl(aqmo, sai), "months",
ifelse(grepl(aqwk, sai), "weeks", "NA"))))
} else{
sai = agedat[i]
if(grepl(aqvar, sai)){
auval = ifelse(grepl(aqyr, sai), "years",
ifelse(grepl(aqdy, sai), "days",
ifelse(grepl(aqmo, sai), "months",
ifelse(grepl(aqwk, sai), "weeks", "NA"))))
}
}
aui = paste0("unitm:", auval)
mdst[i] = aui
message(i)
}
}
# add mined info as 'infom'
{
vstr = 'infom:'
iad = "adult"; ife = c("fetal", "foetal"); ipe = "pediatric"; ipr = "prepubescent"
iadq = get_pstr(iad); ifeq = get_pstr(ife); ipeq = get_pstr(ipe); iprq = get_pstr(ipr)
infoq = get_pstr(c(iad, ife, ipe, ipr))
whichinfo = which(grepl(infoq, agedat)) # 1068
mdsi = rep("", length(agedat))
for(i in whichinfo){
di = agedat[i]
mdsi[i] = ifelse(grepl(iadq, di), paste0(vstr, "adult"),
ifelse(grepl(ifeq, di), paste0(vstr, "fetal"),
ifelse(grepl(ipeq, di), paste0(vstr, "pediatric"),
ifelse(grepl(ipr, di), paste0(vstr, "prepubescent"), ""))))
}
}
# export for predage inference
{
mdage = mdpost
mdage$age = paste0("valm:", gsub(" ", "", af))
mdage$age = ifelse(!mdst=="", paste(mdage$age, mdst, sep = ";"), mdage$age)
mdage$age = ifelse(!mdsi == "", paste(mdage$age, mdsi, sep = ";"), mdage$age)
mdage$predage = md$predage
}
save(mdage, file = "mdage.rda")
}
# make final age var
# notes on tags:
# unitm: mined unit
# infom: mined info
# valm: mined value
# unitp: ref-based predicted unit from predage/horvath est.
mdpost$age = mdage$age
mdpost$predage = round(as.numeric(as.character(md$predage)), digits = 2)
save(mdpost, file = nfn)
#----------------
# Sex and predsex
#----------------
# Mines sex ("M"/"F"/"NA") from md$gender, tags chromosomal abnormalities
# (XXX/XXY) and Klinefelter syndrome into mdpost$disease, and carries over
# the reference-based sex prediction (md$predsex).
{
  mdpost$sex = "NA"
  mdpost$disease = appendvar("disease",
                             "chromosomal_abnormality",
                             get_filt(get_pstr(c("XXX", "XXY")),
                                      varl = "gender"))
  ssv = c("klinefelter", "XXY")
  # NOTE(review): nfilt = T uses the reassignable shorthand; prefer TRUE.
  sfilt = get_pstr(ssv); dfilt = get_filt(sfilt, varl = "gender", nfilt = T)
  mdpost$disease = appendvar("disease", "klinefelter_syndrome", dfilt)
  # female patterns are tested before male ones so that "female" is not
  # swallowed by the "male" pattern; exact matching semantics depend on how
  # get_pstr anchors single-letter terms ("f"/"m") — confirm against get_pstr.
  mdpost$sex = ifelse(grepl(get_pstr(c("female", "f", "FEMALE")), md$gender), "F",
                      ifelse(grepl(get_pstr(c("male", "MALE", "m")), md$gender),
                             "M", "NA"))
  mdpost$predsex = md$predsex
}
save(mdpost, file = nfn)
#----------------
# Cell comp. pred
#----------------
# Copies every "predcell*" column from md into mdpost, rounded to 2 decimals.
cncellcomp = colnames(md)[grepl(".*predcell.*", colnames(md))]
for(clp in cncellcomp){
  # grow mdpost one column at a time, restoring names after cbind
  # NOTE(review): if mdpost is a data.frame, mdpost[[clp]] <- ... would do
  # this in one step without the colnames bookkeeping.
  cn = colnames(mdpost)
  mdpost = cbind(mdpost, round(as.numeric(md[,clp]), digits = 2))
  colnames(mdpost) = c(cn, clp)
}
save(mdpost, file = nfn)
#--------------------------------------
# Preparation, inc. Fresh Frozen, FFPE
#--------------------------------------
# from prepd object
# Maps each sample's preparation (from the external prepd table) to a storage
# tag: "FFPE;formalin_fixed_paraffin_embedded" when "FFPE" appears in the
# preparation string, otherwise "F;frozen". Samples missing from prepd keep
# the default "NA" via the rbind of mdpost's own (gsm, storage) rows.
{
  mdpost$storage = "NA"
  prepd$storage = prepd$preparation
  prepd$storage = ifelse(!grepl(".*FFPE.*", prepd$storage),
                         "F;frozen",
                         "FFPE;formalin_fixed_paraffin_embedded")
  prepd = prepd[,c("gsm", "storage")]
  mf = mdpost[,c("gsm", "storage")]
  # pad prepd with the GSMs it lacks (storage "NA"), then align to mdpost order
  prepd = rbind(prepd, mf[!mf$gsm %in% prepd$gsm,])
  prepd = prepd[order(match(prepd$gsm, mf$gsm)),]
  # NOTE(review): identical() result is discarded — interactive sanity check;
  # consider stopifnot() so a misalignment fails loudly.
  identical(prepd$gsm, mf$gsm)
  mdpost$storage = prepd$storage
}
save(mdpost, file = nfn)
#-----
# Save
#-----
# Final persistence: .rda plus a .csv alongside (nfn minus its 4-char extension).
save(mdpost, file = nfn)
write.csv(mdpost, file = paste0(substr(nfn, 1, nchar(nfn)-4), ".csv"))
|
666fd32c652d0c3b8cd3a46a7533eb66ad124618
|
410a46ae079aca50fdb9b81242cca0668869e153
|
/R/exporta100primeros.R
|
2b16efbacb2cbc4eaa0053e8666f8e310a115e02
|
[] |
no_license
|
amanosalva/r-utils
|
2e25e9bdc0f4da73faac88643afd3c07da062352
|
051cc9bb804b4f46e16aa0cb1e63d3670b69a3bf
|
refs/heads/master
| 2021-09-03T07:27:22.496533
| 2018-01-07T02:00:07
| 2018-01-07T02:00:07
| 114,258,530
| 1
| 0
| null | null | null | null |
WINDOWS-1250
|
R
| false
| false
| 768
|
r
|
exporta100primeros.R
|
#' Export the first 100 records of a data frame
#'
#' Writes a .csv file with the first 100 records of \code{dataset} to the
#' given path and returns those records. Created mainly to ease data
#' exploration work.
#'
#' @param dataset A data frame (or object coercible to one).
#' @param path File path where the .csv file will be written.
#' @return A data frame containing the first 100 records of \code{dataset}.
#' @examples
#' \dontrun{
#' view <- exporta100primeros(mtcars, "mtcars_head.csv")
#' }
#' @export
exporta100primeros <- function(dataset, path){
  primeros <- as.data.frame(head(dataset, n = 100L))
  write.csv(primeros, path)
  return(primeros)
}
|
f58ecd6c841a429ede1307d698dfd0adbd2782b1
|
b021f3022f1a2e3a243400374a2134df5b170ede
|
/man/tripadvisor_places.Rd
|
c8ad824f0a7f5b97b87f1eeee32ef596c7278f63
|
[] |
no_license
|
cran/sgat
|
6ca1c369e06d58842cb9bd4634913f255779241d
|
7089a23463138d99c4be97c0740c6d7c00cb7073
|
refs/heads/master
| 2023-03-12T01:00:43.059494
| 2021-02-26T08:00:02
| 2021-02-26T08:00:02
| 342,620,503
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 786
|
rd
|
tripadvisor_places.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tripadvisor_places.R
\name{tripadvisor_places}
\alias{tripadvisor_places}
\title{Search for the n most popular places in a city according to Tripadvisor}
\usage{
tripadvisor_places(ciudad, n.resultados = Inf)
}
\arguments{
\item{ciudad}{City or area where you want the places' information}
\item{n.resultados}{Maximum number of results to retrieve. If not specified, all results will be retrieved}
}
\value{
Character vector with the names of the most popular places of the searched city or area according to Tripadvisor
}
\description{
Search for the n most popular places in a city according to Tripadvisor
}
\examples{
\dontrun{
tripadvisor_places("Pinamar, Argentina", 10)
}
}
|
ca10c4c041a96a1b766ccc2af5fb9cd39723348f
|
be031ab4888373eaab2bff677c414335231380e7
|
/man/replaceNaDS.Rd
|
f4e4fe8a49daa542b0ba2c3ef70e5f82d4fff386
|
[] |
no_license
|
ginberg/dsBase
|
3957d9183e3a3eb5c621452cea7b5da3d276195d
|
bfacea02bc5d05a101c4c5c674b56689770324f1
|
refs/heads/master
| 2020-05-07T18:11:15.355150
| 2015-03-30T13:31:45
| 2015-03-30T13:31:45
| 180,757,909
| 0
| 1
| null | 2019-04-11T09:21:35
| 2019-04-11T09:21:35
| null |
UTF-8
|
R
| false
| false
| 994
|
rd
|
replaceNaDS.Rd
|
\name{replaceNaDS}
\alias{replaceNaDS}
\title{Replaces the missing values in a vector}
\usage{
replaceNaDS(xvect, replacements)
}
\arguments{
\item{xvect}{a character, the name of the vector to
process.}
\item{replacements}{a vector which contains the
replacement value(s), a vector one or more values for
each study.}
}
\value{
a new vector without missing values
}
\description{
This function identifies missing values and replaces them
by a value or values specified by the analyst.
}
\details{
This function is used when the analyst prefers or requires
complete vectors. It is then possible to specify one value
for each missing value by first returning the number of
missing values using the function \code{numNaDS} but in
most cases it might be more sensible to replace all missing
values by one specific value e.g. replace all missing
values in a vector by the mean or median value. Once the
missing values have been replaced a new vector is created.
}
\author{
Gaye, A.
}
|
b27b89132f2b379ca035d8c53938e9b1cc08a36b
|
3cc53f8e991301aa9b051d1bd95af9b0086a45b1
|
/man/blblm-package.Rd
|
8d424ea6f0b6d3a782b605b12f1e1bf06a4e911d
|
[
"MIT"
] |
permissive
|
ahui187/blblm
|
a876f5747e9aed912e63e5662af660e57ec63f91
|
628cf1e448b6d6336b9adb41b8465c4f6d56e70a
|
refs/heads/master
| 2022-10-12T16:53:51.882938
| 2020-06-09T02:42:20
| 2020-06-09T02:42:20
| 269,492,717
| 0
| 0
| null | 2020-06-05T00:20:06
| 2020-06-05T00:20:05
| null |
UTF-8
|
R
| false
| true
| 672
|
rd
|
blblm-package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/blblm.R
\docType{package}
\name{blblm-package}
\alias{blblm}
\alias{blblm-package}
\title{blblm: Bag of Little Bootstraps}
\description{
The data is first separated into m subsamples. Then for each subsample,
we resample until we have the size of the original data r times, find the bootstrap
statistic (linear regression in this case), and compute the desired statistic (linear regression
coefficients and residual standard deviation) from the bootstrap statistics. Then the
m statistics are averaged.
}
\details{
Linear Regression with Little Bag of Bootstraps
}
|
fb15a51543abe40c28bfff75033f586873b1b4a0
|
31e0e9951808a594a16b2c91b0d67ad8db8ca070
|
/scripts/chapter_5.R
|
460b566a43c711b43517a3fdef88bb695a88bc7b
|
[] |
no_license
|
DrJohan/R
|
50c9c81de5d868de183477363ae63c2a687e48b9
|
813762488f622ff8127426616946618d5359600f
|
refs/heads/master
| 2020-03-29T08:26:43.844179
| 2016-05-13T11:21:24
| 2016-05-13T11:21:24
| null | 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 9,685
|
r
|
chapter_5.R
|
######################################################################
# Biostatistiques et analyse des données de santé avec R
# chapter_5.tex
# R version 3.2.3 (2015-12-10) -- "Wooden Christmas-Tree"
# Christophe Lalanne, Mounir Mesbah
######################################################################
######################################################################
## Statistiques descriptives
######################################################################
> summary(birthwt[,c("bwt","lwt")])
> summary(birthwt[,c(10,3)])
> summary(subset(birthwt, select=c(bwt, lwt)))
> sapply(birthwt[,c("bwt","lwt")], summary)
> apply(birthwt[,c(10,3)], 2, summary)
bwt lwt
Min. 709 80.0
1st Qu. 2414 110.0
Median 2977 121.0
Mean 2945 129.8
3rd Qu. 3487 140.0
Max. 4990 250.0
> histogram(~ bwt + lwt, data=birthwt, breaks=NULL, xlab="",
scales=list(x=list(relation="free")))
######################################################################
## Diagramme de dispersion et courbe loess
######################################################################
> birthwt$lwt <- birthwt$lwt/2.2
> xyplot(bwt ~ lwt, data=birthwt, type=c("p","g","smooth"))
######################################################################
## Mesures d'association paramétrique et non paramétrique
######################################################################
> with(birthwt, cor(bwt, lwt))
[1] 0.1857333
> with(birthwt, cor(bwt, lwt, method="spearman"))
[1] 0.2488882
> cor(birthwt[,c("age","lwt","bwt")])
age lwt bwt
age 1.00000000 0.1800732 0.09031781
lwt 0.18007315 1.0000000 0.18573328
bwt 0.09031781 0.1857333 1.00000000
######################################################################
## Estimation par intervalles et test d'inférence
######################################################################
> with(birthwt, cor.test(bwt, lwt))
Pearson's product-moment correlation
data: bwt and lwt
t = 2.5848, df = 187, p-value = 0.0105
alternative hypothesis: true correlation is not equal to 0
95 percent confidence interval:
0.04417405 0.31998094
sample estimates:
cor
0.1857333
> cor.test(~ bwt + lwt, data=birthwt, subset=bwt > 2500)
Pearson's product-moment correlation
data: bwt and lwt
t = 1.6616, df = 128, p-value = 0.09905
alternative hypothesis: true correlation is not equal to 0
95 percent confidence interval:
-0.02757158 0.30974090
sample estimates:
cor
0.1453043
######################################################################
## Droite de régression
######################################################################
> lm(bwt ~ lwt, data=birthwt)
Call:
lm(formula = bwt ~ lwt, data = birthwt)
Coefficients:
(Intercept) lwt
2369.624 9.744
> r <- lm(bwt ~ lwt, data=birthwt)
> summary(r)
Call:
lm(formula = bwt ~ lwt, data = birthwt)
Residuals:
Min 1Q Median 3Q Max
-2192.12 -497.97 -3.84 508.32 2075.60
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) 2369.624 228.493 10.371 <2e-16 ***
lwt 9.744 3.770 2.585 0.0105 *
---
Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Residual standard error: 718.4 on 187 degrees of freedom
Multiple R-squared: 0.0345, Adjusted R-squared: 0.02933
F-statistic: 6.681 on 1 and 187 DF, p-value: 0.0105
> xyplot(bwt ~ lwt, data=birthwt, type=c("p","g","r"))
######################################################################
## Estimation par intervalles et tableau d'analyse de variance
######################################################################
> confint(r)
2.5 % 97.5 %
(Intercept) 1918.867879 2820.37916
lwt 2.307459 17.18061
> res <- cbind(coef(r), confint(r))
> colnames(res)[1] <- "Coef"
> round(res, 3)
Coef 2.5 % 97.5 %
(Intercept) 2369.624 1918.868 2820.379
lwt 9.744 2.307 17.181
> anova(r)
Analysis of Variance Table
Response: bwt
Df Sum Sq Mean Sq F value Pr(>F)
lwt 1 3448639 3448639 6.6814 0.0105 *
Residuals 187 96521017 516155
---
Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
######################################################################
## Prédictions à partir du modèle de régression
######################################################################
> head(fitted(r))
85 86 87 88 89 91
3175.721 3056.135 2834.680 2847.967 2843.538 2918.833
> head(cbind(birthwt$bwt, fitted(r)))
[,1] [,2]
85 2523 3175.721
86 2551 3056.135
87 2557 2834.680
88 2594 2847.967
89 2600 2843.538
91 2622 2918.833
> xyplot(bwt + fitted(r) ~ lwt, data=birthwt)
> dp <- data.frame(lwt=seq(35, 120, by=5))
> bwtpred <- predict(r, newdata=dp)
> dp$bwt <- bwtpred
> head(dp)
lwt bwt
1 35 2710.665
2 40 2759.385
3 45 2808.105
4 50 2856.825
5 55 2905.546
6 60 2954.266
######################################################################
## Diagnostic du modèle et analyse des résidus
######################################################################
> head(resid(r))
85 86 87 88 89 91
-652.7211 -505.1352 -277.6798 -253.9671 -243.5380 -296.8329
> head(birthwt$bwt - fitted(r))
85 86 87 88 89 91
-652.7211 -505.1352 -277.6798 -253.9671 -243.5380 -296.8329
> xyplot(resid(r) ~ fitted(r), type=c("p","g"))
######################################################################
## Lien avec l'ANOVA
######################################################################
> lm(bwt ~ race, data=birthwt)
Call:
lm(formula = bwt ~ race, data = birthwt)
Coefficients:
(Intercept) raceBlack raceOther
3102.7 -383.0 -297.4
> bwtmeans <- with(birthwt, tapply(bwt, race, mean))
> bwtmeans
White Black Other
3102.719 2719.692 2805.284
> bwtmeans[2:3] - bwtmeans[1]
Black Other
-383.0264 -297.4352
> lm(bwt ~ race - 1, data=birthwt)
Call:
lm(formula = bwt ~ race - 1, data = birthwt)
Coefficients:
raceWhite raceBlack raceOther
3103 2720 2805
> anova(lm(bwt ~ race, data=birthwt))
Analysis of Variance Table
Response: bwt
Df Sum Sq Mean Sq F value Pr(>F)
race 2 5015725 2507863 4.9125 0.008336 **
Residuals 186 94953931 510505
---
Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
######################################################################
## Régression linéaire multiple
######################################################################
> lm(bwt ~ scale(lwt, scale=FALSE) + ftv, data=birthwt)
######################################################################
## Applications
######################################################################
> cystic <- read.table("cystic.dat", header=TRUE)
> str(cystic)
> summary(cystic)
> cystic$Sex <- factor(cystic$Sex, labels=c("M","F"))
> table(cystic$Sex)
> with(cystic, cor(PEmax, Weight))
> with(cystic, cor.test(PEmax, Weight))
> splom(cystic[,-c(1,3)], varname.cex=0.7, cex=.8)
> round(cor(cystic[,-3]), 3)
> round(cor(cystic[,-3], method="spearman"), 3)
> library(ppcor)
> with(cystic, pcor.test(PEmax, Weight, Age))
> cystic$Age.ter <- cut(cystic$Age,
breaks=quantile(cystic$Age,
c(0,0.33,0.66,1)),
include.lowest=TRUE)
> cystic2 <- subset(cystic, as.numeric(Age.ter) %in% c(1,3))
> cystic2$Age.ter <- factor(cystic2$Age.ter)
> xyplot(PEmax ~ Weight, data=cystic2, groups=Age.ter,
auto.key=list(corner=c(0,1)))
> fram <- read.csv("data/Framingham.csv")
> head(fram)
> str(fram)
> table(fram$sex)
> fram$sex <- factor(fram$sex, labels=c("M","F"))
> apply(fram, 2, function(x) sum(is.na(x)))
> with(fram, table(sex[!is.na(bmi)]))
> xyplot(sbp ~ bmi | sex, data=fram, type=c("p","g"), alpha=0.5,
cex=0.7, pch=19)
> with(subset(fram, sex=="M"), cor(sbp, bmi, use="pair"))
> with(subset(fram, sex=="F"), cor(sbp, bmi, use="pair"))
> library(psych)
> r.test(n=2047, r12=0.23644, n2=2643, r34=0.37362)
> library(gridExtra)
> p1 <- histogram(~ bmi, data=fram)
> p2 <- histogram(~ log(bmi), data=fram)
> p3 <- histogram(~ sbp, data=fram)
> p4 <- histogram(~ log(sbp), data=fram)
> grid.arrange(p1, p2, p3, p4)
> reg.resM <- lm(log(sbp) ~ log(bmi), data=fram, subset=sex=="M")
> reg.resF <- lm(log(sbp) ~ log(bmi), data=fram, subset=sex=="F")
> summary(reg.resM) # Hommes
> confint(reg.resM)
> summary(reg.resF) # Femmes
> confint(reg.resF)
> res <- data.frame(pente=c(coef(reg.resM)[2],
coef(reg.resF)[2]),
rbind(confint(reg.resM)[2,],
confint(reg.resF)[2,]))
> rownames(res) <- c("M","F")
> colnames(res)[2:3] <- c("2.5 %", "97.5 %")
> round(res, 3)
> options(contrasts=c("contr.sum", "contr.poly"))
> xyplot(bwt ~ lwt | race, data=birthwt, layout=c(3,1),
type=c("p","g"), aspect=0.8)
> xyplot(bwt ~ lwt, data=birthwt, groups=race)
> reg.res <- lm(bwt ~ scale(lwt, scale=FALSE), data=birthwt)
> summary(reg.res)
> reg.res2 <- lm(bwt ~ race, data=birthwt)
> summary(reg.res2)
> aov.res <- aov(bwt ~ race, data=birthwt)
> summary(aov.res)
> summary.lm(aov.res)
> anova(reg.res2)
> grp.means <- with(birthwt, tapply(bwt, race, mean))
> grp.means[2:3] - grp.means[1] # modèle de régression reg.res2
> m <- lm(bwt ~ lwt, data=birthwt)
> d <- data.frame(lwt=60)
> predict(m, newdata=d, interval="confidence")
|
4597fd2f1de56b78c737b64d144f8dd551568656
|
6a42708adb6688ffce02234a55bf3ced346b4d0f
|
/tests/testthat/test-encoding.R
|
2334591287c40715e7e963bb7b73ef3fd48cf9db
|
[
"MIT"
] |
permissive
|
r-lib/keyring
|
3e9bf3739fbde6ec66cdfbe42db64fd633bcfbdc
|
83bf966826c617cf49222946b39015e64f4e1c64
|
refs/heads/main
| 2023-08-07T14:36:05.672103
| 2023-07-20T08:52:55
| 2023-07-20T08:52:55
| 80,223,347
| 162
| 32
|
NOASSERTION
| 2023-07-20T08:52:56
| 2017-01-27T16:18:55
|
R
|
UTF-8
|
R
| false
| false
| 4,100
|
r
|
test-encoding.R
|
# Tests for the keyring Windows-encoding option resolution (get_encoding_opt)
# and for round-tripping credentials under explicit encodings.
# All tests are Windows-only (skip_if_not_win) and use withr local_* helpers
# so option/env-var changes revert automatically at test end.
# NOTE(review): context() is deprecated in testthat 3e; the file name serves
# as the context there.
context("Testing encoding retrieval function")
# Neither the R option nor the env var set: resolver falls back to "auto".
test_that("No option/env var set returns auto", {
  skip_if_not_win()
  withr::local_options(keyring.encoding_windows = NULL)
  withr::local_envvar(KEYRING_ENCODING_WINDOWS = NA_character_)
  encoding <- get_encoding_opt()
  expect_equal(encoding, "auto")
})
# Only the option set: its value wins.
test_that("Option encoding set and env unset returns option encoding", {
  skip_if_not_win()
  withr::local_options(keyring.encoding_windows = "UTF-16LE")
  withr::local_envvar(KEYRING_ENCODING_WINDOWS = NA_character_)
  encoding <- get_encoding_opt()
  expect_equal(encoding, "UTF-16LE")
})
# Only the env var set: its value is used.
test_that("Option encoding unset and env set returns env encoding", {
  skip_if_not_win()
  withr::local_options(keyring.encoding_windows = NULL)
  withr::local_envvar("KEYRING_ENCODING_WINDOWS" = "UTF-8")
  encoding <- get_encoding_opt()
  expect_equal(encoding, "UTF-8")
})
# Both set to the same value: no conflict, value returned.
test_that("Option encoding set and env var set and EQUAL returns expected value", {
  skip_if_not_win()
  withr::local_options(keyring.encoding_windows = "UTF-16LE")
  withr::local_envvar("KEYRING_ENCODING_WINDOWS" = "UTF-16LE")
  encoding <- get_encoding_opt()
  expect_equal(encoding, "UTF-16LE")
})
# An encoding name not present in iconvlist() must be rejected.
test_that("Invalid encoding (not in iconvlist) returns error", {
  skip_if_not_win()
  withr::local_options(keyring.encoding_windows = "doesnotexist")
  withr::local_envvar("KEYRING_ENCODING_WINDOWS" = "doesnotexist")
  expect_error(get_encoding_opt())
})
# A near-miss name ("UTF-16LP") should trigger the "Did you mean …?" hint.
test_that("iconv suggestion works as expected", {
  skip_if_not_win()
  withr::local_options(keyring.encoding_windows = "UTF-16LP")
  withr::local_envvar("KEYRING_ENCODING_WINDOWS" = NA_character_)
  expect_error(
    get_encoding_opt(),
    "Encoding not found in iconvlist(). Did you mean UTF-16LE?",
    fixed = TRUE
  )
})
# When option and env var disagree, the option takes precedence.
test_that("Option has precedence", {
  skip_if_not_win()
  withr::local_options(keyring.encoding_windows = iconvlist()[1])
  withr::local_envvar("KEYRING_ENCODING_WINDOWS" = iconvlist()[2])
  expect_identical(get_encoding_opt(), iconvlist()[1])
})
# End-to-end: set and retrieve a key with UTF-16LE via env var, then verify
# the raw stored credential really is UTF-16LE encoded.
test_that("Set key with UTF-16LE encoding", {
  skip_if_not_win()
  skip_on_cran()
  service <- random_service()
  user <- random_username()
  pass <- random_password()
  # Now, set a key with UTF-16LE encoding using new options
  withr::local_options(keyring.encoding_windows = NULL)
  withr::local_envvar("KEYRING_ENCODING_WINDOWS" = "UTF-16LE")
  keyring::key_set_with_value(service = service, username = user, password = pass)
  # Get the password
  expect_equal(keyring::key_get(service = service, username = user), pass)
  # Show that it is UTF-16LE
  raw_password <- keyring:::b_wincred_i_get(target = paste0(":", service, ":", user))
  expect_equal(iconv(list(raw_password), from = "UTF-16LE", to = ""), pass)
  key_delete(service = service, username = user)
})
# Same round-trip, but inside a freshly created (then deleted) custom keyring.
test_that("Set key with UTF-16LE encoding plus a keyring", {
  skip_if_not_win()
  skip_on_cran()
  withr::local_options(keyring.encoding_windows = NULL)
  withr::local_envvar("KEYRING_ENCODING_WINDOWS" = "UTF-16LE")
  keyring <- random_keyring()
  kb <- backend_wincred$new(keyring = keyring)
  kb$.__enclos_env__$private$keyring_create_direct(keyring, "secret123!")
  expect_true(keyring %in% kb$keyring_list()$keyring)
  list <- kb$list()
  expect_equal(nrow(list), 0)
  service <- random_service()
  username <- random_username()
  password <- random_password()
  expect_silent(
    kb$set_with_value(service, username, password)
  )
  expect_equal(kb$get(service, username), password)
  expect_silent(kb$delete(service, username))
  expect_silent(kb$keyring_delete(keyring = keyring))
  expect_false(keyring %in% kb$keyring_list()$keyring)
})
# A password containing a marked-UTF-8 character survives a UTF-8 round trip.
test_that("marked UTF-8 strings work", {
  skip_if_not_win()
  skip_on_cran()
  withr::local_options(keyring.encoding_windows = "UTF-8")
  service <- random_service()
  user <- random_username()
  pass <- "this is ok: \u00bc"
  keyring::key_set_with_value(service = service, username = user, password = pass)
  # Get the password
  expect_equal(keyring::key_get(service = service, username = user), pass)
  key_delete(service = service, username = user)
})
|
aed0093c6fc34443882ddf2cc4212f5dc736fe9f
|
7a5d49b43c5d6f9a8131e9e38178d39ecc068081
|
/R/ExportFehler.R
|
db87377e893e92e036e6a4616199aada322ebe4c
|
[] |
no_license
|
statistikat/mzR
|
85ac986612990e6fd1dfd337a657a90ba271fd4e
|
dca98366ffa9032c0e926abe5579a1d1053424c7
|
refs/heads/master
| 2023-08-16T06:05:36.632937
| 2023-08-07T10:46:01
| 2023-08-07T10:46:01
| 57,034,956
| 2
| 0
| null | 2019-06-04T13:52:18
| 2016-04-25T11:03:05
|
R
|
UTF-8
|
R
| false
| false
| 4,004
|
r
|
ExportFehler.R
|
#' @export
#' @rdname ExportFehler
# S3 generic: dispatches to export.<class> methods (e.g. export.mzR below).
export <- function(x,outFilePath=getwd(),outFileName=NULL)
  UseMethod("export")
#' Export der Schaetz-und Fehlerrechnungsergebnisse.
#'
#' Funktion exportiert die Ergebnisse der Schaetz-und Fehlerrechnungsfunktionen
#' in ein .csv-File.
#'
#'
#' @aliases export.mzR export
#' @param x Ergebnis von GroupSize, GroupRate, Total oder Mean.
#' @param outFilePath Pfad, wo die CSV-Datei gespeichert werden soll. Default
#' ist das aktuelle Work Directory.
#' @param outFileName Name der CSV-Datei in die exportiert werden soll.
#' @return Ein .csv-File mit den Ergebnissen wird in \code{out_file_pfad}
#' abgespeichert.
#' @seealso
#' \code{\link{ImportData},\link{IndivImportData},\link{GetLabels},\link{GroupSize},\link{GroupRate},\link{Total},\link{Mean}}
#' @export
#' @rdname ExportFehler
#' @examples
#' # Daten laden (oder Daten einlesen mit ImportData() bzw. IndivImportData())
#' data(mzTestData)
#'
#' # Arbeitslosenzahlen (Absolutwerte)
#' ergebnis <- GroupSize(mzTestData,TFstring="xerwstat==2&balt>=15&balt<=74")
#' \dontrun{
#' # Arbeitslosenzahlen (Absolutwerte) ins Working Directory (zu erfragen mit getwd()) exportieren
#' export(ergebnis)
#' # Erwerbsstatus nach Bundesland ins Working Directory exportieren
#' export(GroupSize(dat,each="xerwstat+xnuts2"))
#' }
#'
# S3 method for mzR result objects (GroupSize/GroupRate/Total/Mean output).
# Builds a result matrix via the object's print method, appends an "est_Info"
# footer describing the estimate, and writes a semicolon-separated CSV
# (write.csv2, Latin1) to outFilePath. Console output is intentionally German
# (user-facing); do not translate the cat()/paste() strings.
export.mzR <- function(x,outFilePath=getwd(),outFileName=NULL){
  # suppress thousands separators so print(x) yields plain numbers
  attr(x,"thousands_separator") <- FALSE
  # NOTE(review): digits is extracted but never used in this function.
  digits <- attr(x,"digits")
  each <- attr(x,"each")          # grouping spec; NULL means single result
  ergType <- attr(x,"ergType")    # one of GroupSize / GroupRate / Total / Mean
  TFstring <- attr(x,"TFstring")  # subset condition (numerator)
  TFstring2 <- attr(x,"TFstring2")# denominator condition (GroupRate only)
  ergVar <- attr(x,"var")         # analysed variable, if any
  if(isTRUE(TFstring)){
    # TRUE stands for "no subset" -> label as total population
    TFstring <- "Gesamtbev\u00F6lkerung"
  }
  # collect the date fields; for grouped results they sit in the first element
  if(is.null(each)){
    date <- paste(unlist(x[grep("date",names(x),value=TRUE)]),collapse=" ")
  }else{
    date <- paste(unlist(x[[1]][grep("date",names(x[[1]]),value=TRUE)]),collapse=" ")
  }
  cat("\nNachfolgende Ergebnisse werden exportiert...\n")
  # Build est_Info footer per result type.
  # NOTE(review): `|` works here on scalars but `||` would be the idiomatic
  # scalar operator in these conditions.
  if(ergType=="GroupSize" | ergType=="Total"){
    cat("\nAbsolutwerte f\u00FCr ",TFstring,":\n\n",sep="")
    if(!is.null(ergVar)){
      est_Info <- c("est_Info:",paste("Absolutwerte",ergVar),TFstring,date)
    }else{
      est_Info <- c("est_Info:","Absolutwerte",TFstring,date)
    }
  }
  if(ergType=="GroupRate"){
    cat("\nProzentwerte f\u00FCr ",TFstring," im Z\u00E4hler \n",sep="")
    if(!is.null(TFstring2)){
      cat("und ",TFstring2," im Nenner:\n\n",sep="")
      est_Info <- c("est_Info:","Prozentwerte",paste("Z\u00E4hler:",TFstring),paste("Nenner:",TFstring2),date)
    }else{
      cat("und der Gesamtbev\u00F6lkerung im Nenner:\n\n",sep="")
      est_Info <- c("est_Info:","Prozentwerte",paste("Z\u00E4hler:",TFstring),paste("Nenner:","Gesamtbev\u00F6lkerung"),date)
    }
  }
  if(ergType=="Mean"){
    cat("\nMittelwerte f\u00FCr ",TFstring,":\n\n",sep="")
    est_Info <- c(TFstring,rep("",4))  # placeholder, overwritten just below
    if(!is.null(ergVar)){
      est_Info <- c("est_Info:",paste("Mittelwerte",ergVar),TFstring,date)
    }else{
      est_Info <- c("est_Info:","Mittelwerte",TFstring,date)
    }
  }
  # print(x) both displays the result and returns the printable table;
  # grouped results are row-bound into a single matrix.
  if(is.null(each)){
    x <- as.matrix(print(x))
  }else{
    x <- as.matrix(do.call(rbind,print(x)))
    if(nchar(date)<10){
      # short (single-quarter) date: disambiguate row names with it
      rownames(x) <- paste(rownames(x),date,sep=".")
    }
  }
  # append est_Info as footer rows (values NA, labels carried in row names)
  est_Info_matrix <- matrix(NA,length(est_Info)+1,ncol(x))
  rownames(est_Info_matrix) <- c("",est_Info)
  x <- rbind(x,est_Info_matrix)
  # default file name encodes the result type and date
  if(is.null(outFileName)){
    if(ergType=="GroupSize" | ergType=="Total")
      outFileName <- paste0("Fehlerrechnung Absolutwerte ",date)
    if(ergType=="GroupRate")
      outFileName <- paste0("Fehlerrechnung Prozentwerte ",date)
    if(ergType=="Mean")
      outFileName <- paste0("Fehlerrechnung Mittelwerte ",date)
  }
  # NOTE(review): file.path(outFilePath, ...) would be more portable than
  # manual "/" concatenation.
  write.csv2(x,paste0(outFilePath,"/",outFileName,".csv"),na="",fileEncoding="Latin1")
  cat("\n'",paste0(outFileName,".csv'"), " wurde in ", outFilePath," abgespeichert.\n",sep="")
}
|
5ce4fcb43e7a3507de5513e76006c2f0678399e0
|
93a2c39a2aad7d772d7367fb271784b2bb380a0c
|
/R/multinomial.glm.R
|
010429246b24d8e72f1cc20152b4e22db06a7d2f
|
[] |
no_license
|
duncanplee/CARBayes
|
5741455e02985c1590144e9a3ac0331c2d1b7a55
|
a856f72c9ca5000e34c66f7d492e02a63fc98cfe
|
refs/heads/master
| 2022-09-20T02:05:36.835476
| 2021-09-30T07:17:26
| 2021-09-30T07:17:26
| 147,368,109
| 14
| 6
| null | 2022-09-06T14:29:23
| 2018-09-04T15:31:18
|
R
|
UTF-8
|
R
| false
| false
| 11,173
|
r
|
multinomial.glm.R
|
multinomial.glm <- function(formula, data=NULL, trials, burnin, n.sample, thin=1, prior.mean.beta=NULL, prior.var.beta=NULL, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "multinomial")
K <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss
J <- ncol(Y)
N.all <- K * J
#### If only one element in Y is missing then fix it as we know the total number of trials
which.miss.row <- J-apply(which.miss,1,sum)
which.miss.1 <- which(which.miss.row==1)
if(length(length(which.miss.1))>0)
{
for(r in 1:length(which.miss.1))
{
which.miss[which.miss.1[r], is.na(Y[which.miss.1[r], ])] <- 1
Y[which.miss.1[r], is.na(Y[which.miss.1[r], ])] <- trials[which.miss.1[r]] - sum(Y[which.miss.1[r], ], na.rm=T)
}
n.miss <- sum(is.na(Y))
which.miss.row <- J-apply(which.miss,1,sum)
}else
{}
Y.DA <- Y
const.like <- lfactorial(trials[which.miss.row==0]) - apply(lfactorial(Y[which.miss.row==0, ]),1,sum)
K.present <- sum(which.miss.row==0)
#### Determine which rows have missing values
if(n.miss>0) which.miss.row2 <- which(which.miss.row>0)
#### Check and format the trials argument
if(sum(is.na(trials))>0) stop("the numbers of trials has missing 'NA' values.", call.=FALSE)
if(!is.numeric(trials)) stop("the numbers of trials has non-numeric values.", call.=FALSE)
int.check <- K-sum(ceiling(trials)==floor(trials))
if(int.check > 0) stop("the numbers of trials has non-integer values.", call.=FALSE)
if(min(trials)<=0) stop("the numbers of trials has zero or negative values.", call.=FALSE)
diffs <- apply(Y, 1, sum, na.rm=T) - trials
if(max(diffs)>0) stop("the response variable has larger values that the numbers of trials.", call.=FALSE)
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
common.prior.beta.check(prior.mean.beta, prior.var.beta, p)
#### Compute the blocking structure for beta
block.temp <- common.betablock(p, 5)
beta.beg <- block.temp[[1]]
beta.fin <- block.temp[[2]]
n.beta.block <- block.temp[[3]]
list.block <- as.list(rep(NA, n.beta.block*2))
for(r in 1:n.beta.block)
{
list.block[[r]] <- beta.beg[r]:beta.fin[r]-1
list.block[[r+n.beta.block]] <- length(list.block[[r]])
}
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
#############################
#### Initial parameter values
#############################
beta <- array(NA, c(p, (J-1)))
for(i in 2:J)
{
mod.glm <- glm(cbind(Y[ ,i], trials - Y[ ,i])~X.standardised-1, offset=offset[ ,(i-1)], family="quasibinomial")
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
beta[ ,(i-1)] <- rnorm(n=p, mean=beta.mean, sd=beta.sd)
}
regression <- X.standardised %*% beta
###############################
#### Set up the MCMC quantities
###############################
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, (J-1)*p))
samples.loglike <- array(NA, c(n.keep, K.present))
samples.fitted <- array(NA, c(n.keep, N.all))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
#### Metropolis quantities
accept.beta <- rep(0,2*(J-1))
proposal.sd.beta <- rep(0.01, (J-1))
###########################
#### Run the Bayesian model
###########################
#### Start timer
if(verbose)
{
cat("Generating", n.keep, "post burnin and thinned (if requested) samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
if(n.miss>0)
{
for(g in 1:length(which.miss.row2))
{
## Determine which row (area) of Y to update
row <- which.miss.row2[g]
## Compute the vector of probabilities for that row
lp <- c(0, regression[row, ] + offset[row, ])
prob <- exp(lp) / sum(exp(lp))
## Do the multinomial data augmentation
if(which.miss.row[row]==J)
{
## All the Ys are missing
Y.DA[row, ] <- as.numeric(rmultinom(n=1, size=trials[row], prob=prob))
}else
{
## Not all the Ys are missing
## Re-normalise the probabilities
prob[!is.na(Y[row, ])] <- 0
prob <- prob / sum(prob)
temp <- as.numeric(rmultinom(n=1, size=trials[row]-sum(Y[row, ], na.rm=T), prob=prob))
Y.DA[row, which.miss[row, ]==0] <- temp[which.miss[row, ]==0]
}
}
}else
{}
###################
## Sample from beta
###################
for(r in 1:(J-1))
{
temp <- multinomialbetaupdateRW(X.standardised, K, J, p, r, beta, offset, Y.DA, prior.mean.beta, prior.var.beta, n.beta.block, proposal.sd.beta[r], list.block, rep(0, K))
beta[ ,r] <- temp[[1]][ ,r]
accept.beta[r] <- accept.beta[r] + temp[[2]]
accept.beta[(r+J-1)] <- accept.beta[(r+J-1)] + n.beta.block
}
regression <- X.standardised %*% beta
#########################
## Calculate the deviance
#########################
lp <- regression + offset
lp <- cbind(rep(0,K), lp)
prob <- exp(lp) / apply(exp(lp),1,sum)
fitted <- prob * trials
loglike <- const.like + apply(Y[which.miss.row==0, ] * log(prob[which.miss.row==0, ]),1,sum)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- as.numeric(beta)
samples.loglike[ele, ] <- loglike
samples.fitted[ele, ] <- as.numeric(t(fitted))
if(n.miss>0) samples.Y[ele, ] <- t(Y.DA)[is.na(t(Y))]
}else
{}
########################################
## Self tune the acceptance probabilties
########################################
if(ceiling(j/100)==floor(j/100) & j < burnin)
{
#### Update the proposal sds
for(r in 1:(J-1))
{
if(p>2)
{
proposal.sd.beta[r] <- common.accceptrates1(accept.beta[c(r, (r+J-1))], proposal.sd.beta[r], 40, 50)
}else
{
proposal.sd.beta[r] <- common.accceptrates1(accept.beta[c(r, (r+J-1))], proposal.sd.beta[r], 30, 40)
}
}
accept.beta <- rep(0,2*(J-1))
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
##### end timer
if(verbose)
{
cat("\nSummarising results.")
close(progressBar)
}else
{}
###################################
#### Summarise and save the results
###################################
#### Compute the acceptance rates
accept.beta <- 100 * sum(accept.beta[1:(J-1)]) / sum(accept.beta[(J:(2*(J-1)))])
accept.final <- accept.beta
names(accept.final) <- c("beta")
#### Compute the fitted deviance
mean.beta <- matrix(apply(samples.beta, 2, mean), nrow=p, ncol=(J-1), byrow=F)
mean.logit <- X.standardised %*% mean.beta + offset
mean.logit <- cbind(rep(0,K), mean.logit)
mean.prob <- exp(mean.logit) / apply(exp(mean.logit),1,sum)
deviance.fitted <- -2* sum(const.like + apply(Y[which.miss.row==0, ] * log(mean.prob[which.miss.row==0, ]),1,sum))
#### Model fit criteria
modelfit <- common.modelfit(samples.loglike, deviance.fitted)
#### transform the parameters back to the origianl covariate scale.
samples.beta.orig <- samples.beta
for(r in 1:(J-1))
{
samples.beta.orig[ ,((r-1)*p+1):(r*p)] <- common.betatransform(samples.beta[ ,((r-1)*p+1):(r*p)], X.indicator, X.mean, X.sd, p, FALSE)
}
#### Create a summary object
samples.beta.orig <- mcmc(samples.beta.orig)
summary.beta <- t(apply(samples.beta.orig, 2, quantile, c(0.5, 0.025, 0.975)))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(accept.beta,(J-1)*p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z)
col.name <- rep(NA, p*(J-1))
if(is.null(colnames(Y)))
{
for(r in 1:(J-1))
{
col.name[((r-1)*p+1):(r*p)] <- paste("Category ", r+1, " - ", colnames(X), sep="")
}
}else
{
for(r in 1:(J-1))
{
col.name[((r-1)*p+1):(r*p)] <- paste(colnames(Y)[(r+1)], " - ", colnames(X), sep="")
}
}
rownames(summary.beta) <- col.name
colnames(summary.beta) <- c("Median", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.results <- summary.beta
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
#### Create the fitted values and residuals
fitted.values <- matrix(apply(samples.fitted, 2, mean), nrow=K, ncol=J, byrow=T)
response.residuals <- Y - fitted.values
var.y <- fitted.values * (1-fitted.values / trials)
## Pearson is (observed - fitted) / sd
pearson.residuals <- response.residuals / sqrt(var.y)
residuals <- list(response=response.residuals, pearson=pearson.residuals)
#### Compile and return the results
model.string <- c("Likelihood model - Multinomial (logit link function)", "\nRandom effects model - None\n")
if(n.miss==0) samples.Y = NA
samples <- list(beta=samples.beta.orig, fitted=mcmc(samples.fitted), Y=mcmc(samples.Y))
results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, X=X)
class(results) <- "CARBayes"
#### Finish by stating the time taken
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results)
}
|
a11052ce3789f28c5fa00ca24d061aba8c86b1e9
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/gsw/examples/gsw_entropy_ice.Rd.R
|
a10e90e6c1edc23872e1c990ccc4973a49034884
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 443
|
r
|
gsw_entropy_ice.Rd.R
|
library(gsw)

### Name: gsw_entropy_ice
### Title: Entropy of ice
### Aliases: gsw_entropy_ice

### ** Examples

# Test temperatures and pressures (renamed from t/p/e, which shadow base::t
# and other common names).
ice_temp <- c(-10.7856, -13.4329, -12.8103, -12.2600, -10.8863, -8.4036)
ice_pres <- c(10, 50, 125, 250, 600, 1000)
ice_entropy <- gsw_entropy_ice(ice_temp, ice_pres)
# Compare against the package's reference values (scaled by 1e3).
expect_equal(ice_entropy / 1e3, c(-1.303663820598987, -1.324090218294577, -1.319426394193644,
                                  -1.315402956671801, -1.305426590579231, -1.287021035328113))
|
ff00e6964ea79c0a08b29f1b690f19b7b4b59963
|
9d5ad6c558e943486c357d190410a06b9ba9b74e
|
/R/fullRepertoire.R
|
71b5d7df17c7fa6d43e208a22fb5ca5428d40686
|
[] |
no_license
|
fabio-t/AbSim
|
db715b89295ab85c3d61036b11f5560a87b69f38
|
043cd42f520e27ccde022d73c5067848c8c289b7
|
refs/heads/master
| 2022-01-26T14:32:27.325209
| 2018-08-17T08:30:03
| 2018-08-17T08:30:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,860
|
r
|
fullRepertoire.R
|
#' Simulates full heavy chain antibody repertoires for either human or mice.
#' @param max.tree.num Integer value describing maximum number of trees allowed
#' to generate the core sequences of the repertoire. Each of these trees is started
#' by an independent VDJ recombination event.
#' @inheritParams singleLineage
#' @return Returns a nested list. output[[1]][[1]] is an array of the simulated sequences
#' output[[2]][[1]] is an array of names corresponding to each sequence. For example, output[[2]][[1]][1]
#' is the name of the sequence corresponding to output[[1]][[1]][1]. The simulated tree of this is found in
#' output[[3]][[1]]. The length of the output list is determined by the number of sampling points
#' Thus if you have two sampling points, output[[4]][[1]] would be a character array holding the sequences
#' with output[[5]][[1]] as a character array holding the corresponding names. Then the sequences recovered
#' second sampling point would be stored at output[[6]][[1]], with the names at output[[7]][[1]]. This
#' nested list was designed for full antibody repertoire simulations, and thus, may seem unintuitive
#' for the single lineage function. The first sequence and name corresponds to the germline sequence
#' that served as the root of the tree. See vignette for comprehensive example
#' @export
#' @seealso singleLineage
#' @examples
# fullRepertoire(max.seq.num=51,max.timer=150,
#  SHM.method="naive",baseline.mut = 0.0008,
#  SHM.branch.prob = "identical", SHM.branch.param = 0.05,
#  SHM.nuc.prob = 15/350,species="mus",
#  VDJ.branch.prob = 0.1,proportion.sampled = 1,
#  sample.time = 50,max.tree.num=3, chain.type="heavy",vdj.model="naive",
#  vdj.insertion.mean=4, vdj.insertion.stdv=2)
fullRepertoire <- function(max.seq.num,
                           max.timer,
                           SHM.method,
                           baseline.mut,
                           SHM.branch.prob,
                           SHM.branch.param,
                           SHM.nuc.prob,
                           species,
                           VDJ.branch.prob,
                           proportion.sampled,
                           sample.time,
                           max.tree.num,
                           chain.type,
                           vdj.model,
                           vdj.insertion.mean,
                           vdj.insertion.stdv
){
  ## Per-tree bookkeeping. Every tree starts as the two-taxon placeholder
  ## newick string "(0,1)L;" whose "1" is later replaced by the node id of
  ## the tree's founding sequence.
  tree_list <- rep("(0,1)L;", max.tree.num)
  seq_list <- list()       # seq_list[[k]]  = current sequences of tree k
  name_list <- list()      # name_list[[k]] = matching "S<node>_<time>" names
  seq_per_tree <- rep(1, max.tree.num)
  germline_vseq <- c()     # germline V/D/J segments per tree (used by SHM below)
  germline_dseq <- c()
  germline_jseq <- c()
  germline_seq <- c()      # full germline sequence per tree
  germline_name <- c()
  max.seq.num <- max.seq.num + 1   # +1 accounts for the germline root sequence
  output_list <- list()
  max_SHM <- max.seq.num - max.tree.num

  ## Select the germline gene-segment tables once, up front.
  ## BUGFIX: the original conditions of the form `a || b || c && d` parse as
  ## `a || b || (c && d)`, so e.g. species "mus" with chain.type "light"
  ## incorrectly entered the heavy-chain branch. The original human
  ## light-chain branch also recombined *mouse* gene segments, and used the
  ## V tables where the J tables were intended (same V/J mix-up for the
  ## mouse light-chain germline_jseq).
  is.mouse <- species %in% c("mus", "mouse", "blc6")
  is.human <- species %in% c("hum", "human")
  if (is.mouse && chain.type == "heavy") {
    v_df <- ighv_mus_df
    d_df <- ighd_mus_df
    j_df <- ighj_mus_df
  } else if (is.human && chain.type == "heavy") {
    v_df <- ighv_hum_df
    d_df <- ighd_hum_df
    j_df <- ighj_hum_df
  } else if (is.human && chain.type == "light") {
    v_df <- rbind(iglv_hum_df, igkv_hum_df)
    d_df <- NULL                        # light chains carry no D segment
    j_df <- rbind(iglj_hum_df, igkj_hum_df)
  } else if (is.mouse && chain.type == "light") {
    v_df <- rbind(iglv_mus_df, igkv_mus_df)
    d_df <- NULL
    j_df <- rbind(iglj_mus_df, igkj_mus_df)
  } else {
    stop("Unrecognized species/chain.type combination: ",
         species, " / ", chain.type, call. = FALSE)
  }

  ## Draw one independent VDJ recombination event from the selected tables.
  ## Returns the germline segment names/sequences plus the recombined
  ## (junction-modified) sequence produced by .VDJ_RECOMBIN_FUNCTION.
  sample_germline <- function() {
    indV <- sample(x = seq_len(nrow(v_df)), size = 1)
    if (is.null(d_df)) {
      d_name <- ""
      d_seq <- ""
    } else {
      indD <- sample(x = seq_len(nrow(d_df)), size = 1)
      d_name <- as.character(d_df[[1]][indD])
      d_seq <- as.character(d_df[[2]][indD])
    }
    indJ <- sample(x = seq_len(nrow(j_df)), size = 1)
    v_seq <- as.character(v_df[[2]][indV])
    j_seq <- as.character(j_df[[2]][indJ])
    list(name = paste(as.character(v_df[[1]][indV]), d_name,
                      as.character(j_df[[1]][indJ]), sep = ""),
         vseq = v_seq, dseq = d_seq, jseq = j_seq,
         seq = paste(v_seq, d_seq, j_seq, sep = ""),
         recomb = as.character(.VDJ_RECOMBIN_FUNCTION(v_seq, d_seq, j_seq,
                      method = vdj.model,
                      chain.type = chain.type,
                      species = species,
                      vdj.insertion.mean = vdj.insertion.mean,
                      vdj.insertion.stdv = vdj.insertion.stdv)))
  }

  ## First tree: founded before the time loop starts (node "S1" at time 1).
  g <- sample_germline()
  germline_name[1] <- g$name
  germline_vseq[1] <- g$vseq
  germline_dseq[1] <- g$dseq
  germline_jseq[1] <- g$jseq
  germline_seq[1] <- g$seq
  seq_list[[1]] <- g$recomb
  name_list[[1]] <- paste(paste("S", "1", sep = ""), 1, sep = "_")

  current_seq_count <- 1
  SHM_count <- 0
  next_node <- 2             # node ids label sequences across all trees

  ## Pre-create the per-sampling-point output slots (sequences + names live
  ## at output_list[[2s+2]] / [[2s+3]] for sampling point s).
  if (is.numeric(sample.time) && length(sample.time) > 0) {
    for (i in seq(1, length(sample.time), 2)) {
      output_list[[i + 3]] <- list()
      output_list[[i + 4]] <- list()
    }
  }

  ## Per-node probability of an SHM branching event, drawn once from the
  ## requested distribution.
  if (SHM.branch.prob == "identical") {
    new_SHM_prob <- rep(SHM.branch.param, max.seq.num)
  } else if (SHM.branch.prob == "uniform") {
    new_SHM_prob <- stats::runif(n = max.seq.num, min = SHM.branch.param[1], max = SHM.branch.param[2])
  } else if (SHM.branch.prob == "exponential" || SHM.branch.prob == "exp") {
    new_SHM_prob <- stats::rexp(n = max.seq.num, rate = SHM.branch.param)
  } else if (SHM.branch.prob == "lognorm" || SHM.branch.prob == "lognormal") {
    new_SHM_prob <- stats::rlnorm(n = max.seq.num, meanlog = SHM.branch.param[1], sdlog = SHM.branch.param[2])
  } else if (SHM.branch.prob == "normal" || SHM.branch.prob == "norm") {
    new_SHM_prob <- stats::rnorm(n = max.seq.num, mean = SHM.branch.param[1], sd = SHM.branch.param[2])
  }

  sample_index <- 1
  sample.time <- sort(sample.time, decreasing = FALSE)
  current_tree_num <- 1

  for (i in 1:max.timer) {
    if (length(unlist(seq_list)) >= max.seq.num) break

    ## Sampling event: record a proportion of every tree's current sequences.
    if (i == sample.time[sample_index] && length(unlist(seq_list)) >= 1) {
      for (z in 1:length(seq_list)) {
        current_len <- length(seq_list[[z]])
        holding_size <- as.integer(proportion.sampled * current_len)
        holding_ind <- sample.int(n = current_len,
                                  size = holding_size,
                                  replace = FALSE,
                                  prob = rep(x = 1 / current_len, current_len))
        output_list[[2 * sample_index + 2]][[z]] <- seq_list[[z]][holding_ind]
        output_list[[2 * sample_index + 3]][[z]] <- paste(name_list[[z]][holding_ind], sample.time[sample_index], sep = "_")
      }
      if (length(sample.time) > sample_index) sample_index <- sample_index + 1
    }

    ## Baseline mutation acts on every existing sequence each time step.
    if (length(unlist(seq_list)) >= 2) {
      for (u in 1:length(seq_list)) {
        seq_list[[u]] <- .applyBaseLine(seq_list[[u]], baseline.mut)
      }
    }

    ## With probability VDJ.branch.prob, found a new tree via an independent
    ## VDJ recombination event.
    is_new_VDJ <- sample(x = c(0, 1), replace = TRUE, size = 1,
                         prob = c(VDJ.branch.prob, 1 - VDJ.branch.prob))
    if (is_new_VDJ == 0 && current_seq_count < max.seq.num && current_tree_num < max.tree.num) {
      ## BUGFIX: the original stored only germline_name/germline_seq for
      ## trees after the first, so the SHM step below read NA germline
      ## V/D/J segments for those trees. Store all per-tree germline fields.
      g <- sample_germline()
      germline_name[current_tree_num + 1] <- g$name
      germline_vseq[current_tree_num + 1] <- g$vseq
      germline_dseq[current_tree_num + 1] <- g$dseq
      germline_jseq[current_tree_num + 1] <- g$jseq
      germline_seq[current_tree_num + 1] <- g$seq
      seq_list[[current_tree_num + 1]] <- g$recomb
      name_list[[current_tree_num + 1]] <- paste(paste("S", next_node, sep = ""), i, sep = "_")
      current_seq_count <- current_seq_count + 1
      current_tree_num <- current_tree_num + 1
      next_node <- next_node + 1
      ## Relabel the new tree's placeholder tip "1" with its node id.
      ## (A no-op self-assignment of seq_per_tree was removed here; new
      ## trees start with one sequence, exactly as initialized.)
      tree_list[current_tree_num] <- gsub(pattern = "1", replacement = as.character(next_node - 1), x = tree_list[current_tree_num])
    }
    if (next_node >= max.seq.num) break

    ## SHM branching: every sequence in every tree may spawn one mutant.
    if (current_seq_count >= 1) {
      for (o in 1:length(seq_list)) {
        for (j in 1:length(seq_list[[o]])) {
          is_new_SHM <- sample(x = c(0, 1), size = 1, replace = TRUE,
                               prob = c(new_SHM_prob[next_node], 1 - new_SHM_prob[next_node]))
          if (is_new_SHM == 0 && next_node < max.seq.num && SHM_count < max_SHM) {
            ## Parent node id = digits of the "S<node>" prefix of the name.
            holding_jsub <- gsub("_.*", "", name_list[[o]][j])
            holding_jsub <- gsub("[^0-9]", "", holding_jsub)
            seq_list[[o]][seq_per_tree[o] + 1] <- .SHM_FUNCTION_SEQUENCE4(seq_list[[o]][j],
                SHM.method, germline_vseq[o], germline_dseq[o], germline_jseq[o], SHM.nuc.prob)
            name_list[[o]][seq_per_tree[o] + 1] <- paste(paste("S", next_node, sep = ""), i, sep = "_")
            tree_list[o] <- .branchingProcess3(tree_list[o], holding_jsub, next_node, "SHM")
            next_node <- next_node + 1
            current_seq_count <- current_seq_count + 1
            SHM_count <- SHM_count + 1
            seq_per_tree[o] <- seq_per_tree[o] + 1
          }
          if (sum(!is.na(unlist(seq_list))) >= max.seq.num) break
        }
        if (sum(!is.na(unlist(seq_list))) >= max.seq.num) break
      }
    }
    if (sum(!is.na(unlist(seq_list))) >= max.seq.num) break
  }

  ## Assemble the output: germline + simulated sequences, matching names,
  ## and the tree with tip labels rewritten from node ids to sequence names.
  for (i1 in 1:length(seq_list)) {
    output_list[[1]][[i1]] <- append(germline_seq[i1], seq_list[[i1]])
    output_list[[2]][[i1]] <- append(germline_name[i1], name_list[[i1]])
    output_list[[3]][[i1]] <- ape::read.tree(text = tree_list[i1])
    for (i2 in 2:length(output_list[[3]][[i1]]$tip.label)) {
      output_list[[3]][[i1]]$tip.label[1] <- output_list[[2]][[i1]][1]
      output_list[[3]][[i1]]$tip.label[i2] <- output_list[[2]][[i1]][grep(output_list[[2]][[i1]], pattern = paste("S", output_list[[3]][[i1]]$tip.label[i2], "_", sep = ""))]
    }
  }
  return(output_list)
}
|
6131cfc35c4a6a72fc0773b594b9f00be3aaed21
|
418fde940e0197bc8c3c8a023ed742fd7b40f271
|
/motifAnalysisBellPaegle.R
|
810cec7591f61a0aaf739fe4b0079435839bae62
|
[] |
no_license
|
avbell/MotifsBellPaegle
|
7af72764a826a14036c290b21dea0ae3113b202e
|
a26f82a727fcbfa0044b7d9055705063d7b8b1d9
|
refs/heads/master
| 2021-12-15T10:45:38.341384
| 2021-11-30T15:11:32
| 2021-11-30T15:11:32
| 221,515,911
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,567
|
r
|
motifAnalysisBellPaegle.R
|
# ----------------------
# ANALYSIS FOR BELL & PAEGLE
# Ethnic Markers and How to Find Them: An Ethnographic Investigation of Marker Presence, Recognition, and Transmission
# University of Utah
# ----------------------
# packages you will need
require(jpeg)
require(graphics)
require(stats)
# graphics rendered on a mac
# so edit the graphics device
# call accordingly if on other OS
# data directory REPLACE TEXT WITH THE CORRECT DIRECTORY
# Helper: build a path inside the data directory. The base string is a
# placeholder -- it must be replaced with the real directory before running.
datadir <- function(x="") paste0("Directory where the data files are","/",x)
# NOTE(review): setwd() changes global state for the whole session.
setwd(datadir())
# Survey responses, one row per participant (70 participants).
d <- read.csv( file=datadir("DesignsSurveyTonga2016short.csv"), header=TRUE, stringsAsFactors=FALSE, nrows=70)
# summary statistics of participants
# NOTE(review): the 1-2 bounds look odd next to the 18+ age bands used
# just below -- confirm whether d$age is an age class here, not years.
sum( d$age >=1 & d$age<=2 )
# Count how many values of x fall in each inclusive band [l[i], u[i]].
#
# x: numeric vector of ages (NA values propagate into the counts)
# l: numeric vector of lower bounds
# u: numeric vector of upper bounds, same length as l
# Returns an integer vector of counts, one per band (length 0 if no bands;
# the original's 1:length(l) indexing misbehaved on empty input).
numAgeClass <- function(x,l,u) {
  stopifnot(length(l) == length(u))
  vapply(seq_along(l), function(i) sum(x >= l[i] & x <= u[i]), integer(1))
}
# Age bands: 18-30, 31-50, 51-100 (inclusive bounds)
low <- c(18,31,51)
up <- c( 30, 50, 100)
numAgeClass( d$age, low,up)
# frequency of exposure by motif
freqY <- function(x) sum(x=="y")/(sum(x=="y")+sum(x=="n"))
# Recognition frequency for the first two motifs (spot checks)
freqY( d$des1seen )
freqY( d$des2seen )
# Column names for the 15 motifs: des<k>seen (recognized?) and des<k>name (named?)
seens <- paste0( "des",1:15,"seen")
named <- paste0( "des",1:15,"name")
# Recognition frequency for every motif
freqsall <- sapply( 1:15, function(z) freqY( d[,seens[z]] ) )
namedmotif <- function( z ) sum( !d[,named[z] ]=="no" & !d[,named[z] ]=="" )/sum( !d[,named[z] ]=="" )
# Bar chart of motif recognition frequencies (used for Figure 2).
# Bars are sorted in decreasing order of recognition; under each bar the
# motif's jpeg image (m<k>.jpeg in the data directory) is drawn, annotated
# with the percentage of respondents who gave it a name (namedmotif()).
# NOTE(review): uses quartz(), so this renders on macOS only; relies on
# the globals datadir() and namedmotif() and on exactly 15 motifs.
plotfreqs <- function( freqs, label=NULL ){
  or <- order(freqs, decreasing =TRUE)  # bar order: most-recognized first
  quartz(width=8.5, height=5)           # macOS graphics device
  par( mar=c(0,10,1,1), las=1)
  # ylim dips below 0 to leave room for the motif images under the axis
  barplot( freqs[or], width=0.82, xlab="", ylim=c(-0.3,1), axes=FALSE, main=label )
  axis( side=2, at=c(0,1), las=1)
  mtext( "Fraction of sample\nrecognizing motif", side=2, las=1, line=9.5, at=0.5, adj=0)
  for ( i in 1:15 ){
    # draw motif or[i]'s image beneath its bar
    img<-readJPEG(datadir(paste0("m",or[i],".jpeg")))
    rasterImage(img,i-0.8,-0.1,i-0.2,-0.03)
    # percentage of responders who named this motif, printed under the bar
    mtext( text=format( 100*namedmotif( or[i] ), digits=2), side=1, at=i-0.4, line=-3.5, adj=1, cex=0.7 )
    mtext( text="% named", side=1, line=-3.5, at=-1.5, cex=0.8)
  }
}
# FIGURE 2
plotfreqs( freqsall )
# ---------------------------------
# CLASSIFICATION
# ---------------------------------
# --------------------
# Classification tasks
# --------------------
# Load the motif classification data from Tonga
# loads object d.all.tonga
load(datadir("tongaALLtriad2018.rdata"))
names(d.all.tonga)
names(d.all.tonga[[1]])
d.all.tonga[[1]]
# gender count (Tangata = man, Fefine = woman)
# NOTE(review): `gc` shadows base::gc(); harmless here but easy to trip on.
gc <- unlist(sapply( 1:length(d.all.tonga), function(z) d.all.tonga[[z]]$gender ))
sum( gc=="Tangata" )
sum( gc=="Fefine" )
length(d.all.tonga)
# age range
yob <- unlist(sapply( 1:length(d.all.tonga), function(z) d.all.tonga[[z]]$yob ))
# correct those that put age instead of year of birth, and large numbers
yob[82] <- 2018-26
yob[106] <- 2018-24
yob[114] <- 2018-20
yob[108] <- NA
yob[109] <- NA
# ages relative to the 2018 survey year
ages <- 2018 - yob
summary(ages)
# Loads the classification data from the US (Utah) sample
# loads object d.not
# NOTE(review): leading "/" yields a doubled slash in the path -- works on
# POSIX systems but confirm it was intended.
load(datadir("/utahtriad2018.rdata"))
names(d.not)
names(d.not[[1]])
# rename a nested data that matches
# the list names and structure of the Tonga data
for( i in 1:length(d.not) ){
  d.not[[i]]$motif_triad <- d.not[[i]]$motif_triad_data[[1]]
  d.not[[i]]$motif_tricomb <- d.not[[i]]$motif_tricomb[[1]]
}
d.not[[1]]
# ---------------------------
# Calculate the round-by-round differences
# or average level of agreement
# function to count choice agreement
# between two triad tasks
# yields a number (Equation 1 in the manuscript)
# Agreement between two participants' triad tasks (Equation 1 in the
# manuscript): the fraction of the 20 triads of 6 objects for which both
# respondents picked the same object.
#
# y1, y2: response matrices; columns 2:4 hold a 0/1 indicator of which of
#         the triad's three objects was chosen.
# t1, t2: each respondent's 20 x 3 table of presented triads. The lookup
#         is done on t1 only, so t1 and t2 are assumed to list the triads
#         in the same row order -- TODO confirm against the survey app.
# Returns a value in [0, 1].
triad.sim.count <- function( y1, t1, y2, t2){
  nObj <- 6
  tricomb <- t(combn(1:nObj, 3))   # canonical list of all 20 triads
  nr <- dim(tricomb)[1]
  y1tf <- y1 == 1                  # logical choice indicators
  y2tf <- y2 == 1
  ans <- rep(NA, nr)
  for (i in seq_len(nr)) {
    p1 <- tricomb[i, 1]
    p2 <- tricomb[i, 2]
    p3 <- tricomb[i, 3]
    # locate the row of the respondents' tables holding this exact triple
    add <- vapply(seq_len(nr),
                  function(z) sum(t1[z, ] == p1) + sum(t1[z, ] == p2) + sum(t1[z, ] == p3),
                  integer(1))
    rw <- which(add == 3)
    choice1 <- t1[rw, y1tf[rw, 2:4]]
    choice2 <- t2[rw, y2tf[rw, 2:4]]
    ans[i] <- ifelse(choice1 == choice2, 1, 0)
  }
  sum(ans) / nr   # nr == 20 total possible agreements
}
# function to organize the counting of triad task
# similarity counting between individuals
# yields an NxN matrix of counts
# Pairwise triad-task agreement matrix for one domain ("motif" by default):
# entry [i, j] is triad.sim.count() between participants i and j, or NA
# when either participant is missing triad data.
triad.sim <- function( d, dmn="motif" ){
  key_triad <- paste0(dmn, "_triad")
  key_comb <- paste0(dmn, "_tricomb")
  nObs <- length(d)
  agree <- matrix(rep(NA, nObs^2), nrow = nObs)
  for (i in seq_len(nObs)) {
    # participant i's data is constant over the inner loop, so fetch it once
    y1 <- d[[i]][key_triad][[1]]
    triad1 <- d[[i]][key_comb][[1]]
    for (j in seq_len(nObs)) {
      y2 <- d[[j]][key_triad][[1]]
      triad2 <- d[[j]][key_comb][[1]]
      if (!is.null(y1) & !is.null(triad1) & !is.null(y2) & !is.null(triad2)) {
        agree[i, j] <- triad.sim.count(y1 = y1, t1 = triad1, y2 = y2, t2 = triad2)
      }
    }
  }
  agree
}
# create similarity matrix between US and Tonga participants
# takes about a minute or so
# Pool both samples into one list (US participants first, then Tonga),
# keeping only the two fields triad.sim() reads.
mtf.Tonga.US <- rep( list(0),sum( length(d.not), length(d.all.tonga ) ) )
for( i in 1:sum( length(d.not), length(d.all.tonga ) ) ){
  if( i<=length(d.not) ){
    mtf.Tonga.US[[i]]$motif_triad <- d.not[[i]]$motif_triad
    mtf.Tonga.US[[i]]$motif_tricomb <- d.not[[i]]$motif_tricomb
  }else{
    # Tonga participants are offset by the number of US participants
    mtf.Tonga.US[[i]]$motif_triad <- d.all.tonga[[i-length(d.not)]]$motif_triad
    mtf.Tonga.US[[i]]$motif_tricomb <- d.all.tonga[[i-length(d.not)]]$motif_tricomb
  }
}
length(mtf.Tonga.US)
mtf.Tonga.US[[1]]
# Pairwise agreement matrix over the pooled sample
mtf.tonga.us.mtrx <- triad.sim( d=mtf.Tonga.US, "motif" )
# Distance map
# Yes I know R has a built-in MDS function
# but I like to do it "by hand" just to check
# Hand-rolled multidimensional-scaling map of a (dis)similarity matrix D:
# points are projected onto the first two scaled eigenvectors and plotted
# with separate symbols for Tonga ("T") vs US ("US") per sub.names.
# NOTE(review): uses quartz(), so macOS only; the built-in cmdscale() is
# applied later in the script as a cross-check.
DistanceMap = function( D, sub.names ){
  # Eigenvalues and eigenvectors
  decomp = eigen( D )
  decomp$values
  decomp$vectors
  # ----------------------------------------
  # MAP THE RELATIONSHIPS BETWEEN POPULATIONS
  # ----------------------------------------
  x.points = decomp$vectors[,1] * sqrt( decomp$values[1] ) # along first scaled eigenvector
  y.points = decomp$vectors[,2] * sqrt( decomp$values[2] ) # along second scaled eigenvector
  quartz(width=6, height=6)
  par( mar=c(3,3,3,3), bty="o" ) # no margins
  lim = max( x.points, y.points )   # NOTE(review): computed but never used
  # center the plot window on the median of each coordinate
  xcenter <- median(x.points)
  xdist <-( max(x.points) - min(x.points) ) / 2
  ycenter <- median(y.points)
  ydist <-( max(y.points) - min(y.points) ) / 2
  plot( x.points, y.points, axes=F, type="n", ylab="", xlab="", xlim=c(xcenter-xdist,xcenter+xdist), ylim=c(ycenter-ydist,ycenter+ydist))
  axis( side=2, pos=xcenter )
  axis( side=1, pos=ycenter )
  # open circles = Tonga, filled circles = US
  points( x.points[sub.names=="T"], y.points[sub.names=="T"], lty=2, pch=1, xpd=TRUE)
  points( x.points[sub.names=="US"], y.points[sub.names=="US"], lty=2, pch=19, xpd=TRUE)
  legend( "topright", box.lwd=0, pch=c(1,19), legend=c("Tonga","US"))
}
# Group labels in the pooled order: US participants first, then Tonga.
mtxlabels <- c(rep("US",length(d.not)),rep("T",length(d.all.tonga) ) )
# NOTE(review): `sub.names <- mtxlabels` inside the call both assigns a
# global `sub.names` and passes the value positionally; `sub.names =` is
# the conventional form -- confirm before changing.
DistanceMap( 1 - mtf.tonga.us.mtrx, sub.names <- mtxlabels )
# Use other method for multidimensional scaling
loc <- cmdscale(1-mtf.tonga.us.mtrx)
x <- loc[, 1]
y <- -loc[, 2]   # flip the second axis for display
# visualize
# FIGURE 3
plot(x, y, type = "n", xlab = "", ylab = "", asp = 1, axes = FALSE,
main = "")
points(x, y, pch=ifelse(mtxlabels=="T", 1, 19), cex = 1)
legend( "topright", box.lwd=0.3, pch=c(1,19), legend=c("Tonga","US"))
axis(1)
axis(2)
# A reviewer suggested using a formal clustering method
# another package
require(fpc)
dissim <- 1-mtf.tonga.us.mtrx # dissimilarity matrix
cl <- pamk( dissim ) # returns a clustering vector, identifying two clusters
# the clusters returning from above, visualized by color:
plot(x, y, type = "n", xlab = "", ylab = "", asp = 1, axes = FALSE,
main = "")
points(x, y, pch=ifelse(mtxlabels=="T", 1, 19), cex = 1, col=ifelse(cl$pamobject$clustering==1, "black", "red"))
legend( "topright", box.lwd=0.3, pch=c(1,19), legend=c("Tonga","US"))
axis(1)
axis(2)
|
a0d7770de843f553f17cdea807ff1367baccb030
|
85560c1235e67df38c1dc54cf0474490a54646d4
|
/man/setName.Rd
|
e129e1c7b18e6759c96ff16d9c00889bea0239cd
|
[] |
no_license
|
Huansi/MetCirc
|
c913a9606b1356abbf4a1db68747d35b9fdef509
|
efc2edd6a55259bff8d583ece69fce91beaee964
|
refs/heads/master
| 2021-01-12T05:17:29.389295
| 2016-12-11T15:12:46
| 2016-12-11T15:12:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 888
|
rd
|
setName.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/convert2MSP.R
\docType{methods}
\name{setName}
\alias{setName}
\alias{setName,character,MSP-method}
\title{setName sets names in MSP objects}
\format{An object of class \code{NULL} of length 0.}
\usage{
setName(object, name)
}
\arguments{
\item{object}{object of class MSP}
\item{name}{character, a vector with new names}
}
\value{
MSP
}
\description{
setName sets names in MSP objects. To set names pass a vector
with names to the argument \code{class}.
}
\section{Functions}{
\itemize{
\item \code{setName}: sets names in MSP objects
}}
\examples{
data("sd02_deconvoluted", package = "MetCirc")
finalMSP <- convert2MSP(sd02_deconvoluted, split = "_ ",
splitIndMZ = 2, splitIndRT = NULL)
setMetaboliteName(finalMSP, c(rep("unknown", 358), "name1", "name2"))
}
\keyword{datasets}
|
0f38cc9f8a2f9814593bd7165b4f8fb596a39b03
|
850fb65a0435402667af1475072f36391fcb1627
|
/wrangle.R
|
dfac4958903591a4f63906aeda6677d32354138b
|
[] |
no_license
|
HunterRatliff1/Texas_Health_Professions
|
04cd84fa659128b34459071db5d414dbe4d2304d
|
739f8848a0e16dca2cf3716d7322e58700b436f8
|
refs/heads/master
| 2021-01-10T17:55:41.100355
| 2017-05-10T22:03:45
| 2017-05-10T22:03:45
| 49,854,939
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,978
|
r
|
wrangle.R
|
## Load the required packages, and name
## a few globals
require(dplyr)
require(maptools) # required for rgdal to work correctly
require(tigris)
require(stringr)
require(tidyr)
require(googlesheets)
require(reshape2)
require(leaflet)
# require(acs)
# SOURCE: http://healthdata.dshs.texas.gov/Population
# Pull the "Population" sheet and reshape to long format: one row per
# FIPS/County x race-ethnicity (RE) x MSA x PHR x Year x Sex x age band,
# with the head count in `Count`.
Population <- gs_title("Texas Medical Specialties") %>%
  gs_read_csv("Population") %>%
  # Rename some columns
  rename(
    RE=Race..Ethnicity,
    MSA=Metropolitan.Statistical.Area,
    PHR=Public.Health.Region,
    County = Counties) %>%
  # Melt the data.frame (wide AGE_* columns -> Age/Count pairs)
  melt(id.vars = c("FIPS", "County", "RE", "MSA", "PHR", "Year", "Sex"),
       variable.name="Age", value.name="Count") %>%
  # Clean up the age category vector (drop the "AGE_" prefix)
  mutate(Age = gsub("AGE_", "", Age)) %>%
  # Split the age category into lower/upper bound columns
  separate(Age, into=c("Age.lower", "Age.upper"), sep="\\.", remove=F) %>%
  mutate(Age = gsub("\\.", "-", Age)) %>%
  tbl_df()
# Make the Age groups ordered factors (youngest to oldest)
Population$Age <- factor(Population$Age, ordered = T,
  levels = c("00-01", "01-04", "05-09","10-14", "15-19", "20-24", "25-29",
             "30-34", "35-39", "40-44", "45-49", "50-54", "55-59",
             "60-64", "65-69", "70-74", "75-79", "80-84", "85-100"))
# Save the population as a RDS & CSV file
Population %>% saveRDS("Data/Population-full.RDS")
Population %>% write.csv("Data/Population-full.csv")
# Quick visual check: statewide population by age band and sex, 2013.
Population %>%
  filter(Year==2013) %>%
  group_by(Age, Sex) %>% summarise(Count = sum(Count)) %>%
  ggplot(aes(x=Age, y=Count, fill=Sex)) + geom_bar(stat="identity")
# Per-county age profiles for 2013 (one path per county; infant band excluded).
Population %>%
  filter(Year==2013, Age != "00-01") %>%
  group_by(Age, County) %>% summarise(Count = sum(Count)) %>%
  ggplot(aes(x=Age, y=Count)) + geom_path(aes(group=County)) + coord_flip()
# Get shapefiles of all Texas Counties
TX_Counties <- counties(state = "TX", cb = T)
saveRDS(TX_Counties, "Data/TX_Counties.RDS") # save as RDS file
# Pull the 2015 Texas Medical Specialties data from the Google Sheet.
# BUG FIX (review): the original pipeline melted this table and then
# discarded the result, so `HPOs` (used by everything below) was never
# assigned, and a dangling `select(-RNs) %>% write.csv(...)` had no data
# argument at all. Keep the sheet in its wide form (County, Population,
# one column per profession) — the per-100k long/cast version is rebuilt
# below as `HPOs_per100k` — and write the CSV from it explicitly.
HPOs <- gs_title("Texas Medical Specialties") %>%
  gs_read_csv("2015") %>%
  tbl_df()
HPOs %>%
  select(-RNs) %>%
  write.csv(file = "Data/HPO.csv")
# Find as a rate of HPO per 100,000 people
# Melt to long form, convert counts to rates per 100k residents, then cast
# back to the wide layout (County + Population + one column per profession).
HPOs_per100k <- HPOs %>%
  melt(id.vars = c("County", "Population")) %>%
  mutate(value = round(value / (Population/100000), 2)) %>%
  dcast(County+Population ~ variable) %>%
  tbl_df()
# Percentile version: each county's cumulative-distribution rank (0-1) of
# its share of the statewide total, per profession.
# NOTE(review): this result is printed but never assigned, so nothing
# downstream uses it — confirm whether it was meant to be saved.
HPOs %>%
  melt(id.vars = c("County", "Population")) %>%
  group_by(variable) %>%
  mutate(
    value = value / sum(value),
    value = round(cume_dist(value), 2)) %>% ungroup() %>%
  dcast(County+Population ~ variable) %>%
  tbl_df()
# Join 'HPOs' data.frame to the TX_Counties shapefile
# (matches shapefile NAME to the data frame's County column; note this
# overwrites `HPOs` with a spatial object).
HPOs <- geo_join(
  spatial_data = TX_Counties,
  data_frame = HPOs,
  by_sp = "NAME",
  by_df = "County")
# Join 'HPOs_per100k' data.frame to the TX_Counties shapefile
HPOs_per100k <- geo_join(
  spatial_data = TX_Counties,
  data_frame = HPOs_per100k,
  by_sp = "NAME",
  by_df = "County")
# Define palette (continuous Yellow-Green-Blue ramp, domain taken per layer)
pal_abs <- colorNumeric(palette = "YlGnBu", domain = NULL)
# # Define pop-up
# popup <- paste0(
#   "County: ", data_frame$County, "<hr>",
#   "GEOID: ", data_frame$GEOID, "<br>",
#   "Percentage of people ", age_group, " with: <br>",
#   "Private health insurance: <code>", round(data_frame$Private,2), "% </code><br>",
#   "Public health insurance: <code>", round(data_frame$Public,2), "% </code><br>",
#   "Private & public health insurance: <code>", round(data_frame$Both,2), "% </code><br>",
#   "No health insurance: <code>", round(data_frame$Neither,2), "% </code><br>")
# Define the leaflet map: one choropleth layer per profession (rates per
# 100k residents), switchable via the layers control at the end.
leaflet() %>%
  ## Add the base tiles
  addProviderTiles("CartoDB.Positron") %>%
  ## Add one polygon layer per profession (13 layers; the commented popup
  ## above was kept from the insurance map this was adapted from)
  addPolygons(data = HPOs_per100k,
              group = "Veterinarians", fillColor = ~pal_abs(DVM),
              # popup=popup,
              color="#b2aeae", fillOpacity=0.5, weight=1, smoothFactor=0.2) %>%
  addPolygons(data = HPOs_per100k,
              group = "Pharmacists", fillColor = ~pal_abs(PharmD),
              color="#b2aeae", fillOpacity=0.5, weight=1, smoothFactor=0.2) %>%
  addPolygons(data = HPOs_per100k,
              group = "Social Workers", fillColor = ~pal_abs(SWs),
              color="#b2aeae", fillOpacity=0.5, weight=1, smoothFactor=0.2) %>%
  addPolygons(data = HPOs_per100k,
              group = "PTs", fillColor = ~pal_abs(PTs),
              color="#b2aeae", fillOpacity=0.5, weight=1, smoothFactor=0.2) %>%
  addPolygons(data = HPOs_per100k,
              group = "PAs", fillColor = ~pal_abs(PAs),
              color="#b2aeae", fillOpacity=0.5, weight=1, smoothFactor=0.2) %>%
  addPolygons(data = HPOs_per100k,
              group = "Primary Care Physicians", fillColor = ~pal_abs(PCPs),
              color="#b2aeae", fillOpacity=0.5, weight=1, smoothFactor=0.2) %>%
  addPolygons(data = HPOs_per100k,
              group = "Psychiatrists", fillColor = ~pal_abs(Psychiatrist),
              color="#b2aeae", fillOpacity=0.5, weight=1, smoothFactor=0.2) %>%
  addPolygons(data = HPOs_per100k,
              group = "Family Medicine", fillColor = ~pal_abs(FM),
              color="#b2aeae", fillOpacity=0.5, weight=1, smoothFactor=0.2) %>%
  addPolygons(data = HPOs_per100k,
              group = "General Practice", fillColor = ~pal_abs(General.Practice),
              color="#b2aeae", fillOpacity=0.5, weight=1, smoothFactor=0.2) %>%
  addPolygons(data = HPOs_per100k,
              group = "Geriatrics", fillColor = ~pal_abs(Geriatrics),
              color="#b2aeae", fillOpacity=0.5, weight=1, smoothFactor=0.2) %>%
  addPolygons(data = HPOs_per100k,
              group = "Internal Medicine", fillColor = ~pal_abs(IM),
              color="#b2aeae", fillOpacity=0.5, weight=1, smoothFactor=0.2) %>%
  addPolygons(data = HPOs_per100k,
              group = "Women's Health", fillColor = ~pal_abs(OB.GYN),
              color="#b2aeae", fillOpacity=0.5, weight=1, smoothFactor=0.2) %>%
  addPolygons(data = HPOs_per100k,
              group = "Pediatrics", fillColor = ~pal_abs(Pediatrics),
              color="#b2aeae", fillOpacity=0.5, weight=1, smoothFactor=0.2) %>%
  # ## Add the legand
  # addLegend(pal = pal_abs, values = c(0,100),
  #           title = paste0("Precentage (", age_group, ")"),
  #           position="bottomright", labFormat=labelFormat(suffix = "%")) %>%
  ## Add layers control
  addLayersControl(
    baseGroups = c("Veterinarians", "Pharmacists", "Social Workers", "PTs", "PAs",
                   "Primary Care Physicians", "Psychiatrists", "Family Medicine",
                   "General Practice", "Geriatrics", "Internal Medicine",
                   "Women's Health", "Pediatrics"),
    options = layersControlOptions(collapsed = FALSE))
# NOTE(review): Shiny/ggvis fragment — `x` and `input$HP_type` are not
# defined in this script (`input` implies a Shiny server context), so this
# chunk cannot run standalone; it appears pasted from an app.
x@data %>%
  select(County:Peds) %>%
  melt(id.vars = c("County", "Population")) %>%
  filter(variable==input$HP_type) %>%
  ggvis(~County, ~value) %>%
  layer_points()
# Birth statistics by county: stack Tables 10 & 11 from the "TX Births"
# sheet, drop region-total rows, and melt the X2004..X2013 year columns
# into long Year/value pairs.
x <- bind_rows(
  (gs_title("TX Births") %>% gs_read_csv("Table10")),
  (gs_title("TX Births") %>% gs_read_csv("Table11"))) %>%
  filter(!grepl("REGION", Location)) %>%
  mutate(Location = str_to_title(Location)) %>%
  rename(County=Location) %>%
  melt(measure.vars=c("X2013", "X2012", "X2011", "X2010", "X2009",
                      "X2008", "X2007", "X2006", "X2005", "X2004"),
       variable.name="Year") %>%
  mutate(Year = as.numeric(gsub("X", "", Year))) %>%
  mutate(value = as.numeric(value)) %>%
  tbl_df()
# Teen-mother counts over time: one line per county, colored by
# race/ethnicity.
x %>%
  filter(Statistic=="Mothers 17 Years of Age and Younger") %>%
  ggplot(aes(x=Year, y=value, group=County, color=Race.Ethnicity)) +
  geom_line() + geom_point()
# NOTE(review): dangling fragment — this ggplot() call has no data and
# references `Location`, which was renamed to `County` above; it fails
# when rendered.
ggplot(aes(x=Location, y=value, size=Year, color=Race.Ethnicity, shape=Statistic)) +
  geom_point()
|
ae6fd94974b08d04af2ad68a6a0584463b403848
|
ca0fb42b56f0a01b05e70b8cc8efc9fb3753bd31
|
/scripts/GridLMM_hp_blup.R
|
fa41b4688c5bf7afbe5728f531c63a8ba06616a4
|
[] |
no_license
|
sarahodell/biogemma
|
873910b91d27dd539b3a8b1992de66673660c804
|
88d49a4f4974eacf01464547e5cf443191fb2f70
|
refs/heads/master
| 2022-09-22T00:48:43.637061
| 2022-08-16T23:52:21
| 2022-08-16T23:52:21
| 244,755,602
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,454
|
r
|
GridLMM_hp_blup.R
|
#!/usr/bin/env Rscript
#### Run GridLMM on a phenotype for and environment across all h haplotype groups for one chromosome
# Command-line interface:
#   args[1] pheno  - phenotype column name in ../phenotypes_asi.csv
#   args[2] chr    - chromosome number (used in file paths)
#   args[3] h      - haplotype-group number (used in file paths/output)
#   args[4] cores  - number of cores passed to run_GridLMM_GWAS
args=commandArgs(trailingOnly=T)
pheno=as.character(args[[1]])
chr=as.numeric(args[[2]])
h=as.numeric(args[[3]])
cores=as.numeric(args[[4]])
library('GridLMM')
library('data.table')
library('dplyr')
library('lme4')
# Haplotype-probability matrices for this chromosome x haplotype group.
xfile=sprintf('../../genotypes/probabilities/haplotype_probs/RefinedIBD_600K/bg%.0f_filtered_haplogroup%.0f_probs.rds',chr,h)
if(file.exists(xfile)){
  X_list=readRDS(xfile)
  # Read in Kinship Matrix (first column holds the genotype ids)
  K=fread(sprintf('../K_matrices/K_matrix_chr%.0f.txt',chr),data.table=F)
  rownames(K)=K[,1]
  # Normalize ids ("-" -> ".") so they match the phenotype table below
  rownames(K)=gsub("-",".",rownames(K))
  K=as.matrix(K[,-1])
  colnames(K)=rownames(K)
  # Read in phenotypes
  # Grab the phenotype of interest and drop the genotypes not in the K matrix
  phenotypes=fread('../phenotypes_asi.csv',data.table=F)
  phenotypes=phenotypes[,c('Genotype_code','Loc.Year.Treat',pheno)]
  phenotypes$Genotype_code=gsub('-','.',phenotypes$Genotype_code)
  phenotypes=phenotypes[phenotypes$Genotype_code %in% rownames(K),]
  # Genotype id duplicated (ID/ID2) so one copy serves as the random-effect
  # grouping factor in lmer below
  data=data.frame(ID=phenotypes$Genotype_code,ID2=phenotypes$Genotype_code,Loc.Year.Treat=phenotypes$Loc.Year.Treat,y=phenotypes[,c(pheno)],stringsAsFactors=F)
  data=data[!is.na(data$y),]
  data$y=data$y - mean(data$y)  # center the phenotype
  # Genotype BLUPs across environments: Loc.Year.Treat fixed, genotype random
  m1=lmer(y~Loc.Year.Treat + (1|ID2),data)
  data_blup = as.data.frame(ranef(m1)$ID2)
  data_blup$ID = rownames(data_blup)
  data_blup$y=data_blup$`(Intercept)`
  data_blup=data_blup[,c('ID','y')]
  inds=rownames(X_list[[1]])
  # Genotypes present in both the BLUPs and the probability matrices
  i=intersect(data_blup$ID,inds)
  # Read in the haplotype group probabilities
  # Filter genotypes that are not in the K matrix
  K=K[i,i]
  data_blup=data_blup[i,]
  # Run GridLMM
  # Null model (no markers) to get a starting heritability estimate
  null_model = GridLMM_ML(y~1+(1|ID),data_blup,relmat=list(ID=K),ML=T,REML=F)
  h2_start=null_model$results[,grepl('.ML',colnames(null_model$results),fixed=T),drop=FALSE]
  names(h2_start) = sapply(names(h2_start),function(x) strsplit(x,'.',fixed=T)[[1]][1])
  h2_start
  V_setup=null_model$setup
  Y=as.matrix(data_blup$y)
  X_cov=null_model$lmod$X
  dimx=dim(X_list[[1]])[2]
  # Reorder every probability matrix to the common genotype set `i`
  X_list_ordered=lapply(X_list,function(x) array(x[i,],dim=c(length(i),dimx),dimnames=list(i,dimnames(X_list[[1]])[[2]])))
  X_list_null=NULL
  # NOTE(review): X_list_ordered[-1] drops the first haplotype's matrix —
  # presumably treated as the reference group; confirm this is intended.
  gwas=run_GridLMM_GWAS(Y,X_cov,X_list_ordered[-1],X_list_null,V_setup=V_setup,h2_start=h2_start,method='ML',mc.cores=cores,verbose=F)
  # Append heritability record and save the scan results
  hinfo=data.frame(method="Haplotype_probs",phenotype=pheno,environment="ALL",chr=chr,h2=h2_start,hap=h,stringsAsFactors=F)
  fwrite(hinfo,'../heritabilities.txt',quote=F,sep='\t',row.names=F,append=T)
  saveRDS(gwas,sprintf('models/Biogemma_chr%.0f_haplogrp%.0f_%s_x_ALL.rds',chr,h,pheno))
}else{
  print(sprintf("No markers at %s haplogroup %.0f",chr,h))
}
# Convert all very high and very low probabilities to 1 and 0, respectively
#X_list_full = lapply(X_list_ordered,function(x) sapply(seq(1,dim(x)[2]), function(i) ifelse(x[,i]>=0.99,1,ifelse(x[,i]<=1e-2,0,x[,i]))))
#for(i in 1:h){dimnames(X_list_full[[i]])[[2]]=dimnames(X_list_ordered[[i]])[[2]]}
#gwas_adjusted=gwas
#sums=lapply(X_list_full,function(x) colSums(x))
#for(i in 1:h){
# s=sums[[i]]
# t=dim(X_list_full[[i]])[1]-2
# l=2
# grab=which(s>t,s)
# grab=c(grab,which(s<l,s))
# grab=sort(grab)
# beta=sprintf('beta.%.0f',seq(1,h))
# gwas_adjusted[grab,beta]=0
# print(grab)
#}
#saveRDS(gwas_adjusted,sprintf('models/Biogemma_chr%s_haplogrp%.0f_%s_x_ALL_adjusted.rds',chr,h,pheno))
|
a1cc9dff0d9bc8a08882e8258c3872a5ccb346a9
|
2a63bbb53d797c916ae959dde41fe263a7ae8d44
|
/best_draft1.R
|
4e265ce461ce7555ffb973e0e976a481075afbbd
|
[] |
no_license
|
emiliehwolf/prog3
|
0b0e77a7456fa6e83f172bb2370ebbf8893eef74
|
29ca28a61d87795be67639ef031ad636f89a888b
|
refs/heads/master
| 2020-04-06T07:04:08.765064
| 2017-02-12T09:56:31
| 2017-02-12T09:56:31
| 33,079,881
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 986
|
r
|
best_draft1.R
|
# Return the name of the hospital with the lowest 30-day death rate for the
# given outcome in the given state; ties broken alphabetically.
#
# Args:
#   state:   two-letter state abbreviation (column 7 of the data file)
#   outcome: one of "heart attack", "heart failure", "pneumonia"
# Returns: a single hospital name (character).
# Errors:  "invalid outcome" / "invalid state" for unrecognized arguments.
#
# BUG FIXES vs. the draft: `x` was undefined for an invalid outcome;
# `for (i in nrow(dfstate))` visited only the last row; the character-vs-
# numeric comparison `dfstate[i,x]==minamount` never matched; and NA rates
# crashed the if().
best <- function(state, outcome) {
  ## Read outcome data: all columns as character, "Not Available" -> NA.
  df <- read.csv("outcome-of-care-measures.csv", colClasses = "character",
                 na.strings = "Not Available")

  ## Keep only the needed columns, by position: 2 = hospital name,
  ## 7 = state, 11/17/23 = heart attack / heart failure / pneumonia rates.
  dfslim <- data.frame(hosp  = df[, 2],
                       state = df[, 7],
                       ha    = df[, 11],
                       hf    = df[, 17],
                       pn    = df[, 23],
                       stringsAsFactors = FALSE)

  ## Map the outcome argument to its column; reject anything else.
  col <- switch(outcome,
                "heart attack"  = "ha",
                "heart failure" = "hf",
                "pneumonia"     = "pn",
                stop("invalid outcome"))

  ## Subset to the requested state; reject unknown states.
  dfstate <- dfslim[dfslim$state == state, , drop = FALSE]
  if (nrow(dfstate) == 0L) {
    stop("invalid state")
  }

  ## Numeric rates (NA where the source had "Not Available").
  rates <- as.numeric(dfstate[[col]])
  minamount <- min(rates, na.rm = TRUE)

  ## All hospitals achieving the minimum; min() of the names picks the
  ## alphabetically first on ties.
  besthospitals <- dfstate$hosp[!is.na(rates) & rates == minamount]
  min(besthospitals)
}
|
5cbd7198b3f95478be59538110d91a0ca71d763c
|
0c10b1c41c23cab19282e93703d9c1cdae684555
|
/1-D/visual/animate.R
|
9230eda48ad4f1a2382015f23a1de7dc95fb747c
|
[] |
no_license
|
hlokavarapu/Stokes
|
00e677a5345b8631f2f705d79569a147280d13a5
|
7368284ee2d3ea23192f222a685608909d2e0b3b
|
refs/heads/master
| 2020-04-09T05:51:06.399666
| 2014-12-18T23:44:54
| 2014-12-18T23:44:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 685
|
r
|
animate.R
|
#!/usr/bin/R
library(animation)
# Number of distinct time steps in `data`.
# NOTE(review): `data` is only read inside makePlot() below, so this line
# (and pltData's use of ts_ctl) requires `data` to exist globally from an
# earlier session; it errors in a fresh one.
ts_ctl <- length(unique(data$ts))
# Draw one frame per loop iteration for saveGIF().
# NOTE(review): the loop index `i` is never used — every frame plots the
# subset ts == (ts_ctl - 1), so the GIF repeats one identical frame;
# `ts == i` was probably intended.
pltData <- function() {
  #for(i in 0:(ts_ctl-1)) {
  for(i in 0:(100)) {
#    pdf(paste("Upwind_h256",i,".pdf",sep=""),height=4,width=6.5)
    points <- subset(data, ts==(ts_ctl-1))
    x <- points$x
    y <- points$u
    plot(x, y, type='l', ylim=c(0,1), main=)
#    dev.off()
  }
}
# Read the (ts, x, u) CSV and render an animated GIF of the solution.
#
# Args:
#   outputFile: GIF file name passed to saveGIF()
#   inputFile:  comma-separated file with columns ts, x, u
# NOTE(review): `data` is assigned locally here, but pltData() looks up
# `data` (and ts_ctl) in the global environment, so the two only agree if
# the same file was also loaded globally beforehand.
makePlot <- function(outputFile, inputFile ) {
  data <- read.table(inputFile, col.names=c("ts", "x", "u"), sep=',')
  oopt = ani.options(interval = 0, nmax = (ts_ctl-1))
  saveGIF(pltData(), movie.name = outputFile, interval = 0.1, width = 580, height = 400)
#  saveGIF(pltData(), interval = 0.1, width = 580, height = 400)
  ani.options(oopt)
}
|
b31435210401906ce3011a0b9798cbb0044c64f3
|
9b54d76011ab40f9b9bef2c20d082552598e570f
|
/analysis/power/fix_task.R
|
1206ef109286ce2074290d9bf2e9fe9857add9b1
|
[
"MIT"
] |
permissive
|
TimS70/WebET_Analysis
|
a507f0f368dde907005bbcb354b65ce62eef0d5a
|
32fc2e1b70c2dad5637ee1614a6a651bc8d458b4
|
refs/heads/main
| 2023-06-29T19:39:59.864107
| 2021-07-23T12:47:46
| 2021-07-23T12:47:46
| 329,843,183
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,046
|
r
|
fix_task.R
|
source(file=file.path('utils', 'r', 'get_packages.R'))
source(file=file.path('analysis', 'power', 'load_data.R'))
get_packages(c(
"knitr",
"simr",
"dplyr",
"rsq",
"tictoc",
"tinytex")
)
# Random Slope Model
# Build a simulated mixed model (simr::makeLmer) of fixation-task offset
# with fixed effects for trial, chin rest, glasses, visual aid, y position,
# fps and ethnicity, plus a by-subject random intercept and chin slope.
#
# Args:
#   chin_effect: fixed-effect size of the chin-rest term being powered.
# Returns: the lmer-like object produced by simr::makeLmer, fit on
#   simulated data from load_data(kTrial=9, nsubj=300).
# NOTE(review): `fixed` supplies 11 coefficients; confirm this matches the
# model matrix implied by the formula (factor levels of visualAid/ethnic).
model_fix_task <- function(chin_effect) {
  # Intercept and slopes for intervention, time1, time2, intervention:time1, intervention:time2
  # Effects based on Semmelmann & Weigelt, 2018 - We take the other effects * 3
  fixed <- c(0.15, # Intercept
             0.001, # Trial
             chin_effect, # chin
             0.02, # glasses
             0.005, # short
             0.005, # long
             0.001, # progressive
             0.005, # y-pos
             0.005, # fps
             0.0007, # Black
             0.007 # Asian
  )
  # Random intercepts for participants clustered by class, Variance from above
  # https://rstudio-pubs-static.s3.amazonaws.com/484106_6b51212f20164fdd88cd7cce89bdef79.html
  rand <- matrix(c(0.025^2, -0.0005, -0.0005, 0.025^2), 2, 2)
  # Extract residual sd
  res <- (1.218e-12^0.5)
  m <- makeLmer(offset ~ trial + chin + glasses + visualAid + y_pos + fps + ethnic + (1 + chin |subj),
                fixef=fixed, VarCorr=rand, sigma=res, data=load_data(kTrial=9, nsubj = 300))
  return(m)
}
# For each candidate chin effect size, fit the simulated model, extend it
# to 500 subjects, and compute a simr power curve over subject counts
# n_min..n_max (step myBreaks) with myNSim simulations per point.
#
# Returns (and prints) a list, one element per effect size, each a named
# list with elements "Model" and "Power Curve".
simulate_fix_task <- function(chin_effect,
                              myNSim,
                              myBreaks,
                              n_min,
                              n_max) {
  results <- vector("list", length(chin_effect))
  for (idx in seq_along(chin_effect)) {
    # Simulated model for this effect size, extended to 500 subjects so the
    # power curve can sample up to that many.
    fit <- extend(model_fix_task(chin_effect = chin_effect[idx]),
                  along = "subj", n = 500)
    curve <- powerCurve(
      fit,
      along = "subj",
      test = fixed("chin", method = "t"),
      breaks = seq(n_min, n_max, myBreaks),
      nsim = myNSim
    )
    results[[idx]] <- list(Model = fit, `Power Curve` = curve)
  }
  print(results)
  return(results)
}
|
0698ce757c16ff1c7374c8d073d32a99986f77ae
|
976694398b80400e6cef7b643a4af1f14a0b29b1
|
/MixingModel.R
|
f1a7238753b7f20b9e98c3b86d79710c0d666aec
|
[] |
no_license
|
bradleylab/DsrAB_enzyme_models
|
2b2a6e562c01b6513ac25c79f44a4d50d69adc2c
|
1131a52d2c2cd9c263a562c079759d95afad0aa1
|
refs/heads/master
| 2021-01-02T08:20:21.328289
| 2014-11-30T22:04:31
| 2014-11-30T22:39:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 988
|
r
|
MixingModel.R
|
###############################
### Mixing of isotope effects ####
##first to set up the dataframe
# Assemble isotope ratios from the externally defined Dsr.34Rs table:
# sulfite (Rso3), oxidized product (Rox) and reduced product (Rred).
MixMod<-data.frame(Dsr.34Rs$SO3)
MixMod<-cbind(MixMod, Dsr.34Rs$ox)
MixMod<-cbind(MixMod, Dsr.34Rs$red)
names(MixMod)[1] <- "Rso3"
names(MixMod)[2] <- "Rox"
names(MixMod)[3] <- "Rred"
## based on product Rayleigh
##Mix.alpha = 0.98467919
##based on reactant Rayleigh
#Mix.alpha = 0.98478247
#set Mix.alpha depending on which Rayleigh model you choose
Mix.alpha = 0.98467919
# Reliable() is defined elsewhere in the project — presumably filters to
# reliable measurements; confirm against its definition.
MixMod<-Reliable(MixMod)
MixMod.complete = MixMod[complete.cases(MixMod),]
# Grid of mixing fractions 0.01..0.99, replicated to one row per
# observation and appended as 99 extra columns.
Xs <- matrix(seq(0.01,0.99,by=0.01),nrow=1)
XsM <- Xs[rep(1:1,length(MixMod.complete[,1])),]
MixMod.complete<-cbind(MixMod.complete,XsM)
f.reliable <-Reliable(f)
f.MM.complete <-f.reliable[complete.cases(MixMod)]
n=dim(MixMod.complete)[2]
# Overwrite the fraction columns with the mixing-balance solution:
# (Rred - x*Rso3*alpha) / (Rox*(1-x)) for each fraction x — appears to be
# the two-endmember isotope mixing equation; verify against the paper.
MixMod.complete[,4:n] = (MixMod.complete$Rred-MixMod.complete[,4:n]*MixMod.complete$Rso3*Mix.alpha)/(MixMod.complete$Rox*(1-MixMod.complete[,4:n]))
#plot the results
# See Code_for_figures.R
|
b9f06f6e8876457c22d0401de0382ff3c2300f73
|
fa823f5ca33456e33198a45955fcc6e80ea926c4
|
/R/database.R
|
12292054cfbce74eb2094212fb38da183a878c88
|
[] |
no_license
|
Python3pkg/BatchExperiments
|
2692a2c849869ad1dc5d4a0c5bd4e9048c980d6b
|
1479652d233863171208c01fb1f57534fc28971c
|
refs/heads/master
| 2021-01-21T09:24:11.618977
| 2017-01-02T09:59:45
| 2017-01-02T09:59:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,372
|
r
|
database.R
|
#' @export
# Create the job definition table for an ExperimentRegistry: one row per
# unique (problem id, problem pars, algorithm id, algorithm pars) combo.
dbCreateJobDefTable.ExperimentRegistry = function(reg) {
  query = sprintf(paste("CREATE TABLE %s_job_def (job_def_id INTEGER PRIMARY KEY,",
                        "prob_id TEXT, prob_pars TEXT, algo_id TEXT, algo_pars TEXT,",
                        "UNIQUE(prob_id, prob_pars, algo_id, algo_pars))"), reg$id)
  batchQuery(reg, query, flags = "rwc")  # "rwc": presumably read/write/create
}

# Create the problem and algorithm definition tables (ids plus an optional
# problem seed).
dbCreateExtraTables = function(reg) {
  query = sprintf("CREATE TABLE %s_prob_def (prob_id TEXT PRIMARY KEY, pseed INTEGER)", reg$id)
  batchQuery(reg, query, flags = "rwc")
  query = sprintf("CREATE TABLE %s_algo_def (algo_id TEXT PRIMARY KEY)", reg$id)
  batchQuery(reg, query, flags = "rwc")
}

# Create a view joining job status with its job and problem definitions so
# queries can filter on prob_id/algo_id/pars without writing joins.
dbCreateExpandedJobsViewBE = function(reg) {
  query = sprintf(paste("CREATE VIEW %1$s_expanded_jobs AS",
                        "SELECT * FROM %1$s_job_status AS job_status",
                        "LEFT JOIN %1$s_job_def AS job_def USING(job_def_id)",
                        "LEFT JOIN %1$s_prob_def AS prob_def USING (prob_id)"), reg$id)
  batchQuery(reg, query, flags = "rw")
}
# Run `query`, optionally restricted to the given job ids and grouped.
# `where` says whether the id restriction starts a WHERE clause (TRUE) or
# extends an existing one with AND. When `ids` is supplied and `reorder`
# is TRUE, result rows are returned in the order of `ids` (missing ids
# silently dropped via na.omit).
dbSelectWithIds = function(reg, query, ids, where = TRUE, group.by, reorder = TRUE) {
  if(!missing(ids))
    query = sprintf("%s %s job_id IN (%s)", query, ifelse(where, "WHERE", "AND"), collapse(ids))
  if(!missing(group.by))
    query = sprintf("%s GROUP BY %s", query, collapse(group.by))
  res = batchQuery(reg, query)
  if(missing(ids) || !reorder)
    return(res)
  return(res[na.omit(match(ids, res$job_id)),, drop = FALSE])
}

#' @method dbGetJobs ExperimentRegistry
#' @export
# Fetch jobs as a list of ExperimentJob objects; problem/algorithm
# parameters are stored as serialized R objects in text columns and are
# unserialized per row.
dbGetJobs.ExperimentRegistry = function(reg, ids) {
  query = sprintf("SELECT job_id, prob_id, prob_pars, algo_id, algo_pars, seed, prob_seed, repl FROM %s_expanded_jobs", reg$id)
  tab = dbSelectWithIds(reg, query, ids)
  lapply(seq_row(tab), function(i) {
    x = tab[i,]
    prob.pars = unserialize(charToRaw(x$prob_pars))
    algo.pars = unserialize(charToRaw(x$algo_pars))
    makeExperimentJob(id = x$job_id, prob.id = x$prob_id, prob.pars = prob.pars,
                      algo.id = x$algo_id, algo.pars = algo.pars, seed = x$seed, repl = x$repl, prob.seed = x$prob_seed)
  })
}
# Count jobs grouped by the columns named in `show`, returning a data
# frame of the grouping columns plus a ".count" column.
# Fast path: when `show` only uses prob/algo/repl the grouping runs in
# SQL; otherwise the serialized parameter lists are expanded into columns
# in R and grouped with data.table.
dbSummarizeExperiments = function(reg, ids, show) {
  if (all(show %in% c("prob", "algo", "repl"))) {
    # map user-facing names to the view's column names, preserving order
    cols = setNames(c("prob_id", "algo_id", "repl"), c("prob", "algo", "repl"))
    cols = cols[match(show, names(cols))]
    query = sprintf("SELECT %s, COUNT(job_id) FROM %s_expanded_jobs", collapse(cols), reg$id)
    summary = setNames(dbSelectWithIds(reg, query, ids, group.by = cols, reorder = FALSE),
                       c(show, ".count"))
  } else {
    uc = function(x) unserialize(charToRaw(x))  # decode stored parameter lists
    query = sprintf("SELECT job_id, prob_id AS prob, prob_pars, algo_id AS algo, algo_pars, repl FROM %s_expanded_jobs", reg$id)
    tab = as.data.table(dbSelectWithIds(reg, query, ids, reorder = FALSE))
    # expand problem/algorithm parameters into one column per parameter
    pars = rbindlist(lapply(tab$prob_pars, uc), fill = TRUE)
    if (nrow(pars) > 0L)
      tab = cbind(tab, pars)
    pars = rbindlist(lapply(tab$algo_pars, uc), fill = TRUE)
    if (nrow(pars) > 0L)
      tab = cbind(tab, pars)
    diff = setdiff(show, colnames(tab))
    if (length(diff) > 0L)
      stopf("Trying to select columns in arg 'show' which are not available: %s", collapse(diff))
    summary = as.data.frame(tab[, list(.count = .N), by = show])
  }
  summary
}
# Find job ids matching problem/algorithm name patterns and/or replications.
# With regexp=TRUE the name matching happens in R via grepl; otherwise it
# happens in SQL, either with LIKE (substring match, like=TRUE) or exact
# equality.
# NOTE(review): in the regexp branch the `repls` clause built above is
# never applied — the function returns before using `clause`.
dbFindExperiments = function(reg, ids, prob.pattern, algo.pattern, repls, like = TRUE, regexp = FALSE) {
  clause = character(0L)
  if (!missing(repls))
    clause = c(clause, sprintf("repl IN (%s)", collapse(repls)))
  if (regexp) {
    # fetch ids + names, filter with regular expressions in R
    query = sprintf("SELECT job_id, prob_id, algo_id from %s_expanded_jobs", reg$id)
    tab = dbSelectWithIds(reg, query, ids, where = TRUE)
    ss = rep(TRUE, nrow(tab))
    if (!missing(prob.pattern))
      ss = ss & grepl(prob.pattern, tab$prob_id)
    if (!missing(algo.pattern))
      ss = ss & grepl(algo.pattern, tab$algo_id)
    return(tab$job_id[ss])
  }
  if (!missing(prob.pattern)) {
    if (like)
      clause = c(clause, sprintf("prob_id LIKE '%%%s%%'", prob.pattern))
    else
      clause = c(clause, sprintf("prob_id = '%s'", prob.pattern))
  }
  if (!missing(algo.pattern)) {
    if (like)
      clause = c(clause, sprintf("algo_id LIKE '%%%s%%'", algo.pattern))
    else
      clause = c(clause, sprintf("algo_id = '%s'", algo.pattern))
  }
  query = sprintf("SELECT job_id from %s_expanded_jobs", reg$id)
  if (length(clause) > 0L)
    query = paste(query, "WHERE", collapse(clause, sep = " AND "))
  dbSelectWithIds(reg, query, ids, where = length(clause) == 0L)$job_id
}
# Register (or overwrite) a problem id with an optional seed.
dbAddProblem = function(reg, id, seed) {
  #FIXME: replace OR REPLACE with an option, this is not supported by all DBMS
  query = sprintf("INSERT OR REPLACE INTO %s_prob_def (prob_id, pseed) VALUES ('%s', %s)",
                  reg$id, id, ifelse(is.null(seed), "NULL", seed))
  batchQuery(reg, query, flags = "rw")
}

# Register (or overwrite) an algorithm id.
dbAddAlgorithm = function(reg, id) {
  #FIXME: replace OR REPLACE with an option, this is not supported by all DBMS
  query = sprintf("INSERT OR REPLACE INTO %s_algo_def (algo_id) VALUES ('%s')", reg$id, id)
  batchQuery(reg, query, flags = "rw")
}

# Delete a problem definition by id.
dbRemoveProblem = function(reg, id) {
  query = sprintf("DELETE FROM %s_prob_def WHERE prob_id='%s'", reg$id, id)
  batchQuery(reg, query, flags = "rw")
}

# Delete an algorithm definition by id.
dbRemoveAlgorithm = function(reg, id) {
  query = sprintf("DELETE FROM %s_algo_def WHERE algo_id='%s'", reg$id, id)
  batchQuery(reg, query, flags = "rw")
}
# All registered problem ids.
dbGetAllProblemIds = function(reg) {
  query = sprintf("SELECT prob_id FROM %s_prob_def", reg$id)
  batchQuery(reg, query)$prob_id
}

# All registered algorithm ids.
dbGetAllAlgorithmIds = function(reg) {
  query = sprintf("SELECT algo_id FROM %s_algo_def", reg$id)
  batchQuery(reg, query)$algo_id
}

# Problem id of each job, ordered like `ids` when supplied.
dbGetProblemIds = function(reg, ids) {
  query = sprintf("SELECT job_id, prob_id FROM %s_expanded_jobs", reg$id)
  dbSelectWithIds(reg, query, ids)$prob_id
}
# Algorithm id of each job, ordered like `ids` when supplied.
# BUG FIX: the query previously selected `prob_id` while the function
# returned `$algo_id`, a column absent from the result — so this always
# yielded NULL. Select `algo_id` instead (mirrors dbGetProblemIds).
dbGetAlgorithmIds = function(reg, ids) {
  query = sprintf("SELECT job_id, algo_id FROM %s_expanded_jobs", reg$id)
  dbSelectWithIds(reg, query, ids)$algo_id
}
# Delete the given jobs, then garbage-collect job definitions no longer
# referenced by any remaining job. Always returns TRUE invisibly.
dbRemoveJobs = function(reg, ids) {
  query = sprintf("DELETE FROM %s_job_status WHERE job_id IN (%s)", reg$id, collapse(ids))
  batchQuery(reg, query, flags = "rw")
  query = sprintf("DELETE FROM %1$s_job_def WHERE job_def_id NOT IN (SELECT DISTINCT job_def_id FROM %1$s_job_status)", reg$id)
  batchQuery(reg, query, flags = "rw")
  return(invisible(TRUE))
}
|
6b59b29effffe90f3479c33a3f10b67bfd58a47b
|
60bab87f32843c4055ed98ee046e0e5492576ebd
|
/DataSet1.R
|
2578bfaea002064e78d2d26c697514427ebce97f
|
[] |
no_license
|
rafalopezv/Assignment1
|
c9b4c1ce7804e1632e1612d35c2dd22dd9d9e4af
|
c8e9fdba7622c012ae92c3e663d361c28a066865
|
refs/heads/master
| 2021-01-10T05:39:56.774766
| 2016-03-04T15:32:25
| 2016-03-04T15:32:25
| 52,889,748
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,033
|
r
|
DataSet1.R
|
####################################
# R source to analyze data set 'women'
# Emilia Sicari
# March 1st 2016
# Introduction to collaborative social science data analysis - Hertie School of Governance
####################################
#Setting the working directory
getwd()
#setwd("/Users/emiliasicari/Desktop/Assignment1/Assignment1/")
#Installing packages, or loading packages if already installed
#Loading the ggplot2 package
library(ggplot2)
#Installing package corrgram
#install.packages(corrgram)
#Loading the installed package corrgramm
library(corrgram)
#Installing package dplyr
#install.packages("dplyr")
#Loading package
library(dplyr)
#Listing internal data set
data()
#Loading the data set 'women' (heights/weights of 15 American women)
data(women)
#Finding data description
?women
#Finding variables names
names(women)
#Finding the class of the variable height
class(women$height)
#Finding the class of the variable weight
class(women$weight)
#Showing the content of the data set
head(women)
#Converting height from inches to meters (1 m = 39.37 in)
hm<-women$height/39.37
#Converting weight from pounds to kilograms (1 kg = 2.205 lb)
wk<-women$weight/2.205
#Description of the distribution of the numeric variable 'height'
summary(women$height)
boxplot(women$height)
hist(women$height)
hist(women$height,
     main = 'Average heights of American women',
     xlab = '')
hist(women$height, freq = FALSE)  # density scale instead of counts
#Measuring central tendency of the variable 'height'
# Hand-rolled arithmetic mean: the total of the values divided by how many
# there are. Equivalent to base::mean() for a plain numeric vector.
fun_mean <- function(x) {
  total <- sum(x)
  n <- length(x)
  total / n
}
#mean
mean(women$height)
#median
median(women$height)
#mode (most frequent value; may return several values on ties)
t<-table(women$height)
t[t==max(t)]
#Measuring dispersion of variable 'height'
#range
range(women$height)
#Interquartile range
IQR(women$height)
#variance
var(women$height)
#standard deviation
sd(women$height)
#standardizing the variable (z-scores: mean 0, sd 1)
hz1<-scale(women$height)
#Description of the distribution of the numeric variable 'weight'
summary(women$weight)
boxplot(women$weight)
hist(women$weight)
# NOTE(review): the title literal says "heights" but this plots weight —
# likely a copy-paste slip in the main= string.
hist(women$weight,
     main = 'Average heights of American women',
     xlab = '')
hist(women$weight, freq = FALSE)
#Measuring central tendency of the variable 'weight'
#mean
mean(women$weight)
#median
median(women$weight)
#mode
t<-table(women$weight)
t[t==max(t)]
#Measuring dispersion of variable 'weight'
#range
range(women$weight)
#Interquartile range
IQR(women$weight)
#variance
var(women$weight)
#standard deviation
sd(women$weight)
# Standardizing the weight variable (z-scores).
# BUG FIX: this line previously repeated scale(women$height) from the
# height section above; in the weight section it should standardize weight.
wz1 <- scale(women$weight)
#Measuring the association between the two variables
# Pearson correlation test between log(height) and weight.
# NOTE(review): only height is log-transformed here — confirm that is the
# intended specification rather than cor.test(height, weight).
cor.test(log(women$height), women$weight)
#correlation graph
corrgram(women)
plot(log(women$height), women$weight,
     main = 'Correlation between weight and height of American Women',
     xlab = '')
#Showing the relation between height and weight
ggplot(women, aes(height, weight)) + geom_path()
citation()
# R Core Team (2015). R: A language and environment for statistical
# computing. R Foundation for Statistical Computing, Vienna, Austria. URL
# https://www.R-project.org/.
#Data Source: McNeil, D. R. (1977) Interactive Data Analysis. Wiley.
|
aa22d31d59d2ba47982cd40fbb87fa05d901f223
|
288cef50e5114bede0c494c1bb23db2bd437261f
|
/D_Module9_Krystalle.R
|
9a0eec88719af5bf551b37af917d7bdcb56301cf
|
[] |
no_license
|
ksdiaz/NR995_Mod9
|
d9447f64801e4919adb7b9e5ac642164f2b436a5
|
c75e09a927f53d4efea30dfc0724ac9b567aff75
|
refs/heads/master
| 2021-07-18T05:02:15.749987
| 2017-10-24T19:56:13
| 2017-10-24T19:56:13
| 108,018,436
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,933
|
r
|
D_Module9_Krystalle.R
|
#NR 995 Fall2017
#Module 9 HW
#23 October 2017
#Group D
#Krystalle Diaz
#setwd("/Users/krystallediaz/Dropbox/Classes/NR 995/Coding/")
####1) The mammal sleep dataset is included in the ggplot2 package and provides information about the sleeping
#habits of 83 species of mammals. Load the dataset (library(ggplot2); data(msleep); ?msleep).
library("ggplot2")
data(msleep)
?msleep
#How many diet type (i.e., vore) categories are there?
# NOTE: unique() keeps NA, so the count below includes the NA "category".
length(unique(msleep$vore)) #There are 5 diet type ("vore) categories- carni, omni,herbi, insecti,
#and an NA category.
#Visually investigate whether daily sleep totals vary with diet type: Make a boxplot comparing the daily sleep
#totals across the vore categories, excluding rows that do not have data in the vore column. Remember to include
#informative axis labels. Briefly describe in 1-2 sentences the major patterns in the plot.
# Boxplot of total daily sleep by diet type, with NA-vore rows removed.
ggplot(data=msleep[!is.na(msleep$vore), ], aes(x = vore, y = sleep_total)) + geom_boxplot(stat="boxplot") +
  labs(x= "Diet Type (-vore)", y = "Sleep Total (hours)") + ggtitle("Daily Sleep by Diet Type")
#Carnivores, omnivores, and insectivores have a wide range of values, with most of these values falling into a
#wide range of statiscal values. Insectivores are similar, but values falling outside of the scope of the quartile
#statistics still cling tightly to those statistical values, and the mean is rather high. Omnivores have a tight
#range of quartile statistics, with what could be 4 outlier values.
####2) Using the mammal sleep dataset, use plot() to show the relationship between the natural log of body size
#and the length of the sleep cycle, labelling axes appropriately.
msleep$ln_bodywt <- log(msleep$bodywt) #from ?log : "log computes logarithms, by default natural logarithms"
# NOTE(review): the xlab literal says "Sleep Total (hours)" but the x
# variable actually plotted is sleep_cycle.
plot(msleep$sleep_cycle, msleep$ln_bodywt, type="p", xlab="Sleep Total (hours)", ylab="ln (Body Weight (kg))",
     main="Sleep by ln(Body Weight)")
#ggplot version:
# Scatter of sleep_total vs ln body weight, colored by order, one panel per
# conservation status, with a linear fit per panel.
ggplot(data=na.omit(msleep), aes(x=sleep_total, y=ln_bodywt)) + geom_point(aes(color=order)) +
  facet_wrap(~conservation, nrow=2) + stat_smooth(method="lm", se=F) #used facet_wrap for the panels because it's easier
#Only domesticated and LC (least concerned) species have more than one point of data. For these, the data seem to
#have a rather loose pattern, with LC looking like it could be random, or at least affected by other factors that
#are not body weight. So there doesn't seem to be enough data to make that sort of conclusion.
#Got these codes from the Wikipedia article on conservation status, which is where ?msleep says that data is from.
#nt = near threatened
#en = endangered
#lc = least concern
#vu = vulnerable
####3) How does the ratio of brain weight to body weight (i.e., brainwt/bodywt) vary by diet type?
# NOTE(review): msleep.sub is built here but never used — brain_body_ratio()
# below repeats the same NA filtering internally.
msleep.sub <- msleep[!is.na(msleep$brainwt) & !is.na(msleep$bodywt) & !is.na(msleep$vore), ]
#function to return a data frame summarizing brain to body weight ratio by diet type
# Summarize the brain-to-body weight ratio by diet type.
#
# Args:
#   x.data: data frame with at least `brainwt`, `bodywt`, and `vore`
#           columns (e.g. ggplot2::msleep).
# Returns a data frame with one row per diet type and columns:
#   vore, brain_body_mean (group mean of brainwt/bodywt), and
#   brain_body_se (that mean divided by sqrt of the number of diet groups,
#   matching the assignment's formula).
brain_body_ratio <- function(x.data) {
  # Keep only rows with all three required fields present.
  complete <- x.data[!is.na(x.data$brainwt) &
                       !is.na(x.data$bodywt) &
                       !is.na(x.data$vore), ]
  # Mean brain/body ratio within each diet type.
  group_means <- tapply(complete$brainwt / complete$bodywt,
                        complete$vore, mean)
  result <- data.frame(vore = names(group_means),
                       brain_body_mean = as.vector(group_means),
                       stringsAsFactors = FALSE)
  result$brain_body_se <- result$brain_body_mean / sqrt(nrow(result))
  result
}
# Run the summary on the full msleep data set.
diet_stats <- brain_body_ratio(msleep)
#In 1-2 sentences, identify the contribution of each group member to the assignment. Upload a link
#to your group’s GitHub repository (i.e., http://github.com/username/reponame) to submit your assignment in myCourses, one per group.
|
c71fb4f83967075fce656e258d0e067c8e2b54c9
|
c53e367a5a155cfb1ee3a41e8b0351aeaa8d331d
|
/snpStats/doc/tdt-vignette.R
|
3ed0ff0f4b633989a0047e1fd81524ac08638a64
|
[
"MIT"
] |
permissive
|
solgenomics/R_libs
|
bcf34e00bf2edef54894f6295c4f38f1e480b3fc
|
e8cdf30fd5f32babf39c76a01df5f5544062224e
|
refs/heads/master
| 2023-07-08T10:06:04.304775
| 2022-05-09T15:41:26
| 2022-05-09T15:41:26
| 186,859,606
| 0
| 2
|
MIT
| 2023-03-07T08:59:16
| 2019-05-15T15:57:13
|
C++
|
UTF-8
|
R
| false
| false
| 1,440
|
r
|
tdt-vignette.R
|
### R code from vignette source 'tdt-vignette.Rnw'
###################################################
### code chunk number 1: family-data
###################################################
require(snpStats)
data(families)
genotypes
head(pedData)
###################################################
### code chunk number 2: mis-inheritances
###################################################
mis <- misinherits(data=pedData, snp.data=genotypes)
dim(mis)
###################################################
### code chunk number 3: per-subj-snp
###################################################
per.subj <- apply(mis, 1, sum, na.rm=TRUE)
per.snp <- apply(mis, 2, sum, na.rm=TRUE)
par(mfrow = c(1, 2))
hist(per.subj,main='Histogram per Subject', xlab='Subject')
hist(per.snp,main='Histogram per SNP', xlab='SNP')
###################################################
### code chunk number 4: per-family
###################################################
fam <- pedData[rownames(mis), "familyid"]
per.fam <- tapply(per.subj, fam, sum)
par(mfrow = c(1, 1))
hist(per.fam, main='Histogram per Family', xlab='Family')
###################################################
### code chunk number 5: tdt-tests
###################################################
tests <- tdt.snp(data = pedData, snp.data = genotypes)
cbind(p.values.1df = p.value(tests, 1),
p.values.2df = p.value(tests, 2))
qq.chisq(chi.squared(tests, 1), df = 1)
|
b9de23aef9551dfc9997e0111341b8ee571ddcda
|
8456e8ad892c85518b4c509d05b89f88dd6eadd1
|
/exercise-2/exercise.R
|
a5d749ea4c3406e03f1cda751e04ecc12a46cc4e
|
[
"MIT"
] |
permissive
|
HyunnyKim/m8-dataframes
|
a0a83e0a4d84d60396403fbbaad1086411974886
|
9912e3f800906989fa3a57d74cf7b07a1819cd2c
|
refs/heads/master
| 2021-01-10T23:12:10.643747
| 2016-10-13T22:06:57
| 2016-10-13T22:06:57
| 70,634,394
| 0
| 0
| null | 2016-10-11T20:52:51
| 2016-10-11T20:52:50
| null |
UTF-8
|
R
| false
| false
| 806
|
r
|
exercise.R
|
# Exercise 2: Creating data frames
# Create a vector of the number of points the Seahawks have scored in each game this season (google "Seahawks")
Seahawks.win <- c(12, 3, 37, 27)
# Create a vector of the number of points the Seahwaks have allowed to be scored against them in each game this season
Opponent.win <- c(10, 9, 18, 17)
# Combine your two vectors into a dataframe
games <- data.frame(Seahawks.win, Opponent.win)
# Create a new column "diff" that is the difference in points
games$diff <- games$Seahawks.win - games$Opponent.win
# Create a new column "won" which is TRUE if the Seahawks wom
games$won <- games$diff > 0
# Create a vector of the opponents
opponents <- c('Dolphins', 'Rams','49ers', 'Jets')
# Assign your dataframe rownames of their opponents
rownames(games) <- opponents
|
02349398101c955b542e8cd322fb70c20602ebe9
|
8769749bb0a919299b66db7aaafa400d1d412469
|
/archive/hiccup_loop.old/replicated_loop.anchors.histone.r
|
c9f9c0a20568877022c20866a0067e944010d76c
|
[] |
no_license
|
bioinfx/cvdc_scripts
|
e9e113fae866d2d3f0c2515fae1b410b2d7a3eeb
|
d33757f9d02fa6d503b5cb65336c0e4e410caa78
|
refs/heads/master
| 2022-03-19T00:15:13.372417
| 2019-12-05T04:38:48
| 2019-12-05T04:38:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,849
|
r
|
replicated_loop.anchors.histone.r
|
setwd("../../analysis/hiccup_loops/")
options(scipen=99)
pdf("figures/replicated.anchors.cluster.histone.pdf",height=20,width=6)
##table 1: anchors.
for (mark in c("H3K27ac","H3K27me3","H3K4me1","H3K4me3")){
print(mark)
anchors = read.delim("replicated_loops/loop_anchors.uniq.30k.num_loops.cluster.txt")
## table 2: anchors2peak
a2peak = data.frame(fread(paste0("overlap_anchors_to_features/anchor.",mark,"_merged_peaks.txt"),header=F))
a2peak$name = paste(a2peak$V1, a2peak$V2+10000)
## table 3: gene rpkm
feature = data.frame(fread(paste0("../../data/chipseq/edger/",mark,".rpkm.fc.edger.txt")))
feature$fdr = p.adjust(feature$PValue)
keep = names(table(anchors$cluster_dense))[which(table(anchors$cluster_dense)>50)]
anchors.keep = anchors[which(anchors$cluster_dense %in% keep),]
# merge loops with genes
m1 = merge(anchors.keep, a2peak, by="name")
m2 = merge(m1, feature, by.x="V7",by.y="Geneid",all.x=T)
m2$df = m2$fdr < 0.05
anchor.df = m2[which(m2$df==TRUE),]
anchor.df = anchor.df[order(anchor.df$cluster_dense),]
num = table(factor(anchor.df$cluster_dense))
inc = 0
for(i in 1:length(num)) inc[i+1] = inc[i] + num[i]
mat = as.matrix(anchor.df[,21:32])
for (i in 1:length(num)) {
hc = hclust(as.dist( 1- cor(t(mat[which(anchor.df$cluster_dense==names(num)[i]),]))))
mat[which(anchor.df$cluster_dense==names(num)[i]),] = mat[which(anchor.df$cluster_dense==names(num)[i]),][hc$order,]
}
# log transformation
mat = log2(sweep(mat+1e-6, 1, apply(mat,1, mean), "/"))
rownames(mat) = 1:nrow(mat)
mat.melt = melt(mat)
print(ggplot(mat.melt,aes(x=Var2,y=Var1,fill=value)) + geom_tile() +
scale_y_continuous(breaks=inc[-1],labels = names(num))+
scale_fill_gradientn(colours = c("darkblue", "white", "red"),
values = scales::rescale(c(-10, -5, 0, 1, 1))) +
geom_hline(yintercept=inc,size=1) + ggtitle(mark))
}
dev.off()
|
bf1b62ec8eacfc3b9376f9d7a4b8fc49995d2429
|
17fdd34b68df267b8262d532adddba733879b0b8
|
/man/OR.Rd
|
ca2ae3d938759d2f57bedc91304b85f2a0efe2d5
|
[] |
no_license
|
kevinmhadi/khtools
|
f0b57e0be0014084f2f194465ab4a924fe502268
|
85d64808f8decd71f30510ccd18f38986031be74
|
refs/heads/master
| 2023-07-19T21:50:22.341824
| 2023-07-19T01:46:03
| 2023-07-19T01:46:03
| 235,495,453
| 0
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 208
|
rd
|
OR.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/package.R
\name{OR}
\alias{OR}
\title{test boolean OR across multiple vectors}
\usage{
OR(FUN = identity, ...)
}
\description{
}
|
9702dfae6e70177f90f85c3842bf99df5fbe8d38
|
e7b764ecc26924c0ccc3ac4215912bebdf4e1c66
|
/cachematrix.R
|
7c517ee28c57db7e7536533e6aa2459f56b1e2d9
|
[] |
no_license
|
johnepling/ProgrammingAssignment2
|
45d9c349bc9a2036cfc03d63d391405e7a351d2e
|
074a0f164a7f01d7534c73dc1fc6745e15bf3c58
|
refs/heads/master
| 2020-12-03T00:27:07.952392
| 2017-07-02T17:37:44
| 2017-07-02T17:37:44
| 96,031,444
| 0
| 0
| null | 2017-07-02T15:16:21
| 2017-07-02T15:16:20
| null |
UTF-8
|
R
| false
| false
| 2,216
|
r
|
cachematrix.R
|
## A set of functions to cache the inversion of a matrix and retrieve it if needed.
## This function creates a list of functions that sets up the next set.
makeCacheMatrix <- function(x = matrix()) { ##define fxn and be prepared to coerce x to matrix
cacheInv <- NULL ##initialize cacheInv, which will hold the stored value
set <- function(y) { ##initialize set fxn with value of y (which will be passed when x$set is called)
x <<- y ##set x (in parent environment) to value of y
cacheInv <<- NULL ##set cacheInv (in parent env) to NULL after setting x
}
get <- function() x ##used for when runnin x$get - will give value of x stored in current env
setSolve <- function(solve) cacheInv <<- solve ##defines setSolve as a function setting cacheInv in the parent env to the calculated value
getSolve <- function() cacheInv ##function used to get stored value when calling x$getSolve
list(set=set, get = get, setSolve = setSolve, getSolve = getSolve) ##creates vector of functions to be used in cachematrix
}
## This function returns a matrix that's the inverse of 'x' - caclulated if not
## already cached
cacheSolve <- function(x, ...) { ##defines cacheSolve function with input of x and defaults
## Return a matrix that is the inverse of 'x'
cacheInv <- x$getSolve() ##if present, this will pull whatever is in cachceInv from the parent env using x$getSolve
if(!is.null(cacheInv)) { ##if cacheInv has a value, send msg and return cached value
message("getting cached data")
return(cacheInv)
}
data <- x$get() ##assuming cacheInv does not have a value, get the matrix data to calculate it
cacheInv <- solve(data, ...) ##perform the laborious and time-consuming matrix inversion calculation
x$setSolve(cacheInv) ##use x$setSolve to store the result in the parent env so you don't have to calculate it again
cacheInv ##don't forget to return the result
}
|
5070f933cc40e65c877e39d160382d0f47c04f34
|
0c4f1bd80f062f38bda55fd4837910d26d63c299
|
/02_R4DS_EDA.R
|
e849b96bdc8a6c5647a2f703f87e8da11103ab2d
|
[] |
no_license
|
rladiesaustin/R4DS_workshop_series
|
68b7469d1ec7fd11cdc9142d5bcac3a0e441b71d
|
4fe0b15a77657a7483ecf19b36598716394c974a
|
refs/heads/master
| 2021-09-07T11:01:46.375763
| 2018-02-22T00:38:53
| 2018-02-22T00:38:53
| 105,070,864
| 7
| 5
| null | 2018-02-22T00:38:54
| 2017-09-27T21:23:30
|
R
|
UTF-8
|
R
| false
| false
| 4,010
|
r
|
02_R4DS_EDA.R
|
##################################
# R ladies R4DS
# ggplot2
# created 10/29/2017
################################
# for EDA, load the tidyverse!
# common to use both ggplot2 and dplyr when exploring data
library(tidyverse)
##########VISUALIZE DISTRIBUTIONS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#lets visualize some data from the diamonds dataset included with ggplot2
# for CATEGORICAL VAR
ggplot(data = diamonds)+
geom_bar(mapping = aes(x=cut)) # remember, default geom, default stat. for bar it is count
# for CONTINUOUS VAR
ggplot(data=diamonds)+
geom_histogram(mapping = aes(x=carat), binwidth = 0.5) # try adjusting the bindwidth here, what does it do?
##########TYPICAL VALUES~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#lets first look at smaller diamond sizes
small<-diamonds%>%
filter(carat < 3)
ggplot(data = small, aes(carat))+
geom_histogram(binwidth = 0.01) # what do you notice with this plot? any clustering??
##########UNUSUAL VALUES~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# a big clue to outliers ----very wide x axis:
ggplot(diamonds)+
geom_histogram(aes(x=y), binwidth=0.5)
# looks like nothing is there,but there is! lets zoom in
ggplot(diamonds)+
geom_histogram(aes(x=y), binwidth = 0.5)+
coord_cartesian(ylim=c(0,50))
#there they are!!!
# P . 91 exercise 4
# try above with xlim and ylim---what happens?
# P.91 exercise 2 --what is up with price?
ggplot(diamonds, aes(price))+
geom_histogram()
# try different bin widths here!!
# how could we look at these unusual values with dplyr??? TRY THAT HERE:
##########MISSING VALUES~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# for unusual values, dont delete them, ghost them with NA!
diamonds2<-diamonds%>%
mutate(y = ifelse(y<3 | y>20, NA, y))
#look at the ones that are NA
look<-filter(diamonds2, is.na(y))
#notice any patterns with these?
##########COVARIATION - BOXPLOT~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#good for comparing a continuous variable across a categorical var
ggplot(data=diamonds, aes(x=cut, y=price))+
geom_boxplot()
#notice any odd patterns??
# P 99 # 5 lets check out different plots
ggplot(data=diamonds, aes(x=cut, y=price))+
geom_violin()
ggplot(data=diamonds, aes(x=price, color=cut))+
geom_freqpoly()
##########COVARIATION - TWO CATEGORICAL~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ggplot(data=diamonds)+
geom_count(aes(x=cut, y=color))
# vs
diamonds%>%
count(color, cut)%>%
ggplot(aes(x=color, y=cut))+
geom_tile(aes(fill=n))
# this is also known as a heat map
# P. 101 exercise 3
# why is the above plot slightly better than the one below??
diamonds%>%
count(color, cut)%>%
ggplot(aes(y=color, x=cut))+
geom_tile(aes(fill=n))
##########COVARIATION - TWO CONTINUOUS~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# out good friend the scatter plot!!
ggplot(data=diamonds)+
geom_point(aes(x=carat, y=price))
# whats up with the 5 carat diamond that is as expensive as many 1-3 carat diamonds?
# what could we add to the above plot to help figure this out?
# bin a continuous variable to behave like a categorical.....
ggplot(data=small, aes(x=carat, y=price))+
geom_boxplot(aes(group=cut_width(carat, 0.1)))
# but this is somewhat misleading---we know the Ns are very different for these groups
# enter varwidth = TRUE!
ggplot(data=small, aes(x=carat, y=price))+
geom_boxplot(aes(group=cut_width(carat, 0.1)), varwidth = T)
|
bf64cd786cf175796f0155f2eb9fc55fc2f0aab4
|
29d34e3302b71d41d77af715727e963aea119392
|
/man/dplot3.Rd
|
60cc73ea87314ed8affd183948854cd503a5ee0d
|
[] |
no_license
|
bakaibaiazbekov/rtemis
|
1f5721990d31ec5000b38354cb7768bd625e185f
|
a0c47e5f7fed297af5ad20ae821274b328696e5e
|
refs/heads/master
| 2020-05-14T20:21:40.137680
| 2019-04-17T15:42:33
| 2019-04-17T15:42:33
| 181,943,092
| 1
| 0
| null | 2019-04-17T18:00:09
| 2019-04-17T18:00:09
| null |
UTF-8
|
R
| false
| true
| 3,485
|
rd
|
dplot3.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dplot3.R
\name{dplot3}
\alias{dplot3}
\title{Dynamic Plots (\code{plotly})}
\usage{
dplot3(x, y = NULL, mode = NULL, group = NULL, point.size = 7,
point.color = NULL, point.alpha = 0.66, point.symbol = "circle",
point.labels = NULL, fit = "none", axes.fixedratio = FALSE,
xlim = NULL, ylim = NULL, gam.k = 4, fit.width = 3,
fit.color = "#18A3AC", fit.alpha = 1, se.fit = TRUE,
se.times = 2, se.color = NULL, se.alpha = 0.2,
density.color = "#18A3AC", density.alpha = 0.66, density.width = 1,
density.mean = FALSE, density.mean.width = 2, main = NULL,
xlab = "x", ylab = "y", font.family = "Helvetica Neue",
font.color = "gray50", font.size = 18, axes = FALSE, grid = TRUE,
grid.col = "#fff", zero.lines = TRUE, zero.col = "#7F7F7F",
zero.lwd = 1, legend = TRUE, legend.bgcol = "#00000000",
legend.bordercol = "gray50", legend.borderwidth = 0,
legend.fontcol = "#000000", margins = c(60, 70, 40, 20), pad = 4,
bg = "#E5E5E5", showlegend = TRUE, legend.xy = c(0, 1),
axes.square = FALSE, height = NULL, width = NULL)
}
\arguments{
\item{x}{Numeric vector. x coordinates}
\item{y}{Numeric vector. y coordinates}
\item{mode}{String: "scatter" or "density"}
\item{group}{String: Name of variable to group by (not yet functional)}
\item{point.size}{Numeric scalar or vector}
\item{point.color}{Color of points}
\item{point.alpha}{Float: Alpha of points}
\item{point.symbol}{String: "circle", "square"; see plotly documentation for more
Default = "circle"}
\item{point.labels}{String, optional: Point labels displayed on mouse over}
\item{fit}{String, optional: "lm", "gam"}
\item{gam.k}{Integer: Number of bases for \code{mgcv::gam}'s smoothing spline}
\item{fit.width}{Float: Width of fit line}
\item{fit.color}{Color of fit line}
\item{fit.alpha}{Float: Alpha of fit line}
\item{se.fit}{Logical: If TRUE, draws +/- \code{se.times * standard error}}
\item{se.times}{Float: Multiplier for standard error band. Default = 2}
\item{se.color}{Color of S.E. band}
\item{se.alpha}{Float: Alpha of S.E. band}
\item{density.color}{Color of density line}
\item{density.alpha}{Float: Alpha of density line}
\item{density.width}{Integer: Width of density line}
\item{density.mean}{Logical: If TRUE, draw vertical line at \code{mean(x)}}
\item{density.mean.width}{Integer: Width of \code{density.mean} line. Default = 2}
\item{main}{String: Plot title}
\item{xlab}{String: x-axis label}
\item{ylab}{String: y-axis label}
\item{font.family}{String: Axes' legends' font family}
\item{font.color}{Font color}
\item{font.size}{Integer: Font size}
\item{axes}{Logical: If TRUE, show x and y axes. Default = TRUE}
\item{grid}{Logical: If TRUE, draw grid lines. Default = FALSE}
\item{pad}{Numeric: Distance of tick labels from axes}
\item{showlegend}{Logical: If TRUE, show legends}
\item{legend.xy}{Vector, length 2 [0, 1]: x, y coordinates of legend. 0 means left and bottom for x and y axis
respectively; 1 means right and top. Default = c(0, 1) (i.e. top-left)}
\item{axes.square}{Logical: If TRUE, make axes square}
\item{margin}{Vector, length 4: Plot margins. Default = c(60, 70, 40, 20)}
}
\description{
Build dynamic plots that can be viewed in RStudio Viewer, a web browser, or exported to a static image.
Support for (x, y) scatter plots with optional fit line(lm, or gam), and density plots.
}
\seealso{
\link{mplot3}
}
\author{
Efstathios D. Gennatas
}
|
370c2e7156806529589f3441e1c690040fcb6ca4
|
9aaeeb4874892b1aaef176fd502c189ce66a4b7d
|
/plot1.R
|
04967cbc805721ed94eebdd85a9487c6f1ef9223
|
[] |
no_license
|
anugadiraju/ExData_Plotting1
|
ecfb8599361883ec143a8046cfcfdd2e5bee1a6f
|
b6bad1110ef5a73c8f9899ab03aa463bde920094
|
refs/heads/master
| 2021-01-18T12:14:47.897467
| 2014-07-13T18:44:20
| 2014-07-13T18:44:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,354
|
r
|
plot1.R
|
## Dataset: Electric power consumption [20Mb]
## Description: Measurements of electric power consumption in one household with a one-minute
## sampling rate over a period of almost 4 years. Different electrical quantities and some sub-metering values are available.
## The following descriptions of the 9 variables in the dataset are taken from the UCI web site:
## Date: Date in format dd/mm/yyyy
## Time: time in format hh:mm:ss
## Global_active_power: household global minute-averaged active power (in kilowatt)
## Global_reactive_power: household global minute-averaged reactive power (in kilowatt)
## Voltage: minute-averaged voltage (in volt)
## Global_intensity: household global minute-averaged current intensity (in ampere)
## Sub_metering_1: energy sub-metering No. 1 (in watt-hour of active energy). It corresponds to the kitchen, containing mainly a dishwasher, an oven and a microwave (hot plates are not electric but gas powered).
## Sub_metering_2: energy sub-metering No. 2 (in watt-hour of active energy). It corresponds to the laundry room, containing a washing-machine, a tumble-drier, a refrigerator and a light.
## Sub_metering_3: energy sub-metering No. 3 (in watt-hour of active energy). It corresponds to an electric water-heater and an air-conditioner.
myplot1 <- function() {
## 1. Read in data
## We will only be using data from the dates 2007-02-01 and 2007-02-02.
## One alternative is to read the data from just those dates rather than reading in the entire dataset and
## subsetting to those dates.
## You may find it useful to convert the Date and Time variables to Date/Time classes in R using the strptime() and as.Date() functions.
##Note that in this dataset missing values are coded as ?.
## R just executes the string in the brackets of pipe, literally like in the command prompt.
## Try this in the windows command prompt
## findstr /B /R ^[1-2]/2/2007 household_power_consumption.txt |more
data <- read.table(pipe('findstr /B /R ^[1-2]/2/2007 household_power_consumption.txt'),header=F, sep=';',na.strings="?")
colnames(data) <-names(read.table('household_power_consumption.txt', header=TRUE,sep=";",nrows=1))
png(filename="./plot1.png")
hist(data$Global_active_power,main="Global Active Power",xlab="Global Active Power(kilowatts)",col="red")
dev.off()
}
|
8f910b5feeef34a0c10da150a7a1b25bd0ea20e7
|
d7c3d93e31c159cf0a59198bfa6fed9d58b964b5
|
/cgf/data_prep/point_to_polygon_stunting_mod_b.R
|
95a510c8d894c493bf8517160e5b7e51efac70cf
|
[] |
no_license
|
sekeundanielyu/lbd_2017
|
b34fda1922ac3cd9ae439d724c6fcf59366a24df
|
314d18503630afbf252895dcab7b2ddaa5898fbb
|
refs/heads/master
| 2020-03-19T22:14:14.736922
| 2018-02-27T21:25:55
| 2018-02-27T21:25:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,880
|
r
|
point_to_polygon_stunting_mod_b.R
|
## code to resample points to polygons
############### SETUP ###########################
# initiate timing
start_time <- Sys.time()
root <- ifelse(Sys.info()[1]=="Windows", <<<< FILEPATH REDACTED >>>>>, <<<< FILEPATH REDACTED >>>>>)
repo <- <<<< FILEPATH REDACTED >>>>>
setwd(repo)
# set directories
data_dir <- <<<< FILEPATH REDACTED >>>>>
mbg_dir <- <<<< FILEPATH REDACTED >>>>>
save_dir <- <<<< FILEPATH REDACTED >>>>>
root_dir <- <<<< FILEPATH REDACTED >>>>>
# Loading libraries, scripts, etc.
package_lib <- ifelse(grepl("geos", Sys.info()[4]),
paste0(root, <<<< FILEPATH REDACTED >>>>>),
paste0(root, <<<< FILEPATH REDACTED >>>>>))
.libPaths(package_lib)
source('mbg_central/mbg_functions.R')
source('mbg_central/prep_functions.R')
source('mbg_central/covariate_functions.R')
source('mbg_central/misc_functions.R')
source('mbg_central/post_estimation_functions.R')
source('mbg_central/gbd_functions.R')
source('mbg_central/graph_data_coverage.R')
source('mbg_central/shiny_functions.R')
source('mbg_central/holdout_functions.R')
source('mbg_central/polygon_functions.R')
source('mbg_central/collapse_functions.R')
source('mbg_central/seegMBG_transform_functions.R')
package_list <- c('survey', 'pbapply', 'readstata13', 'foreign',
'rgeos', 'data.table','raster','rgdal','INLA',
'seegSDM','seegMBG','plyr','dplyr', 'foreach',
'doParallel')
for(package in package_list) {
library(package, lib.loc = package_lib, character.only=TRUE)
}
## Load iso3 to country name map
iso3_to_country <- fread(paste0(root_dir,"ref/iso3_to_country.csv"))
indicator <- "stunting_mod_b"
##### 1. ################################################################################################################
## Read all data - this looks like output from ubCov that has been mapped to geographies (via Brandon's post-processing code)
########################################################################################################
# DHS/MICS
all_data <- read.csv(paste0(data_dir,"input_data.csv"), stringsAsFactors = FALSE)
all_data <- as.data.table(all_data)
## fix female coding
all_data$sex[which(all_data$sex == 2)] <- 0
## Somalia data (collapsed to clusters)
somalia <- read.csv(paste0(root,<<<< FILEPATH REDACTED >>>>>))
names(somalia)[names(somalia)=="Latitude"] <- "latitude"
names(somalia)[names(somalia)=="Longitude"] <- "longitude"
names(somalia)[names(somalia)=="Year.of.survey"] <- "start_year"
names(somalia)[names(somalia)=="Number.of.children.examined"] <- "N"
names(somalia)[names(somalia)=="Number.stunting"] <- "stunting_mod_b"
somalia$end_year <- somalia$start_year
somalia$source <- "FSNAU"
somalia$country <- "SOM"
somalia$psu <- 1:nrow(somalia)
somalia$pweight <- NA
somalia$NID <- 270669
somalia$Proportion.wasting <- NULL
somalia$Number.wasting <- NULL
somalia$Proportion.stunting <- NULL
somalia$Proportion.underweight <- NULL
somalia$Number.underweight <- NULL
somalia$Source_Citation <- NULL
##### 2. ################################################################################################################
## Save for data coverage plot
coverage_data <- all_data[, indicator := 1]
coverage_data <- coverage_data[, N := 1]
coverage_data <- coverage_data[, list(N = sum(N)),
by = c('indicator','start_year','source','latitude','longitude','location_code','shapefile','country','psu')]
##### 3. ################################################################################################################
## Process your outcome. The result should still be the microdata (rows are individuals)
## but with a single column for your outcome, i.e. "stunting_binomial" = 0 or 1.
## Example: calculate z-score and whether each individual is stunted (stunting_binomial==1) or not (stunting_binomial==0)
## bring in growth charts
HAZ_chart_months <- read.csv(paste0(mbg_dir,"growth_standards/HAZ_0_60_months.csv"), header = TRUE, sep =",")
WHZ_chart_months <- read.csv(paste0(mbg_dir,"growth_standards/WHZ_0_60_months.csv"), header = TRUE, sep =",")
WAZ_chart_months <- read.csv(paste0(mbg_dir,"growth_standards/WAZ_0_60_months.csv"), header = TRUE, sep =",")
HAZ_chart_weeks <- read.csv(paste0(mbg_dir,"growth_standards/HAZ_0_13_weeks.csv"), header = TRUE, sep =",")
WAZ_chart_weeks <- read.csv(paste0(mbg_dir,"growth_standards/WAZ_0_13_weeks.csv"), header = TRUE, sep =",")
## HAZ
all_data_HAZ <- all_data
# prep all_data -- this needs to happen within each section, to maximize data use amidst missingness
all_data_HAZ <- subset(all_data_HAZ, !is.na(age_wks))
all_data_HAZ <- subset(all_data_HAZ, !is.na(child_height))
all_data_HAZ <- subset(all_data_HAZ, all_data_HAZ$child_height < 999)
# prep HAZ charts to be joined on
names(HAZ_chart_months)[names(HAZ_chart_months)=="l"] <- "HAZ_l"
names(HAZ_chart_months)[names(HAZ_chart_months)=="m"] <- "HAZ_m"
names(HAZ_chart_months)[names(HAZ_chart_months)=="s"] <- "HAZ_s"
names(HAZ_chart_months)[names(HAZ_chart_months)=="age_cat"] <- "age_cat_1"
names(HAZ_chart_months)[names(HAZ_chart_months)=="month"] <- "age_mo"
names(HAZ_chart_weeks)[names(HAZ_chart_weeks)=="l"] <- "HAZ_l"
names(HAZ_chart_weeks)[names(HAZ_chart_weeks)=="m"] <- "HAZ_m"
names(HAZ_chart_weeks)[names(HAZ_chart_weeks)=="s"] <- "HAZ_s"
names(HAZ_chart_weeks)[names(HAZ_chart_weeks)=="week"] <- "age_wks"
## subset data to get two datasets that will use different charts, then merge and rbind together
all_data_HAZ_wks <- subset(all_data_HAZ, all_data_HAZ$age_wks <= 13)
all_data_HAZ_wks <- merge(all_data_HAZ_wks, HAZ_chart_weeks, by=c("sex", "age_wks"), all.x = TRUE, allow.cartesian = TRUE)
all_data_HAZ_mo <- subset(all_data_HAZ, all_data_HAZ$age_wks > 13)
all_data_HAZ_mo <- merge(all_data_HAZ_mo, HAZ_chart_months, by=c("age_cat_1", "sex", "age_mo"), all.x = TRUE)
all_data_HAZ <- rbind(all_data_HAZ_wks, all_data_HAZ_mo)
# calculate HAZ score
all_data_HAZ$HAZ <- (((all_data_HAZ$child_height/all_data_HAZ$HAZ_m) ^ all_data_HAZ$HAZ_l)-1)/(all_data_HAZ$HAZ_s*all_data_HAZ$HAZ_l)
# create binary for stunting
all_data_HAZ$stunting_mod_b <- ifelse(all_data_HAZ$HAZ <= -2, 1, 0)
all_data_HAZ$N <- 1
# drop if HAZ is blank
all_data_HAZ <- subset(all_data_HAZ, !is.na(all_data_HAZ$HAZ))
# drop unacceptable z scores # https://peerj.com/articles/380/ Crowe, Seal, Grijalva-Eternod, Kerac 2014
all_data_HAZ <- subset(all_data_HAZ, all_data_HAZ$HAZ > -6)
all_data_HAZ <- subset(all_data_HAZ, all_data_HAZ$HAZ < 6)
all_data <- as.data.table(all_data_HAZ)
all_data <- all_data[, indicator := 1]
##### 5. ################################################################################################################
## Split up into point and polygon datasets
point_data <- all_data[point==1, ]
poly_data <- all_data[point==0, ]
## Add Somalia dataset
somalia <- as.data.table(somalia)
point_data <- rbind(point_data, somalia, fill = TRUE)
##### 4. ################################################################################################################
## Save for data coverage plot
coverage_data <- rbind(point_data, poly_data, fill = TRUE)
coverage_data <- coverage_data[, list(N = sum(N)),
by = c('indicator','start_year','source','latitude','longitude','location_code','shapefile','country','psu')]
##### 5. ################################################################################################################
## Process point_data as you normally would, collapsing to cluster means. Let's call this new dt point_data_collapsed.
## sum() for binomial indicators or mean() for Gaussian indicators
all_point_data <- point_data[, list(N=sum(N), stunting_mod_b=sum(stunting_mod_b)), by=c('source', 'start_year','latitude','longitude','country', 'nid')]
all_point_data <- all_point_data[!is.na(latitude)]
all_point_data <- all_point_data[!is.na(longitude)]
all_point_data$point <- 1
##### 6. ################################################################################################################
## Process poly_data
setnames(all_point_data, "source", "survey_series")
setnames(poly_data, "source", "survey_series")
poly_data_test <- copy(poly_data)
poly_data_test[, N := 1]
poly_data_test <- poly_data_test[, list(N=sum(N)), by=c('start_year', 'country', 'location_code', 'shapefile', 'survey_series')]
poly_data_bad <- poly_data_test[N==1, ]
if(length(poly_data_bad[, survey_series]) > 0) {
message("This many polygons have 1 observation so will be dropped:")
print(table(poly_data_bad[, survey_series], poly_data_bad[, start_year]))
poly_data <- merge(poly_data, poly_data_test, by=c('start_year', 'country', 'location_code', 'shapefile', 'survey_series'))
poly_data <- poly_data[N.x != N.y, ] ## n.x and n.y are equal where both are 1, i.e. where poly had one cluster
setnames(poly_data, 'N.x', 'N') ## set the original N col back
poly_data[, N.y := NULL] ## remove the summed N col
}
## drop strata that have missing pweight so we don't need to drop the whole NID
by_vars <- c('start_year', 'country', 'location_code', 'shapefile', 'survey_series', 'nid')
na.strata <- aggregate(is.na(pweight) ~ start_year + country +
location_code + shapefile + survey_series + nid,
data = poly_data, sum)
if(sum(na.strata[, 'is.na(pweight)']) > 0){ ## need to drop some
drop.strata <- na.strata[which(na.strata[, 'is.na(pweight)'] > 0), ]
drop.rows <- NULL
for(ds in 1:nrow(drop.strata)){
drop.rows <- c(drop.rows, which(poly_data$start_year == drop.strata$start_year[ds] &
poly_data$country == drop.strata$country[ds] &
poly_data$location_code == drop.strata$location_code[ds] &
poly_data$shapefile == drop.strata$shapefile[ds] &
poly_data$survey_series == drop.strata$survey_series[ds] &
poly_data$nid == drop.strata$nid[ds])
)
}
poly_data <- poly_data[-drop.rows, ]
}
point.keepers <- point_data[, c('source', 'start_year','latitude',
'longitude','country','nid',
"geospatial_id", "master.ind",
"cluster_number"), with = FALSE]
na.pw <- aggregate(is.na(pweight) ~ nid, data = poly_data, sum)
drop.nids <- na.pw$nid[which(na.pw[, 2] > 0)]
poly.keepers <- subset(poly_data, !(nid %in% drop.nids))
setnames(poly.keepers, "survey_series", "source")
poly.keepers <- poly.keepers[, c('source', 'start_year','latitude',
'longitude','country','nid',
"geospatial_id", "master.ind",
"cluster_number"), with = FALSE]
keeper.dat <- rbind(point.keepers, poly.keepers)
write.csv(keeper.dat, file = paste0(<<<< FILEPATH REDACTED >>>>>), row.names=FALSE)
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
poly_surveys <- unique(poly_data[, nid])
# Collapse one polygon-referenced survey (identified by `this_nid`) to
# polygon-level counts of the indicator `stunting_mod_b`.
#
# Relies on globals from the enclosing script: `poly_data`, and the
# helpers `setup_design()` / `collapse_by()` (survey-weighted collapse).
# Returns a data.frame of counts and sample sizes per polygon stratum,
# or NULL when the survey has any missing pweights (cannot be weighted).
collapse_each_nid <- function(this_nid) {
  message(paste0('Collapsing NID: ', this_nid))
  test_poly_data <- poly_data[nid==this_nid,]
  test_poly_data$strata <- 0
  # Rename to the column names the collapse helpers expect.
  names(test_poly_data)[names(test_poly_data)=='cluster_number'] <- 'psu'
  names(test_poly_data)[names(test_poly_data)=='weight'] <- 'pweight'
  # Check for missings
  if(length(test_poly_data$pweight[is.na(test_poly_data$pweight)])>0) {
    message(paste0(length(test_poly_data$pweight[is.na(test_poly_data$pweight)]), ' / ', length(test_poly_data$pweight), ' are missing pweight'))
    return(NULL)
  }
  else {
    # Survey-weighted collapse of one indicator column to polygon level.
    collapse_polys <- function(x) {
      setup_design(df = test_poly_data, var = x)
      by_vars <- c('start_year', 'country', 'location_code', 'shapefile', 'survey_series', 'nid')
      poly <- collapse_by(df = test_poly_data,
                          var = x,
                          by_vars = by_vars)
      collapsed <- poly[, c(by_vars, 'mean', 'ss')] ## this converts to ratespace!!
      names(collapsed)[names(collapsed)=='mean'] <- x
      names(collapsed)[names(collapsed)=='ss'] <- 'N'
      collapsed[, eval(x)] <- collapsed[, eval(x)] * collapsed[, 'N'] ## convert back to count space!!
      return(collapsed)
    }
    # Only one indicator is collapsed here, but the lapply/Reduce shape
    # supports several indicator columns merged on the by-variables.
    polys <- c('stunting_mod_b')
    polys <- lapply(polys, collapse_polys)
    merged_polys <- Reduce(function(...) merge(..., all=T), polys)
    return(merged_polys)
  }
}
# Collapse every polygon survey, stack the results, and build the
# dataset used for the data-coverage plot (in rate space).
poly_nids <- unique(poly_data[, nid])
poly_data_collapsed <- lapply(poly_nids, collapse_each_nid)
# Append all collapsed polygon surveys together
poly_data_collapsed <- do.call(rbind.fill, poly_data_collapsed)
poly_data_collapsed$point <- 0
all_poly_data <- poly_data_collapsed
##### 7. ################################################################################################################
## Append and save a copy for data coverage Shiny before resampling polygons
collapsed <- rbind(all_poly_data, all_point_data, fill=TRUE)
## save the collapsed data so we can report how many point and
## polygons we use from which NIDs post-cleaning
write.csv(collapsed, file = paste0(<<<< FILEPATH REDACTED >>>>>), row.names=FALSE)
# ##### 7.1 ################################################################################################################
# ## new data coverage plot - grab data plot later
coverage_data <- copy(collapsed)
coverage_data <- coverage_data[, latitude := as.numeric(latitude)]
coverage_data <- coverage_data[, longitude := as.numeric(longitude)]
coverage_data <- coverage_data[, stunting_mod_b := stunting_mod_b / N] #transform from count space to rate space
# Column names expected by the coverage-plot helper.
setnames(coverage_data, 'nid', 'svy_id')
setnames(coverage_data, 'survey_series', 'source')
##### 8. ################################################################################################################
## Resample collapsed polygon data to weighted point data
poly_data_collapsed <- as.data.table(poly_data_collapsed)
poly_data_collapsed$stunting_mod_b_count <- poly_data_collapsed$stunting_mod_b
resampled_poly_data <- resample_polygons_dev(data = poly_data_collapsed,
                                             cores = 20,
                                             indic = 'stunting_mod_b_count') #outcome in count space
# Aggregate point records to one row per cluster; add the bookkeeping
# columns so points and resampled polygons share a schema.
all_point_data <- point_data[, list(N=sum(N), stunting_mod_b=sum(stunting_mod_b)),
                             by=c('source', 'start_year','latitude','longitude','country', 'nid')]
all_point_data$point <- 1
all_point_data <- all_point_data[, pseudocluster := FALSE]
all_point_data <- all_point_data[, weight := 1]
all_point_data <- all_point_data[, shapefile := ""]
all_point_data <- all_point_data[, location_code := ""]
resampled_poly_data <- resampled_poly_data[, stunting_mod_b := stunting_mod_b_count]
resampled_poly_data <- resampled_poly_data[, stunting_mod_b_count := NULL]
## rename survey_series to source for poly data
setnames(resampled_poly_data, 'survey_series', 'source')
##### 9. ################################################################################################################
## Append point and polygon collapsed data
all_processed_data <- rbind(all_point_data, resampled_poly_data)
setnames(all_processed_data, 'start_year', 'year')
all_collapsed <- all_processed_data
## Replace year with period 1998-2002, 2003-2007, 2008-2012, 2013-2017
all_collapsed <- subset(all_collapsed, year >= 1997)
names(all_collapsed)[names(all_collapsed) == "year"] = "original_year"
all_collapsed <- all_collapsed[original_year >= 1998 & original_year <= 2002, year := 2000]
all_collapsed <- all_collapsed[original_year >= 2003 & original_year <= 2007, year := 2005]
all_collapsed <- all_collapsed[original_year >= 2008 & original_year <= 2012, year := 2010]
all_collapsed <- all_collapsed[original_year >= 2013 & original_year <= 2017, year := 2015]
# Drop rows with unusable coordinates (missing or outside valid ranges).
all_collapsed <- all_collapsed[, latitude := as.numeric(latitude)]
all_collapsed <- all_collapsed[, longitude := as.numeric(longitude)]
all_collapsed <- all_collapsed[!is.na(latitude)]
all_collapsed <- all_collapsed[!is.na(longitude)]
all_collapsed <- all_collapsed[latitude>=-90 & latitude<=90]
all_collapsed <- all_collapsed[longitude>=-180 & longitude<=180]
# Counts must be integers for the binomial model downstream.
all_collapsed <- all_collapsed[, stunting_mod_b := round(stunting_mod_b, 0)]
## In clusters where LRI > N (due to tiny samples and every child having LRI), cap at N
all_collapsed <- all_collapsed[stunting_mod_b > N, stunting_mod_b := N]
write.csv(all_collapsed, file = paste0("<<<< FILEPATH REDACTED >>>>>/stunting_mod_b.csv"), row.names = FALSE)
# Report total wall-clock runtime (start_time set earlier in the script).
Sys.time() - start_time
###### 10 ##########################################################################################################
## make the plot now that the data is ready
coverage_maps <- graph_data_coverage_values(df = coverage_data,
                                            var = 'stunting_mod_b',
                                            title = '',
                                            year_min = '1998',
                                            year_max = '2016',
                                            year_var = 'start_year',
                                            region = 'africa',
                                            sum_by = 'n',
                                            cores = 10,
                                            indicator = 'stunting_mod_b',
                                            high_is_bad = TRUE,
                                            return_maps = TRUE,
                                            legend_title = 'Prevalence \n of MSS')
|
e856867cec8c82280e6e6ac5e8715d29a340db95
|
a005067f580df52c02f3020b431727a12878a125
|
/isl-evans/regularization.R
|
89c5f0efebc18319bb503a349979b9dac99ee34d
|
[] |
no_license
|
sunnykan/RFiles
|
aff48be614a1eb72f394f1e360651d5dd4ce4b64
|
d62618cc175846957183ebb1ac7be0877b6bf616
|
refs/heads/master
| 2022-11-21T10:15:23.175057
| 2022-11-05T16:41:23
| 2022-11-05T16:41:23
| 178,078,739
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,296
|
r
|
regularization.R
|
# ISLR chapter 6 lab: best-subset selection on the Hitters data.
# load-libraries.R attaches ISLR/leaps/tidyverse; subsets-plots.R
# provides subsets_plots() used below.
source("load-libraries.R")
source("subsets-plots.R")
hitters <- as.tibble(Hitters)
glimpse(hitters)
names(hitters) <- tolower(names(hitters))
dim(hitters)
sum(is.na(hitters$salary))
# drop any row in dataframe with missing values
hitters <- hitters[complete.cases(hitters),]
sum(is.na(hitters$salary))
# Default regsubsets() only searches up to 8-variable models ...
regfit.full <- regsubsets(salary ~., hitters)
summary(regfit.full)
# ... so refit allowing all 19 predictors.
regfit.full <- regsubsets(salary ~., hitters, nvmax = 19)
reg.summary = summary(regfit.full)
names(reg.summary)
reg.summary$rsq
# Model size minimising BIC.
which.min(reg.summary$bic)
subsets_plots(reg.summary, 19)
# Diagnostic panel: RSS, adjusted R^2, Cp and BIC against model size,
# with a red dot marking the optimum of each criterion.
#
# FIX: the original paired mismatched coordinates on the dots
# (x = which.min(bic) with y = cp[10], y = adjr2[11], y = bic[6]
# hard-coded), so the highlighted point could sit off the curve of the
# criterion being plotted.  Each dot's x and y are now derived from the
# same criterion.  A duplicated plot(regfit.full, scale = "r2") call
# was also removed.
par(mfrow = c(2,2))
plot(reg.summary$rss ,xlab = "Number of Variables ", ylab = "RSS",
     type = "l")
plot(reg.summary$adjr2 ,xlab = "Number of Variables ",
     ylab = "Adjusted RSq",type = "l")
best.adjr2 <- which.max(reg.summary$adjr2)
points(best.adjr2,
       reg.summary$adjr2[best.adjr2], col = "red", cex = 2, pch = 20)
plot(reg.summary$cp ,xlab = "Number of Variables ", ylab = "Cp", type = "l")
best.cp <- which.min(reg.summary$cp)
points(best.cp,
       reg.summary$cp[best.cp], col = "red", cex = 2, pch = 20)
plot(reg.summary$bic ,xlab = "Number of Variables ",ylab = "BIC", type = "l")
best.bic <- which.min(reg.summary$bic)
points(best.bic,
       reg.summary$bic[best.bic],col = "red",cex = 2,pch = 20)
# Built-in regsubsets plots: variable membership shaded by criterion.
plot(regfit.full, scale = "r2")
plot(regfit.full, scale = "adjr2")
plot(regfit.full, scale = "bic")
# Coefficients of the BIC-optimal model.
coef(regfit.full, which.min(reg.summary$bic))
# Forward stepwise selection over all 19 predictors.
regfit.fwd <- regsubsets(salary ~., hitters, nvmax = 19, method = "forward")
reg.summary <- summary(regfit.fwd)
reg.summary
which.min(reg.summary$bic)
subsets_plots(reg.summary, 19)
# Backward stepwise selection.
regfit.bwd <- regsubsets(salary ~., hitters, nvmax = 19, method = "backward")
reg.summary <- summary(regfit.bwd)
reg.summary
which.min(reg.summary$bic)
subsets_plots(reg.summary, 19)
# The three methods can pick different 7-variable models; compare them.
coef(regfit.full, 7)
coef(regfit.fwd, 7)
coef(regfit.bwd, 7)
# 6.5.3
# Validation-set approach: random TRUE/FALSE split of the rows.
set.seed(1)
train = sample(c(TRUE, FALSE), nrow(hitters), replace = TRUE)
test = !train
regfit.best <- regsubsets(salary ~., data = hitters[train,], nvmax = 19)
#create a design matrix
test.mat <- model.matrix(salary ~., data = hitters[test,])
val.errors = rep(NA, 19)
# Validation-set error for the best model of size `i`.
#
# Despite the historical name, this returns the test-set MSE (mean
# squared prediction error), not the RSS, and no square root is taken
# (the original comment claimed one).  The original also assigned into
# `val.errors[i]`, which only modified a function-local copy of the
# global vector -- a misleading no-op removed here; the return value
# is unchanged.
#
# Relies on `regfit.best`, `test.mat`, `hitters` and `test` from the
# enclosing script.
calc_rss <- function(i) {
    # extract coefficients of the best i-variable model
    coefi <- coef(regfit.best, id = i)
    # multiply design matrix by coefficients to get predictions
    pred <- test.mat[, names(coefi)] %*% coefi
    # mean squared residuals on the held-out observations
    mean((hitters$salary[test] - pred)^2)
}
# Test MSE for each model size; report and plot the minimiser.
test_mse_vals <- sapply(1:19, calc_rss)
test_mse_vals[which.min(test_mse_vals)]
which.min(test_mse_vals)
coef(regfit.best, which.min(test_mse_vals))
data <- tibble(size = c(1:19), mse = test_mse_vals)
ggplot(data, aes(x = size, y = mse)) +
    geom_point(color = "green")
# Prediction method for `regsubsets` objects (leaps provides none).
#
# Rebuilds the design matrix for `newdata` from the formula recorded in
# the fitting call, then multiplies the columns matching the size-`id`
# model's coefficients.  Returns a column vector of predictions.
predict.regsubsets <- function(object, newdata, id, ...) {
    model_form <- as.formula(object$call[[2]])
    design <- model.matrix(model_form, newdata)
    beta <- coef(object, id = id)
    design[, names(beta)] %*% beta
}
#use full data. select model with 10 variables
regfit.best <- regsubsets(salary ~., data = hitters, nvmax = 19)
coef(regfit.best, 10)
# USING CROSS-VALIDATION
k = 10
set.seed(1)
# Assign each row to one of k folds at random.
folds <- sample(1:k, nrow(hitters), replace = TRUE)
cv.errors <- matrix(NA, k, 19, dimnames = list(NULL, paste(1:19)))
# For each fold j: fit best subsets on the other folds, then score every
# model size i on fold j.
for (j in 1:k) {
    best.fit <- regsubsets(salary ~., data = hitters[folds != j,], nvmax = 19)
    for (i in 1:19) {
        pred = predict.regsubsets(best.fit, hitters[folds == j,], id = i)
        cv.errors[j, i] <- mean((hitters$salary[folds == j] - pred)^2)
    } # end of inside for loop
} # end of outside for loop
# average columns to get average mse across folds (k) by number of variables (i)
mean.cv.errors = apply(cv.errors, 2, mean)
plot(mean.cv.errors, type = 'b')
# Refit on the full data and report the CV-selected model size.
reg.best = regsubsets(salary ~., data = hitters, nvmax = 19)
coef(reg.best, 11) # 11 is obtained from cross validation
# 6.6 Ridge regression and lasso
rm(list = ls())
hitters <- as.tibble(Hitters)
glimpse(hitters)
names(hitters) <- tolower(names(hitters))
dim(hitters)
sum(is.na(hitters$salary))
# drop any row in dataframe with missing values
hitters <- hitters[complete.cases(hitters),]
sum(is.na(hitters$salary))
# create design matrix
x = model.matrix(salary ~., hitters)[,-1] #removed the column with 1s (ones)
y = hitters$salary
# RIDGE alpha = 0
# Lambda grid from 10^10 (near-null model) down to 10^-2 (near OLS).
grid <- 10^seq(10, -2, length = 100)
ridge.mod = glmnet(x, y, alpha = 0, lambda = grid)
# lambda = 11498
ridge.mod$lambda[50]
coef(ridge.mod)[,50]
#l2 norm
sqrt(sum(coef(ridge.mod)[-1, 50]^2))
# lambda = 705 (smaller), coefficients bigger
ridge.mod$lambda[60]
coef(ridge.mod)[,60]
#l2 norm
sqrt(sum(coef(ridge.mod)[-1, 60]^2))
# obtain ridge regression coefficients for a new value of lambda = 50
predict(ridge.mod, s = 50, type = "coefficients")
# Use train and test set
set.seed(1)
train <- sample(1:nrow(x), nrow(x)/2)
test <- (-train)
y.test <- y[test]
# ridge on training set
ridge.mod <- glmnet(x[train,], y[train],
                    alpha = 0, lambda = grid, thresh = 1e-12)
# get predictions for test set
ridge.pred <- predict(ridge.mod, s = 4, newx = x[test, ])
mean((ridge.pred - y.test)^2)
# for an Intercept-only model. Use the mean of the training observations
mean((mean(y[train]) - y.test)^2)
# or use a very high Lambda value - drive coefficients to zero
ridge.pred = predict(ridge.mod, s = 1e10, newx = x[test,])
mean((ridge.pred - y.test)^2)
# lamda = 0 -> Least Squares results
# exact = TRUE refits at s = 0 rather than interpolating the lambda path,
# which is why the training x and y must be supplied again.
ridge.pred <- predict(ridge.mod,
                      s = 0,
                      x = x[train,],
                      y = y[train],
                      newx = x[test,],
                      exact = TRUE)
mean((ridge.pred - y.test)^2)
# compare with least squares
lm(y ~ x, subset = train)
predict(ridge.mod,
        s = 0,
        x = x[train,],
        y = y[train],
        exact = T,
        type = "coefficients")[1:20,]
# Using built-in cross validation function to select best lambda
set.seed(1)
cv.out <- cv.glmnet(x[train, ], y[train], alpha = 0)
plot(cv.out)
bestlam = cv.out$lambda.min
bestlam
# test MSE for lambda = bestlam
ridge.pred <- predict(ridge.mod, s = bestlam, newx = x[test, ])
mean((ridge.pred - y.test)^2)
# fit to full data set using lambda = bestlam
out <- glmnet(x, y, alpha = 0)
predict(out, type = "coefficients", s = bestlam)[1:20,]
# LASSO
# alpha = 1 selects the lasso penalty in glmnet.
lasso.mod <- glmnet(x[train,], y[train], alpha = 1, lambda = grid)
plot(lasso.mod)
set.seed(1)
# Cross-validate lambda on the training half.
cv.out <- cv.glmnet(x[train,], y[train], alpha = 1)
plot(cv.out)
bestlam <- cv.out$lambda.min
lasso.pred <- predict(lasso.mod, s = bestlam, newx = x[test,])
mean((lasso.pred - y.test)^2)
# Fit to full dataset
out <- glmnet(x, y, alpha = 1, lambda = grid)
# Lasso drives some coefficients exactly to zero (variable selection).
lasso.coef <- predict(out, type = "coefficients", s = bestlam)[1:20,]
lasso.coef
plot(out)
### 6.7.1 PCR and PLS
rm(list = ls())
hitters <- as.tibble(Hitters)
glimpse(hitters)
names(hitters) <- tolower(names(hitters))
dim(hitters)
sum(is.na(hitters$salary))
# drop any row in dataframe with missing values
hitters <- hitters[complete.cases(hitters),]
sum(is.na(hitters$salary))
x = model.matrix(salary ~., hitters)[,-1] #removed the column with 1s (ones)
y = hitters$salary
set.seed(1)
train <- sample(1:nrow(x), nrow(x)/2)
test <- (-train)
y.test <- y[test]
# Principal components regression with 10-fold CV on the full data.
set.seed(2)
pcr.fit <- pcr(salary ~., data = hitters, scale = TRUE, validation = "CV")
summary(pcr.fit)
validationplot(pcr.fit, val.type = "MSEP")
# use train data
set.seed(1)
pcr.fit <- pcr(salary ~., data = hitters,
               scale = TRUE, validation = "CV",
               subset = train)
validationplot(pcr.fit, val.type = "MSEP")
# predict using test data (M = 7, minimum MSEP)
pcr.pred <- predict(pcr.fit, x[test,], ncomp = 7)
mean((pcr.pred - y.test)^2)
# fit on full model using M = 7
pcr.fit <- pcr(y ~ x, scale = TRUE, ncomp = 7)
summary(pcr.fit)
# PLS: Partial Least Squares
set.seed(1)
pls.fit <- plsr(salary ~., data = hitters, subset = train,
                scale = TRUE,
                validation = "CV")
summary(pls.fit)
validationplot(pls.fit, val.type = "MSEP")
# evaluate on test set with M = 2
pls.pred <- predict(pls.fit, x[test, ], ncomp = 2)
mean((pls.pred - y.test)^2)
# fit on full dataset with M = 2
pls.fit <- plsr(salary ~., data = hitters, scale = TRUE, ncomp = 2)
summary(pls.fit)
### Exercises: Applied
rm(list = ls())
# 8
# Simulate y as a cubic polynomial of x plus Gaussian noise.
set.seed(1)
X <- rnorm(100)
e <- rnorm(100)
#Y <- 0.5 + 3 * X + 1.5 * (X^2) - 2.0 * (X^3) + e
b0 <- 2
b1 <- 3
b2 <- -1
b3 <- -0.5
Y <- b0 + b1 * X + b2 * (X^2) + b3 * (X^3) + e
data <- tibble(X, Y)
#SUBSETS
# Best-subset selection over degree-10 (orthogonal) polynomial terms.
regfit.full <- regsubsets(Y ~ poly(X, 10), data, nvmax = 10)
reg.summary <- summary(regfit.full)
#USING ggplot
nvars <- regfit.full$np - 1 #number of variables
source("subsets-plots.R")
subsets_plots(reg.summary, nvars)
coef(regfit.full, 3)
# forward and backward
regfit.fwd <- regsubsets(Y ~ poly(X, 10), data, nvmax = 10, method = "forward")
reg.fwd.summary <- summary(regfit.fwd)
nvars <- regfit.fwd$np - 1
subsets_plots(reg.fwd.summary, nvars)
coef(regfit.fwd, 3)
regfit.bwd <- regsubsets(Y ~ poly(X, 10), data, nvmax = 10, method = "backward")
reg.bwd.summary <- summary(regfit.bwd)
nvars <- regfit.bwd$np - 1
subsets_plots(reg.bwd.summary, nvars)
coef(regfit.bwd, 3)
#LASSO
grid <- 10^seq(10, -2, length = 100)
x = model.matrix(Y ~ poly(X, 10), data = data)[,-1] #removed the column with 1s (ones)
y = Y
fit_lasso <- glmnet(x, y, alpha = 1)
plot(fit_lasso)
set.seed(1)
cv_lasso <- cv.glmnet(x, y, alpha = 1)
plot(cv_lasso)
bestlam <- cv_lasso$lambda.min
predict(fit_lasso, s = bestlam, type = "coefficients")[1:11,]
# Part (f): response generated from a single degree-7 term.
set.seed(1)
x <- rnorm(100)
e <- rnorm(100)
b7 <- 0.8
# Y <- b0 + b7 * I(X^7) + e
# plot(X, Y)
# NOTE(review): `data` is built from the *old* X and Y (the cubic
# response above) while `y` is recomputed on the next line from the
# new x -- verify which data set the lasso below is meant to use.
data <- tibble(x = X, y = Y)
y <- b0 + b7 * x^7 + e
plot(x, y)
data.full <- data.frame(y = y, x = x)
regfit.full <- regsubsets(y ~ x + I(x^2) + I(x^3) + I(x^4) + I(x^5) + I(x^6) + I(x^7) + I(x^8) + I(x^9) + I(x^10), data = data.full, nvmax = 10)
#SUBSETS
#regfit.full <- regsubsets(y ~ poly(x, 10), data, nvmax = 10)
reg.summary <- summary(regfit.full)
#USING ggplot
nvars <- regfit.full$np - 1 #number of variables
source("subsets-plots.R")
subsets_plots(reg.summary, nvars)
coef(regfit.full, 1)
# LASSO
x = model.matrix(y ~ poly(X, 10, raw = TRUE), data = data)[,-1]
#x = model.matrix(Y ~ poly(X, 10, raw = FALSE), data = data)[,-1]
fit_lasso <- glmnet(x, y, alpha = 1)
plot(fit_lasso)
cv_lasso <- cv.glmnet(x, y, alpha = 1)
plot(cv_lasso)
bestlam <- cv_lasso$lambda.min
predict(fit_lasso, s = bestlam, type = "coefficients")[1:11, ]
# EXAMPLE 9
rm(list = ls())
set.seed(11)
college <- as.tibble(College)
glimpse(college)
college <- college[complete.cases(college),]
dim(college)
names(college) <- tolower(names(college))
#train = sample(c(TRUE, FALSE), nrow(college), replace = TRUE)
#test = !train
#table(train, test)
train = sample(1:dim(College)[1], dim(College)[1] / 2)
test <- -train
# linear model
lm.model <- lm(apps ~., data = college, subset = train)
apps.pred <- predict(lm.model, newdata = college[test,])
# NOTE(review): despite the *_rmse names, these are MSEs -- no square
# root is taken anywhere in this exercise.
lm_rmse <- mean((apps.pred - college$apps[test])^2)
lm_rmse
# ridge
X <- model.matrix(apps ~., data = college)[,-1]
Y <- college$apps
grid <- 10^seq(10, -2, length = 100)
#ridge.mod <- glmnet(X[train,], Y[train],
#                    alpha = 0, lambda = grid, thresh = 1e-12)
cv.out <- cv.glmnet(X[train, ], Y[train], alpha = 0,
                    lambda = grid, thresh = 1e-12)
plot(cv.out)
bestlam <- cv.out$lambda.min
test.pred <- predict(cv.out, newx = X[test,], s = bestlam)
ridge_rmse <- mean((test.pred - Y[test])^2)
ridge_rmse
# Refit ridge on the full data at the CV-selected lambda.
ridge.full.mod <- glmnet(X, Y, alpha = 0, lambda = grid, thresh = 1e-12)
predict(ridge.full.mod, s = bestlam, type = "coefficients")
# lasso
cv.out <- cv.glmnet(X[train, ], Y[train], alpha = 1,
                    lambda = grid, thresh = 1e-12)
plot(cv.out)
bestlam <- cv.out$lambda.min
test.pred <- predict(cv.out, newx = X[test,], s = bestlam)
# NOTE(review): MSE despite the name (see comment above).
lasso_test_rmse <- mean((test.pred - Y[test])^2)
lasso_test_rmse
out = glmnet(X, Y, alpha = 1)
predict(out, type = "coefficients", s = bestlam)
#PCR
pcr.fit <- pcr(apps ~., data = college,
               scale = TRUE, validation = "CV",
               subset = train)
validationplot(pcr.fit, val.type = "MSEP")
# predict using test data (M = 10, minimum MSEP)
pcr.pred <- predict(pcr.fit, X[test,], ncomp = 10)
pcr_rmse <- mean((pcr.pred - Y[test])^2)
pcr_rmse
# fit on full model using M = 7
pcr.fit <- pcr(Y ~ X, scale = TRUE, ncomp = 10)
summary(pcr.fit)
# PLS
pls.fit <- plsr(apps ~., data = college, subset = train,
                scale = TRUE,
                validation = "CV")
summary(pls.fit)
validationplot(pls.fit, val.type = "MSEP")
# evaluate on test set with M = 10
pls.pred <- predict(pls.fit, X[test, ], ncomp = 10)
plsr_rmse <- mean((pls.pred - Y[test])^2)
# fit on full dataset with M = 10
pls.fit <- plsr(Y ~ X, scale = TRUE, ncomp = 10)
summary(pls.fit)
# Example 10
# Simulate n = 1000, p = 20 with five true coefficients set to zero.
set.seed(1)
X <- as.matrix(sapply(1:20, function(x) rnorm(1000)))
# matrix(rnorm(1000 * 20), 1000, 20)
dimnames(X) <- list(NULL, paste("x_", 1:20, sep = ""))
e <- rnorm(1000)
betas <- rnorm(20)
betas[c(3, 4, 9, 10, 19)] <- 0 #set some betas to zero
names(betas) <- paste("x_", 1:20, sep = "")
Y <- X %*% betas + e # dot product matric X and betas
hist(Y)
data_xy <- as.tibble(x = X)
data_xy["y"] <- Y
head(data_xy)
# Small training set (100 rows) relative to the 900-row test set.
train <- sample(c(1:1000), 100)
test <- -(train)
#subsets
train.fit <- regsubsets(y ~., data = data_xy[train,], nvmax = 20)
train.fit.summary <- summary(train.fit)
#subsets_plots(train.fit.summary, 20)
# melt(train.fit.summary$rss) %>%
#     mutate(rmse = value/length(train)) %>%
#     ggplot(aes(x = seq_along(rmse), y = rmse)) +
#     geom_line(color = "dodgerblue", alpha = 0.7) +
#     labs(x = "Number of variables", y = "rmse") +
#     scale_x_continuous(breaks = seq_along(1:20))
test.mat <- model.matrix(y ~., data = data_xy[test,])
val.errors = rep(NA, 20)
# calculate mse for each train model on test set
#
# Test-set MSE for the best model of size `i`.
#
# FIX: the original summed the squared residuals (an SSE) even though
# the function is named calc_mse and its output is plotted against the
# training curve computed as RSS / n below -- i.e. the two series were
# on different scales.  Using the mean puts both in MSE units; the
# which.min() over the results is unaffected because the test-set size
# is constant across i.  The original also assigned into a local copy
# of the global `val.errors` (a no-op), removed here.
#
# Relies on `train.fit`, `test.mat` and `data_xy` from the script.
calc_mse <- function(i) {
    # extract coefficients of the best i-variable model
    coefi <- coef(train.fit, id = i)
    # multiply design matrix by coefficients to get predictions
    pred <- test.mat[, names(coefi)] %*% coefi
    # mean squared prediction error on the held-out rows
    mean((data_xy$y[test] - pred)^2)
}
# Score every model size on the test set and find the minimiser.
test_mse <- sapply(1:20, calc_mse)
which.min(test_mse)
# melt(test_mse) %>%
#     ggplot(aes(x = seq_along(value), y = value)) +
#     geom_line(color = "dodgerblue", alpha = 0.7) +
#     labs(x = "Number of variables", y = "rmse") +
#     scale_x_continuous(breaks = seq_along(1:20))
# Join train and test error curves by model size and plot together.
test_mse_vals <- as.tibble(melt(test_mse, value.name = "test")) %>%
    rowid_to_column("id")
train_mse_vals <- as.tibble(melt(train.fit.summary$rss/length(train),
                                 value.name = "train")) %>%
    rowid_to_column("id")
inner_join(train_mse_vals, test_mse_vals, by = "id") %>%
    melt(id.vars = "id") %>%
    rename(data = variable, mse = value) %>%
    ggplot(aes(x = id, y = mse, color = data)) +
    geom_line()
coef(train.fit, which.min(test_mse))
betas
#for (i in paste("x_", 1:20, sep = "")) {
#    print(unlist(map(1:20, function(x) coef(train.fit, x))[2])[i])
#}
# Add a zero intercept so estimated coefficients line up with truth.
betas <- c('(Intercept)' = 0, betas)
#coeffs_i <- sapply(1:20, function(x) coef(train.fit, x))
# g[r]: Euclidean distance between the true betas and the size-r
# model's estimates (absent predictors implicitly contribute beta^2).
g <- rep(NA, 20)
for (r in 1:20) {
    sum_r <- 0
    for (j in (names(coef(train.fit, r)))) {
        #print(sum((betas[j] - coef(train.fit, r)[j])^2))
        sum_r = sum_r + ((betas[j] - coef(train.fit, r)[j])^2)
    }
    #names(sum_r) <- NULL
    g[r] <- sqrt(sum_r)
}
qplot(x = c(1:20), y = g, geom = "path")
# Exercise 11 ############################
# Predict per-capita crime rate (crim) in the Boston data.
rm(list = ls())
set.seed(1)
boston <- as.tibble(Boston)
boston
glimpse(boston)
# test, train
train <- sample(c(TRUE, FALSE), nrow(boston), replace = TRUE)
test <- !train
table(train, test)
test.mat <- model.matrix(crim ~., data = boston[test,])
# subsets
sub.all <- regsubsets(crim ~., data = boston, nvmax = (length(boston) - 1))
sub.fwd <- regsubsets(crim ~., data = boston, nvmax = (length(boston) - 1),
                      method = "forward")
sub.bwd <- regsubsets(crim ~., data = boston, nvmax = (length(boston) - 1),
                      method = "backward")
# NOTE(review): the reassignments below were evidently run one at a
# time interactively to reuse the summary code for each method; as a
# script only the last assignment (sub.bwd) takes effect.
sub.all <- sub.fwd
sub.all <- sub.bwd
sub.all.summary <- summary(sub.all)
subsets_plots(sub.all.summary, (length(boston) - 1))
which.min(sub.all.summary$bic)
#theoretical correction chooses full, fwd = 3, bwd = 4
rss.all <- sub.all.summary$rss[which.min(sub.all.summary$bic)]
rss.fwd <- sub.all.summary$rss[which.min(sub.all.summary$bic)]
rss.bwd <- sub.all.summary$rss[which.min(sub.all.summary$bic)]
# use train and test data
train.sub.all <- regsubsets(crim ~., data = boston[train,],
                            nvmax = length(boston))
#train.sub.all.summary <- summary(train.sub.all)
#subsets_plots(train.sub.all.summary, length(boston))
#which.min(train.sub.all.summary$bic)
train.sub.fwd <- regsubsets(crim ~., data = boston[train,],
                            nvmax = length(boston), method = "forward")
train.sub.bwd <- regsubsets(crim ~., data = boston[train,],
                            nvmax = length(boston), method = "backward")
train.sub.all <- train.sub.fwd
train.sub.all <- train.sub.bwd
val.errors = rep(NA, length(boston) - 1)
# calculate mse for each train model on test set
# Test-set MSE for the best model of size `i` from `fitted_model`.
#
# FIX: the original assigned the result into `val.errors[i]`, which
# only modified a function-local copy of the global vector (a
# misleading no-op), and its last comment claimed a square root that
# was never taken.  The return value is unchanged: the mean squared
# prediction error on the held-out rows.
#
# Relies on `test.mat`, `boston` and `test` from the enclosing script.
calc_mse <- function(i, fitted_model) {
    # extract coefficients of the best i-variable model
    coefi <- coef(fitted_model, id = i)
    # multiply design matrix by coefficients to get predictions
    pred <- test.mat[, names(coefi)] %*% coefi
    # mean squared residuals on the held-out observations
    mean((boston$crim[test] - pred)^2)
}
# Validation-set errors for every model size of the last-fitted method.
test_all_mse <- sapply(1:(length(boston) - 1), calc_mse, train.sub.all)
which.min(test_all_mse) #using test set selects 2
hold_all <- test_all_mse[which.min(test_all_mse)]
hold_fwd <- test_all_mse[which.min(test_all_mse)]
hold_bwd <- test_all_mse[which.min(test_all_mse)]
qplot(x = 1:13, y = test_all_mse, geom = "path")
# full, fwd = 2, bwd = 4
# Use cross-validation
# USING CROSS-VALIDATION
k = 10 # total folds
# set.seed(1)
folds <- sample(1:k, nrow(boston), replace = TRUE)
cv.errors <- matrix(NA, k, 13, dimnames = list(NULL, paste(1:13)))
# Prediction method for `regsubsets` objects (re-declared for this
# exercise).  The design matrix for `newdata` is rebuilt from the
# formula stored in the fitting call, and the columns matching the
# size-`id` model's coefficients are multiplied through.  Returns a
# column vector of predictions.
predict.regsubsets <- function(object, newdata, id, ...) {
    fit_formula <- as.formula(object$call[[2]])
    dmat <- model.matrix(fit_formula, newdata)
    coef_vec <- coef(object, id = id)
    dmat[, names(coef_vec)] %*% coef_vec
}
# write a function that returns the mse. also pass in the method for regsubs
# method = c("exhaustive", "backward", "forward")
# k = 1 is not working. Why?
# k-fold CV: for each held-out fold j, fit subsets on the rest and
# score every model size i on fold j.
for (j in 1:k) {
    best.fit <- regsubsets(crim ~., data = boston[folds != j,],
                           nvmax = 13)
    for (i in 1:13) {
        pred = predict.regsubsets(best.fit, boston[folds == j,], id = i)
        cv.errors[j, i] <- mean((boston$crim[folds == j] - pred)^2)
    } # end of inside for loop
} # end of outside for loop
# average columns to get average mse across folds (k) by number of variables (i)
mean.cv.errors = apply(cv.errors, 2, mean)
plot(mean.cv.errors, type = 'b')
which.min(mean.cv.errors)
cv.full <- mean.cv.errors[which.min(mean.cv.errors)]
cv.fwd <- mean.cv.errors[which.min(mean.cv.errors)]
cv.bwd <- mean.cv.errors[which.min(mean.cv.errors)]
reg.best = regsubsets(crim ~., data = boston, nvmax = 13)
coefs_all <- coef(reg.best, which.min(mean.cv.errors))
coefs_fwd <- coef(reg.best, which.min(mean.cv.errors))
coefs_bwd <- coef(reg.best, which.min(mean.cv.errors))
# bwd, fwd = 12, all = 11
# ------------------ end cross validataion k = 10
# end of full subset
# Forward and backward subsets
# Ridge
# create design matrix
set.seed(1)
x = model.matrix(crim ~., boston)[,-1] #removed the column with 1s (ones)
y = boston$crim
# RIDGE alpha = 0
grid <- 10^seq(10, -2, length = 100)
# CV on the training half to pick lambda.
cv.ridge.mod = cv.glmnet(x[train,], y[train], alpha = 0)
plot(cv.ridge.mod)
bestlam = cv.ridge.mod$lambda.min
bestlam
ridge.mod <- glmnet(x[train,], y[train],
                    alpha = 0, lambda = grid, thresh = 1e-12)
ridge.pred <- predict(ridge.mod, s = bestlam, newx = x[test,])
mean((ridge.pred - y[test])^2)
# Least squares MSE for comparison because lambda is very small
# very slight improvement
ridge.pred <- predict(ridge.mod, s = 0, newx = x[test,])
mean((ridge.pred - y[test])^2)
# fit to full data set using lambda = bestlam
out <- glmnet(x, y, alpha = 0)
predict(out, type = "coefficients", s = bestlam)[1:14,]
### end of ridge
## LASSO
cv.lasso.mod = cv.glmnet(x[train,], y[train], alpha = 1)
plot(cv.lasso.mod)
bestlam = cv.lasso.mod$lambda.min
bestlam
lasso.mod <- glmnet(x[train,], y[train],
                    alpha = 1, lambda = grid, thresh = 1e-12)
lasso.pred <- predict(lasso.mod, s = bestlam, newx = x[test,])
mean((lasso.pred - y[test])^2)
# is the same as least squares but with five predictors
#full model
out <- glmnet(x, y, alpha = 1)
predict(out, type = "coefficients", s = bestlam)
plot(out)
## End of lasso
### PCR
pcr.fit <- pcr(crim ~., data = boston,
               scale = TRUE, validation = "CV",
               subset = train)
validationplot(pcr.fit, val.type = "MSEP")
# predict using test data (M = 8, minimum MSEP)
pcr.pred <- predict(pcr.fit, x[test,], ncomp = 8)
mean((pcr.pred - y[test])^2)
# fit on full model using M = 7
pcr.fit <- pcr(y ~ x, scale = TRUE, ncomp = 8)
summary(pcr.fit)
|
d476dc18f2b983680b3e87aeba9f73f127e98133
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/lfl/R/perceive.R
|
015676cb5202a2b86b2ab3d499253b6b5a9920cf
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,973
|
r
|
perceive.R
|
# Is rule r1 a specialization of rule r2?
#
# Rules can only be in a specificity relation when their consequents
# (the first element of each rule vector) coincide; otherwise they are
# incomparable and FALSE is returned.  For matching consequents the
# antecedent comparison is delegated to .is.specific().
.isspec <- function(r1, r2, vars, specs) {
    if (r1[1] != r2[1]) {
        return(FALSE)
    }
    .is.specific(r1[-1], r2[-1], vars, specs)
}
# Global perception, delegated to the compiled routine in the lfl
# package.  Rules are re-encoded as 0-based codes in the ordering of
# names(vars); the C side returns the indices of rules to keep.
.perceiveGlobal <- function(rules, vars, specs) {
    var_names <- names(vars)
    config <- list(
        rules = lapply(rules, function(r) {
            # 0-based numeric codes of each predicate (note: `- 1`
            # yields doubles, matching the original encoding)
            as.integer(factor(r, levels = var_names)) - 1
        }),
        vars = as.numeric(as.factor(vars[var_names])),
        specs = specs[var_names, var_names])
    keep <- .Call("perceiveGlobal", config, PACKAGE = "lfl")
    rules[keep]
}
# Local perception: scan all rule pairs; whenever one rule is a
# specialization of the other, drop the rule with the lower firing
# degree (ties are resolved in favour of the more specific rule).
# Returns the indices of the surviving rules.
.perceiveLocal <- function(rules, vars, specs, fired) {
    len <- length(rules)
    if (len <= 1) {
        return(seq_along(rules))
    }
    res <- rep(TRUE, len)
    for (i in 1:(len-1)) {
        if (res[i]) {
            ri <- rules[[i]]
            for (j in (i+1):len) {
                if (res[j]) {
                    rj <- rules[[j]]
                    rispec <- .isspec(ri, rj, vars, specs)
                    # BUG FIX: the original computed .isspec(ri, rj, ...)
                    # twice, so rjspec always equalled rispec and the
                    # branch below that removes rule i on specificity
                    # grounds alone was unreachable.  The reverse
                    # comparison is intended here.
                    rjspec <- .isspec(rj, ri, vars, specs)
                    if (rispec || rjspec) {
                        if (fired[i] > fired[j]) {
                            # rule i fired more strongly -> discard j
                            res[j] <- FALSE
                            break
                        } else if (fired[i] < fired[j]) {
                            # rule j fired more strongly -> discard i
                            res[i] <- FALSE
                            break
                        } else {
                            # equal firing: keep the more specific rule
                            if (rispec) {
                                res[j] <- FALSE
                                break
                            } else {
                                res[i] <- FALSE
                                break
                            }
                        }
                    }
                }
            }
        }
    }
    return(seq_along(rules)[res])
}
# Filter redundant rules via 'perception'.
#
# rules: a list of rules; each rule is a vector whose first element is
#        the consequent and the remaining elements the antecedent
#        predicates.
# vars:  a named vector mapping predicate names to their variables.
# specs: a numeric specificity matrix whose row/column names equal
#        names(vars).
# type:  'global' delegates to the compiled routine; 'local' compares
#        rules pairwise using their firing degrees.
# fired: numeric vector of firing degrees, required when type='local'.
#
# Returns the filtered rules ('global') or indices of kept rules
# ('local').  Stops with a descriptive error on invalid input.
perceive <- function(rules,
                     vars,
                     specs,
                     type=c('global', 'local'),
                     fired=NULL) {
    type <- match.arg(type)
    if (!is.list(rules) && !is.null(rules)) {
        stop("'rules' must be a list of rules")
    }
    if (!is.vector(vars) || is.null(names(vars))) {
        stop("'vars' must be a named vector")
    }
    if (!is.matrix(specs) || !is.numeric(specs) || length(vars) != ncol(specs)
            || length(vars) != nrow(specs) || any(names(vars) != colnames(specs))
            || any(names(vars) != rownames(specs))) {
        stop("'specs' must be a numeric matrix with colnames and rownames equal to 'names(vars)'")
    }
    # Every predicate appearing in a rule antecedent must have an entry
    # in vars.
    unlisted <- unique(unlist(antecedents(rules)))
    if (length(intersect(unlisted, names(vars))) != length(unlisted)) {
        stop("'vars' must contain values for each predicate in 'rules'")
    }
    if (type == 'local') {
        if (!is.vector(fired) || !is.numeric(fired)) {
            stop("If type of perception is 'local' then 'fired' must be a numeric vector")
        }
        return(.perceiveLocal(rules, vars, specs, fired))
    } else {
        return(.perceiveGlobal(rules, vars, specs))
    }
}
|
5137ea73188ae4f0e39f4efd120a3c7c6d8de887
|
b7b3f9d69a29a3034d62d8ab95088a900b592921
|
/man/Antibiotics.Rd
|
fedc3c57056271fb7ca90cec581e6ba0e62f0aef
|
[] |
no_license
|
sp2019-antibiotics/ECOFFBayes
|
c970bd8562182dfe528f8c1e54726b5f5f4dad25
|
9eb720d9d9769b496ebee88bcd1219373c40faea
|
refs/heads/master
| 2020-06-04T20:30:19.605086
| 2019-08-28T13:56:13
| 2019-08-28T13:56:13
| 192,181,118
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 563
|
rd
|
Antibiotics.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Antibiotics.R
\docType{data}
\name{Antibiotics}
\alias{Antibiotics}
\title{Example data set for antibiotic resistance data}
\format{A flat vector of size 500.}
\usage{
Antibiotics
}
\description{
This data set is a simulated data set with n = 500 observations.
}
\details{
This simulated data set may be used to test and work with the functionality of the "ECOFFBayes" package. In total it contains 500 observations
that should be similar to the EUCAST data sets.
}
\keyword{datasets}
|
a5094347596ad2e647708d4272d3d9fedb0b9e90
|
ae6586eb680d1cbcd14c38d19a0e6aed87381d77
|
/analysis/cluster_functional_enrichment.R
|
73d4bc1616d7f9f760dc0bc804cc02b0a03f1f85
|
[] |
no_license
|
clemenshug/erk_senescence
|
fbdd722e529eaf5ec87d80fcef337777de525a6d
|
ac6b88a32e39bc66bbee2e970bc9c46acda9026d
|
refs/heads/master
| 2022-09-23T09:28:40.371472
| 2022-08-04T19:58:13
| 2022-08-04T19:58:13
| 227,699,320
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 27,399
|
r
|
cluster_functional_enrichment.R
|
library(tidyverse)
library(here)
library(synapser)
library(synExtra)
library(enrichR)
library(RColorBrewer)
library(pheatmap)
library(ggforce)
library(viridis)
library(furrr)
library(Homo.sapiens)
# Log in to Synapse and create a cached downloader plus the output folder.
synLogin()
syn <- synDownloader(here("tempdl"), followLink = TRUE)
wd <- here("functional_enrichment")
dir.create(wd, recursive = TRUE, showWarnings = FALSE)
# set directories, import files ------------------------------------------------
###############################################################################T
# Per-gene consensus cluster assignments
function_clusters <- syn("syn21576614") %>%
  read_csv()
# DESeq2 adjusted p-values
deseq_padj <- syn("syn21432184") %>%
  read_csv()
# Full DESeq2 results per condition
deseq_res <- syn("syn22686427") %>%
  read_csv()
# Human-readable cluster names; append an explicit "Unknown" level and fix
# the factor level order to the file order
cluster_names <- syn("syn21567677") %>%
  read_csv() %>%
  bind_rows(list(class_combined = "unknown", class_name = "Unknown")) %>%
  mutate_at(vars(class_name), . %>% as.factor() %>% fct_inorder())
# Temporal ordering of gene induction
temporal_ordering <- syn("syn21536903") %>%
  read_csv()
# Sample metadata; condition_meta keeps one row per experimental condition
meta <- syn("syn21432975") %>%
  read_csv()
condition_meta <- meta %>%
  distinct(condition, DMSO, ERKi, Time, DOX)
# Surface-fit p-values; strip stray quote characters from gene identifiers
surface_fit_p <- syn("syn22800020") %>%
  read_csv() %>%
  mutate(across(c(gene_id, gene_name), .fns = ~str_replace_all(.x, fixed("'"), "")))
# Metascape ----------------------------------------------------------------------
###############################################################################T
metascape_input <- function_clusters %>%
select(gene_name, consensus) %>%
filter(!consensus %in% c("no_response_0", "none")) %>%
group_by(consensus) %>%
mutate(id = 1:n()) %>%
ungroup() %>%
pivot_wider(id, names_from = consensus, values_from = gene_name, values_fill = "") %>%
select(-id)
write_csv(metascape_input, file.path(wd, "metascape_input_consensus_sets.csv"))
# Metascape all conditions -----------------------------------------------------
###############################################################################T
metascape_input <- deseq_res %>%
semi_join(
condition_meta %>%
filter(Time == 24),
by = "condition"
) %>%
filter(padj <= 0.05) %>%
group_by(condition) %>%
arrange(padj, .by_group = TRUE) %>%
slice_head(n = 500) %>%
distinct(gene_id, condition) %>%
mutate(id = 1:n()) %>%
ungroup() %>%
pivot_wider(id, names_from = condition, values_from = gene_id, values_fill = "") %>%
select(-id)
write_csv(metascape_input, file.path(wd, "metascape_input_all_conditions.csv"))
# Querying CMap ----------------------------------------------------------------
###############################################################################T
library(clueR)
library(biomaRt)
mart <- biomaRt::useMart(
biomart = "ENSEMBL_MART_ENSEMBL",
dataset = "hsapiens_gene_ensembl"
)
ensembl_gene_id_mapping_biomart <- biomaRt::select(
mart,
unique(deseq_res$gene_id),
c("entrezgene_id", "ensembl_gene_id"), "ensembl_gene_id"
) %>%
as_tibble() %>%
distinct()
clue_gmt <- deseq_res %>%
# semi_join(
# condition_meta %>%
# filter(Time == 24 | (DOX == 1 & ERKi %in% c(0, 1000))),
# by = "condition"
# ) %>%
filter(padj <= 0.05) %>%
arrange(padj, .by_group = TRUE) %>%
inner_join(
ensembl_gene_id_mapping_biomart,
by = c("gene_id" = "ensembl_gene_id")
) %>%
dplyr::transmute(
gene_set = condition,
gene_id = entrezgene_id,
direction = if_else(log2FoldChange > 0, "up", "down")
) %>%
mutate(
chunk = gene_set %>%
as.factor() %>%
as.integer() %>%
magrittr::divide_by_int(25)
) %>%
split(.$chunk) %>%
map(
clueR::clue_gmt_from_df, drop_invalid = TRUE
)
clue_jobs <- clue_gmt %>%
imap(
~clueR::clue_query_submit(
.x[["up"]], .x[["down"]],
name = paste0("erk_", .y),
use_fast_tool = FALSE
)
)
clue_result_files <- map_chr(
clue_jobs, clue_query_download
)
clue_results <- clue_result_files %>%
enframe("chunk", "result_path") %>%
crossing(
score_level = c("cell", "summary"),
result_type = c("pert", "pcl")
) %>%
mutate(
data = pmap(
.,
function(result_path, score_level, result_type, ...)
clue_parse_result(
result_path, score_level = score_level, result_type = result_type,
score_type = "tau"
)
)
) %>%
dplyr::select(-chunk, -result_path) %>%
unnest(data)
write_csv(
clue_results,
file.path(wd, "clue_results_all_conditions.csv.gz")
)
# Submit four clusters
clue_gmt <- tribble(
~direction, ~class_name, ~sign,
"up", "bell", "-",
"down", "bell", "+",
"up", "full_range", "+",
"down", "full_range", "-",
"up", "high_erk", "+",
"down", "high_erk", "-",
"up", "low_erk", "-",
"down", "low_erk", "+"
) %>%
mutate(class = paste(class_name, sign, sep = "_")) %>%
left_join(
function_clusters %>%
dplyr::select(gene_id, class = consensus),
by = "class"
) %>%
inner_join(
surface_fit_p,
by = "gene_id"
) %>%
arrange(class_name, direction, fdr_lratio) %>%
inner_join(
ensembl_gene_id_mapping_biomart,
by = c("gene_id" = "ensembl_gene_id")
) %>%
dplyr::select(
gene_set = class_name,
gene_id = entrezgene_id,
direction
) %>%
mutate(
chunk = gene_set %>%
as.factor() %>%
as.integer() %>%
magrittr::divide_by_int(25)
) %>%
split(.$chunk) %>%
map(
clueR::clue_gmt_from_df, drop_invalid = TRUE
)
clue_jobs <- clue_gmt %>%
imap(
~clueR::clue_query_submit(
.x[["up"]], .x[["down"]],
name = paste0("erk_", .y),
use_fast_tool = FALSE
)
)
clue_result_files <- map_chr(
clue_jobs, clue_query_download
)
clue_results <- clue_result_files %>%
enframe("chunk", "result_path") %>%
crossing(
score_level = c("cell", "summary"),
result_type = c("pert", "pcl")
) %>%
mutate(
data = pmap(
.,
function(result_path, score_level, result_type, ...)
clue_parse_result(
result_path, score_level = score_level, result_type = result_type,
score_type = "tau"
)
)
) %>%
dplyr::select(-chunk, -result_path) %>%
unnest(data)
write_csv(
clue_results,
file.path(wd, "clue_results_consensus_clusters.csv.gz")
)
# Store to synapse -------------------------------------------------------------
###############################################################################T
activity <- Activity(
"Connectivity map query",
used = c(
"syn21576614",
"syn22686427",
"syn22800020"
),
executed = "https://github.com/clemenshug/erk_senescence/blob/master/analysis/cluster_functional_enrichment.R"
)
syn_cmap <- synExtra::synMkdir("syn21432134", "functional_enrichment", "cmap", .recursive = TRUE)
c(
file.path(wd, "clue_results_consensus_clusters.csv.gz"),
file.path(wd, "clue_results_all_conditions.csv.gz")
) %>%
synStoreMany(syn_cmap, activity = activity)
# EnrichR ----------------------------------------------------------------------
###############################################################################T
function_clusters_enrichr <- function_clusters %>%
select(-consensus_n) %>%
gather("algorithm", "cluster", -gene_id, -gene_name) %>%
group_by(cluster, algorithm) %>%
summarize(
enrichment = list(
enrichr(
gene_name,
databases = c(
"GO_Biological_Process_2018",
"GO_Molecular_Function_2018",
"KEGG_2019_Human",
"Reactome_2016"
)
) %>%
bind_rows(.id = "database") %>%
as_tibble() %>%
arrange(desc(Combined.Score))
)
) %>%
ungroup()
# Plotting functions -----------------------------------------------------------
###############################################################################T
# Draw a row-clustered ggplot2 heatmap.
#
# df:        long-format data frame containing the columns referenced by
#            'aesthetic' and (optionally) 'facet_by'.
# aesthetic: aes() mapping with x, y and fill.
# facet_by:  optional column to facet on.
# ...:       accepted for call-site compatibility but currently unused.
plot_heatmap_gg <- function(df, aesthetic = aes(class, term, fill = signed_p), facet_by = NULL, ...) {
  facet_by_quo <- enquo(facet_by)
  # Symmetric colour limit at the 95th percentile of |fill| so a few
  # extreme values do not wash out the palette; values beyond it squish.
  abs_max <- df %>%
    pull(!!aesthetic[["fill"]]) %>%
    abs() %>%
    quantile(.95, names = FALSE, na.rm = TRUE) %>%
    round(1)
  # Build a y-by-(x, facet) matrix solely to derive a row ordering from
  # Ward-linkage hierarchical clustering.
  mat <- df %>%
    dplyr::select(
      !!aesthetic[["x"]],
      !!aesthetic[["y"]],
      !!aesthetic[["fill"]],
      !!facet_by_quo
    ) %>%
    {mutate(., !!aesthetic[["x"]] := paste(!!aesthetic[["x"]], !!facet_by_quo)) %>% dplyr::select(-!!facet_by_quo)} %>%
    spread(!!aesthetic[["x"]], !!aesthetic[["fill"]], fill = 0) %>%
    column_to_rownames(quo_name(aesthetic[["y"]])) %>%
    as.matrix()
  # FIX: "euclidian" is not a valid stats::dist() method name (methods are
  # matched with pmatch, so the misspelling raises an error).
  row_clust <- hclust(dist(mat, method = "euclidean"), "ward.D2")
  df_ready <- df %>%
    mutate(
      !!aesthetic[["y"]] := factor(!!aesthetic[["y"]], levels = rownames(mat)[row_clust$order])
    )
  # NOTE: the original added facet_wrap(vars(direction)) here, which was
  # immediately replaced by the facet_wrap() below; the dead spec is removed.
  ggplot(df_ready, aesthetic) +
    geom_raster() +
    scale_fill_distiller(palette = "RdBu", limits = c(-abs_max, abs_max), oob = scales::squish) +
    facet_wrap(vars(!!facet_by_quo)) +
    theme(axis.text.x = element_text(angle = 45, vjust = 0.5, hjust = 1))
}
# Draw a heatmap whose fill encodes TWO series at once: the data are split
# by 'fill_bi_var' (e.g. up/down), each half is mapped through its own
# single-hue palette, and the two colours are combined subtractively per
# cell. Row order comes from Ward-linkage hierarchical clustering.
#
# colormaps: optional named vector of palettes (one per level of
#            'fill_bi_var'); defaults to Reds/Blues.
plot_heatmap_gg_bivariate <- function(
  df, fill_bi_var, aesthetic = aes(class, term, fill = p_signed), colormaps = NULL
) {
  fill_bi_var_quo <- enquo(fill_bi_var)
  fill_bi_vars <- df %>%
    pull(!!fill_bi_var_quo) %>%
    unique()
  # Colour scale capped at the 95th percentile of |fill|.
  abs_max <- df %>%
    pull(!!aesthetic[["fill"]]) %>%
    abs() %>%
    quantile(.95, names = FALSE, na.rm = TRUE) %>%
    round(1)
  # y-by-(x, fill_bi_var) matrix, used only to derive the row ordering.
  mat <- df %>%
    dplyr::select(
      !!aesthetic[["x"]],
      !!aesthetic[["y"]],
      !!aesthetic[["fill"]],
      !!fill_bi_var_quo
    ) %>%
    mutate(!!aesthetic[["x"]] := paste(!!aesthetic[["x"]], !!fill_bi_var_quo)) %>%
    dplyr::select(-!!fill_bi_var_quo) %>%
    spread(!!aesthetic[["x"]], !!aesthetic[["fill"]]) %>%
    column_to_rownames(quo_name(aesthetic[["y"]])) %>%
    as.matrix()
  # FIX: "euclidian" is not a valid stats::dist() method name (methods are
  # matched with pmatch, so the misspelling raises an error).
  row_clust <- hclust(dist(mat, method = "euclidean"), "ward.D2")
  # One colour ramp per level of fill_bi_var over the domain [0, 1].
  pals <- (if (!is.null(colormaps)) {
    colormaps
  } else {
    set_names(c("Reds", "Blues"), fill_bi_vars)
  }) %>%
    map(scales::col_numeric, domain = c(0, 1))
  df_ready <- df %>%
    dplyr::select(
      !!aesthetic[["x"]],
      !!aesthetic[["y"]],
      !!aesthetic[["fill"]],
      !!fill_bi_var_quo
    ) %>%
    mutate(
      # Rescale fill to [0, 1] and clamp at 1.
      !!aesthetic[["fill"]] := scales::rescale(!!aesthetic[["fill"]], to = c(0, 1), from = c(0, abs_max)) %>%
        magrittr::inset(. > 1, 1),
      !!aesthetic[["y"]] := factor(!!aesthetic[["y"]], levels = rownames(mat)[row_clust$order])
    ) %>%
    spread(!!fill_bi_var_quo, !!aesthetic[["fill"]]) %>%
    mutate(
      # Subtractive colour mixing in RGB: 1 - ((1 - A) + (1 - B)).
      !!aesthetic[["fill"]] :=
        new("RGB", coords = 1 - (1 - (
          .[[fill_bi_vars[[1]]]] %>%
            pals[[fill_bi_vars[[1]]]]() %>%
            colorspace::hex2RGB()
        )@coords +
          1 - (
            .[[fill_bi_vars[[2]]]] %>%
              pals[[fill_bi_vars[[2]]]]() %>%
              colorspace::hex2RGB()
          )@coords)) %>%
        colorspace::hex(fixup = TRUE)
    )
  # The fill column now holds literal hex colours.
  ggplot(df_ready, aesthetic) +
    geom_raster() +
    scale_fill_identity()
}
# Draw a row-clustered pheatmap of a non-negative score matrix and return
# its gtable (silent = TRUE suppresses immediate drawing). Extra arguments
# are forwarded to pheatmap().
plot_heatmap <- function(mat, ...) {
  # Cap the colour scale at the 95th percentile of |values| so a few
  # extreme scores do not compress the rest of the palette.
  abs_max <- c(mat) %>%
    abs() %>%
    quantile(.95, names = FALSE, na.rm = TRUE) %>%
    round(1)
  breaks <- seq(0, abs_max, by = 0.1)
  cmap <- colorRampPalette(c("#ffffff", brewer.pal(7, "Reds")))(length(breaks))
  # FIX: "euclidian" is not a valid stats::dist() method name (methods are
  # matched with pmatch, so the misspelling raises an error).
  row_clust <- hclust(dist(mat, method = "euclidean"), "ward.D2")
  pheatmap(
    mat,
    color = cmap,
    breaks = breaks,
    cluster_rows = row_clust,
    cluster_cols = FALSE,
    silent = TRUE,
    ...
  )$gtable
}
# EnrichR heatmap --------------------------------------------------------------
###############################################################################T
clusters_go_enrichr_plot_data <- function_clusters_enrichr %>%
unnest(enrichment) %>%
group_nest(algorithm) %>%
mutate(
data = map(
data,
~.x %>%
mutate(
Term_combined = paste(str_sub(database, end = 4L), Term, sep = "_")
) %>%
arrange(desc(Combined.Score)) %>%
filter(
Term_combined %in% (
c(
filter(., Adjusted.P.value <= 0.05) %>%
group_by(cluster) %>%
dplyr::slice(1:5) %>%
ungroup() %>%
pull(Term_combined),
filter(., Adjusted.P.value <= 0.05) %>%
pull(Term_combined)
) %>%
unique() %>%
head(50)
)
) %>%
mutate(
neg_log10_p = -log10(Adjusted.P.value)
) %>%
inner_join(
cluster_names,
by = c("cluster" = "class_combined")
)
)
)
clusters_go_enrichr_plots <- clusters_go_enrichr_plot_data %>%
mutate(
data = map(
data,
~plot_heatmap_gg(
.x %>%
mutate(Combined.Score = log2(Combined.Score)),
aes(class_name, Term_combined, fill = Combined.Score), facet_by = NULL
)
)
)
pwalk(
clusters_go_enrichr_plots,
function(algorithm, data, ...) {
ggsave(
file.path(wd, paste0("clusters_go_enrichr_heatmap_", algorithm, ".pdf")),
data +
ggtitle(algorithm),
width = 10, height = 12
)
}
)
# TopGO functions --------------------------------------------------------------
###############################################################################T
library(topGO)
all_genes <- deseq_padj %>%
pull(gene_id) %>%
unique()
go_objects <- list(
"bp" = new(
getClassDef("topGOdata", package = "topGO"),
ontology = "BP",
allGenes = set_names(rep(1, length(all_genes)), all_genes),
geneSelectionFun = function (x) x > 0.5,
annot = topGO::annFUN.org,
mapping = "org.Hs.eg.db",
ID = "ensembl"
),
"mf" = new(
getClassDef("topGOdata", package = "topGO"),
ontology = "MF",
allGenes = set_names(rep(1, length(all_genes)), all_genes),
geneSelectionFun = function (x) x > 0.5,
annot = topGO::annFUN.org,
mapping = "org.Hs.eg.db",
ID = "ensembl"
)
)
go_gene_mapping <- map(
go_objects,
~usedGO(.x) %>%
{genesInTerm(.x, .)} %>%
enframe("id", "gene_id")
) %>%
bind_rows() %>%
unchop(gene_id) %>%
genebabel::join_hgnc("gene_id", "ensembl_gene_id", c("symbol"))
# GO enrichment of 'gene_set' against the universe 'all_genes' using topGO.
#
# gene_set:  character vector of selected gene IDs.
# all_genes: character vector defining the gene universe.
# go_domain: which prebuilt topGOdata object to use ("bp" or "mf"),
#            looked up in the module-level 'go_objects' list.
# Returns a tibble of GO terms with Fisher p-values ("weight01" algorithm)
# plus the member gene IDs/symbols per term.
topgo_enrichment <- function(gene_set, all_genes, go_domain = "bp", ...) {
  # 1/0 membership score per universe gene; the selection function below
  # (> 0.5) recovers the set.
  gene_input <- set_names(
    if_else(all_genes %in% gene_set, 1, 0),
    all_genes
  )
  GOdata <- topGO::updateGenes(
    go_objects[[go_domain]], gene_input, function (x) x > 0.5
  )
  suppressMessages(resultFisher <- topGO::runTest(
    GOdata,
    algorithm = "weight01",
    statistic = "fisher"
  ))
  # resultKS <- topGO::runTest(
  #   GOdata,
  #   algorithm = "weight01",
  #   statistic = "ks"
  # )
  # Per-term pipe-separated lists of the selected genes (IDs and symbols).
  go_genes <- go_gene_mapping %>%
    filter(gene_id %in% gene_set) %>%
    group_by(id) %>%
    summarize(
      gene_symbols = paste(unique(symbol), collapse = "|"),
      gene_ids = paste(unique(gene_id), collapse = "|")
    ) %>%
    ungroup()
  topGO::GenTable(
    GOdata,
    fisher = resultFisher,
    # ks = resultKS,
    orderBy = "fisher",
    # topNodes = max(length(resultFisher@score), length(resultKS@score)),
    topNodes = length(resultFisher@score),
    numChar = 1000
  ) %>%
    as_tibble() %>%
    dplyr::rename(id = GO.ID, term = Term, pval = fisher, annotated = Annotated, significant = Significant) %>%
    dplyr::left_join(
      go_genes, by = "id"
    ) %>%
    # GenTable reports very small p-values as "< 1e-30"; coerce to numeric.
    dplyr::mutate_at(vars(pval), ~as.numeric(gsub("< 1e-30", "1e-30", .x)))
  }
surface_fit_go <- topgo_enrichment(
unique(function_clusters$gene_id), all_genes, "bp"
)
openxlsx::write.xlsx(
surface_fit_go %>%
arrange(pval),
file.path(wd, "top_go_results_surface_fit_genes.xlsx")
)
# topGO on function clusters ---------------------------------------------------
###############################################################################T
plan(multisession(workers = 6))
function_clusters_topgo <- function_clusters %>%
dplyr::select(-consensus_n) %>%
gather("algorithm", "cluster", -gene_id, -gene_name) %>%
filter(algorithm == "consensus") %>%
group_nest(algorithm, cluster) %>%
crossing(go_domain = c("bp", "mf")) %>%
mutate(
enrichment = future_map2(
data, go_domain,
~topgo_enrichment(.x$gene_id, all_genes, go_domain = .y),
.progress = TRUE
)
)
function_clusters_topgo_df <- function_clusters_topgo %>%
dplyr::select(-data) %>%
unnest(enrichment) %>%
arrange(pval) %>%
group_by(algorithm) %>%
filter(
id %in% (
c(
filter(., pval <= 0.05) %>%
group_by(cluster) %>%
dplyr::slice(1:5) %>%
ungroup() %>%
pull(id),
filter(., pval <= 0.05) %>%
pull(id)
) %>%
unique() %>%
head(50)
)
) %>%
ungroup()
write_csv(
function_clusters_topgo_df,
file.path(wd, paste("consensus_clusters_topgo_enrichment_table.csv"))
)
function_clusters_topgo_mat <- function_clusters_topgo_df %>%
inner_join(cluster_names, by = c("cluster" = "class_combined")) %>%
transmute(
algorithm,
cluster,
term = paste0(go_domain, "_", term),
pval = -log10(pval)
) %>%
group_nest(algorithm) %>%
mutate(
data = map(
data,
~.x %>%
spread(cluster, pval) %>%
column_to_rownames("term") %>%
as.matrix()
)
)
function_clusters_topgo_hm <- function_clusters_topgo_mat %>%
mutate(
data = map(data, plot_heatmap)
)
pwalk(
function_clusters_topgo_hm,
function(algorithm, data) {
ggsave(
file.path(wd, paste0("go_heatmap_", algorithm, ".pdf")),
data, width = 9, height = 12
)
}
)
# topGO on time series induction -----------------------------------------------
###############################################################################T
temporal_ordering_abs <- temporal_ordering %>%
filter(directed == "absolute") %>%
mutate_at(
vars(max_induction, mid_induction),
cut, breaks = c(0, 3, 10, Inf), labels = c("early", "mid", "late")
) %>%
mutate(
max_induction = fct_cross(max_induction, direction_max, sep = "_"),
mid_induction = fct_cross(mid_induction, direction_max, sep = "_"),
)
plan(multisession(workers = 10))
temporal_ordering_abs_mid_ind_topgo <- temporal_ordering_abs %>%
filter(gene_id %in% function_clusters$gene_id) %>%
group_nest(mid_induction, ERKi) %>%
crossing(go_domain = c("bp", "mf")) %>%
mutate(
enrichment = future_map2(
data, go_domain,
~topgo_enrichment(.x$gene_id, all_genes, go_domain = .y),
.progress = TRUE
)
)
# NOTE(review): this looks like leftover interactive/debug code stranded in
# the middle of the pipeline — it re-downloads the cluster table into 'x'
# and prints the genes on which the three clustering assignments are all
# distinct, but the result is never assigned or used below. Consider
# removing it.
x <- syn("syn21576614") %>%
  read_csv()
x %>% filter(
  pmap_lgl(
    list(function_fitting, k_medoids, linear_model),
    function(...) length(unique(list(...))) == 3
  )
)
temporal_ordering_abs_mid_ind_topgo_df <- temporal_ordering_abs_mid_ind_topgo %>%
separate(mid_induction, c("mid_induction", "direction"), sep = "_") %>%
filter(ERKi %in% c("0", "1000")) %>%
dplyr::select(-data) %>%
unnest(enrichment) %>%
arrange(pval) %>%
filter(
id %in% (
c(
filter(., pval <= 0.05) %>%
group_by(mid_induction, ERKi) %>%
dplyr::slice(1:5) %>%
ungroup() %>%
pull(id),
filter(., pval <= 0.05) %>%
pull(id)
) %>%
unique() %>%
head(50)
)
) %>%
mutate(
neg_log10_p = -log10(pval),
signed_p = neg_log10_p * if_else(direction == "pos", 1, -1),
term = paste0(go_domain, "_", term)
) %>%
arrange(term, mid_induction, ERKi, direction)
temporal_ordering_abs_mid_ind_topgo_hm <- temporal_ordering_abs_mid_ind_topgo_df %>%
mutate(
mid_induction = factor(mid_induction, levels = c("early", "mid", "late"))
) %>%
arrange(ERKi, mid_induction) %>%
mutate(
class = as.factor(
paste("ERKi", ERKi, mid_induction, sep = "_")
) %>%
fct_inorder()
) %>%
plot_heatmap_gg_bivariate(
fill_bi_var = direction,
aesthetic = aes(class, term, fill = neg_log10_p),
colormaps = list(
# "pos" = "Reds", "neg" = "Blues"
"pos" = c("#FFFFFFFF", "#DD000000"),
"neg" = c("#FFFFFFFF", "#0000DD00")
)
) +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
labs(fill = "signed\n-log10(p)")
ggsave(
file.path(wd, "temporal_ordering_mid_induction_go_heatmap_bivar.pdf"),
temporal_ordering_abs_mid_ind_topgo_hm, width = 10, height = 8
)
# topGO on high/low ERK responders by time -------------------------------------
###############################################################################T
cons_clusters <- function_clusters %>%
dplyr::select(
gene_id, gene_name, consensus_n, consensus
) %>%
extract(
consensus, c("cluster", "direction"),
regex = "^(full_range|high_erk|low_erk|no_response|bell)_([0\\+-])$", remove = FALSE
)
high_low_clusters <- cons_clusters %>%
filter(cluster %in% c("high_erk", "low_erk"))
temporal_ordering_abs <- temporal_ordering %>%
filter(directed == "absolute") %>%
mutate_at(
vars(max_induction, mid_induction),
cut, breaks = c(0, 3, 10, Inf), labels = c("early", "mid", "late")
) %>%
mutate(
max_induction = fct_cross(max_induction, direction_max, sep = "_"),
mid_induction = fct_cross(mid_induction, direction_max, sep = "_"),
)
plan(multisession(workers = 10))
temporal_ordering_abs_mid_ind_topgo <- temporal_ordering_abs %>%
filter(gene_id %in% function_clusters$gene_id) %>%
group_nest(mid_induction, ERKi) %>%
crossing(go_domain = c("bp", "mf")) %>%
mutate(
enrichment = future_map2(
data, go_domain,
~topgo_enrichment(.x$gene_id, all_genes, go_domain = .y),
.progress = TRUE
)
)
high_low_temp_clusters <- temporal_ordering %>%
filter(directed == "directed", ERKi %in% c(0, 1000)) %>%
dplyr::select(gene_id, mid_induction, ERKi) %>%
mutate(high_low = if_else(ERKi == 0, "low_erk", "high_erk")) %>%
inner_join(high_low_clusters, by = c("gene_id", "high_low" = "cluster"))
plan(multisession(workers = 4))
high_low_clusters_go <- high_low_temp_clusters %>%
group_nest(high_low, mid_induction, direction) %>%
mutate_at(vars(mid_induction), as.character) %>%
bind_rows(
mutate(
.,
mid_induction = cut(as.integer(mid_induction), breaks = c(0, 3, 10, Inf), labels = c("early", "mid", "late"))
) %>%
group_by(high_low, mid_induction, direction) %>%
summarize(data = list(bind_rows(data))) %>%
ungroup()
) %>%
crossing(go_domain = c("bp", "mf")) %>%
mutate(
enrichment = future_map2(
data, go_domain,
~topgo_enrichment(.x$gene_id, all_genes, go_domain = .y),
.progress = TRUE
)
)
write_rds(
high_low_clusters_go,
file.path(wd, "high_low_erk_temporal_go_enrichment.rds"),
compress = "gz"
)
# high_low_clusters_go <- read_rds(file.path(wd, "high_low_erk_temporal_go_enrichment.rds"))
high_low_clusters_go_plot_data <- high_low_clusters_go %>%
mutate(
direction = if_else(xor(direction == "+", high_low == "low_erk"), "up", "down")
) %>%
filter(mid_induction %in% c("early", "mid", "late")) %>%
dplyr::select(-data) %>%
unnest(enrichment) %>%
arrange(pval) %>%
group_nest(high_low) %>%
mutate(
data = map(
data,
~.x %>%
filter(
id %in% (
c(
filter(., pval <= 0.05) %>%
group_by(mid_induction, direction) %>%
dplyr::slice(1:5) %>%
ungroup() %>%
pull(id),
filter(., pval <= 0.05) %>%
pull(id)
) %>%
unique() %>%
head(50)
)
) %>%
mutate(
neg_log10_p = -log10(pval),
signed_p = neg_log10_p * if_else(direction == "up", 1, -1),
term = paste0(go_domain, "_", term),
mid_induction = factor(mid_induction, levels = c("early", "mid", "late"))
) %>%
arrange(term, mid_induction, direction)
)
)
high_low_clusters_go_plot_data %>%
unnest(data) %>%
arrange(pval) %>%
write_csv(file.path(wd, "high_low_erk_temporal_go_enrichment_top.csv"))
high_low_clusters_go_plot <- high_low_clusters_go_plot_data %>%
mutate(
data = map(
data,
~plot_heatmap_gg(
.x,
aes(mid_induction, term, fill = signed_p), facet_by = direction
)
)
)
pwalk(
high_low_clusters_go_plot,
function(high_low, data, ...) {
ggsave(
file.path(wd, paste0("high_low_temporal_ordering_go_heatmap", high_low, ".pdf")),
data +
ggtitle(high_low)
)
}
)
high_low_clusters_go_enrichr <- high_low_temp_clusters %>%
group_nest(high_low, mid_induction, direction) %>%
mutate_at(vars(mid_induction), as.character) %>%
bind_rows(
mutate(
.,
mid_induction = cut(as.integer(mid_induction), breaks = c(0, 3, 10, Inf), labels = c("early", "mid", "late"))
) %>%
group_by(high_low, mid_induction, direction) %>%
summarize(data = list(bind_rows(data))) %>%
ungroup()
) %>%
mutate(
enrichment = map(
data,
~enrichr(
.x$gene_name,
databases = c(
"GO_Biological_Process_2018",
"GO_Molecular_Function_2018",
"KEGG_2019_Human",
"Reactome_2016"
)
) %>%
bind_rows(.id = "database") %>%
as_tibble() %>%
arrange(desc(Combined.Score))
)
)
high_low_clusters_go_enrichr_plot_data <- high_low_clusters_go_enrichr %>%
mutate(
direction = if_else(xor(direction == "+", high_low == "low_erk"), "up", "down")
) %>%
filter(mid_induction %in% c("early", "mid", "late")) %>%
dplyr::select(-data) %>%
unnest(enrichment) %>%
mutate(
Term_combined = paste(str_sub(database, end = 4L), Term, sep = "_")
) %>%
arrange(desc(Combined.Score)) %>%
group_nest(high_low) %>%
mutate(
data = map(
data,
~.x %>%
filter(
Term_combined %in% (
c(
filter(., Adjusted.P.value <= 0.05) %>%
group_by(mid_induction, direction) %>%
dplyr::slice(1:5) %>%
ungroup() %>%
pull(Term_combined),
filter(., Adjusted.P.value <= 0.05) %>%
pull(Term_combined)
) %>%
unique() %>%
head(50)
)
) %>%
mutate(
neg_log10_p = -log10(Adjusted.P.value),
signed_p = neg_log10_p * if_else(direction == "up", 1, -1),
mid_induction = factor(mid_induction, levels = c("early", "mid", "late"))
)
)
)
high_low_clusters_go_enrichr_plot <- high_low_clusters_go_enrichr_plot_data %>%
mutate(
data = map(
data,
~plot_heatmap_gg(
.x,
aes(mid_induction, Term_combined, fill = signed_p), facet_by = direction
)
)
)
pwalk(
high_low_clusters_go_enrichr_plot,
function(high_low, data, ...) {
ggsave(
file.path(wd, paste0("high_low_temporal_ordering_go_enrichr_heatmap", high_low, ".pdf")),
data +
ggtitle(high_low)
)
}
)
|
2bdde9b0a707c2c069a5e025e0f38849ed6e2c15
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/INLABMA/examples/logprrho.Rd.R
|
2bd596fd8ca5a56f65ee24aafad243cc8577aeee
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 260
|
r
|
logprrho.Rd.R
|
library(INLABMA)
### Name: logprrho
### Title: Log-prior density for the spatial autocorrelation parameter
###   'rho'
### Aliases: logprrho
### Keywords: distribution
### ** Examples
# Evaluate the log-prior on a grid over (0, 1) and plot the prior density.
rrho<-seq(.01, .99, length.out=100)
plot(rrho, exp(logprrho(rrho)))
|
28cf4511439f3af8a3e3270934df1096df2ce030
|
c68ee050bad6a1b94ebffb6360c6d1ad2147510a
|
/rbayes/functions.R
|
23a9098adc813e88370cb48b7cf8a5d67af825b4
|
[
"MIT"
] |
permissive
|
JSRivero/bayes-network
|
1a592651999a0804e5944cd86518dc01a692dd27
|
afed3726a63c65eb6b3f103b4bfe7fa5bdf6184d
|
refs/heads/master
| 2020-03-19T03:20:44.257271
| 2018-07-18T14:52:16
| 2018-07-18T14:52:16
| 135,719,424
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 23,715
|
r
|
functions.R
|
library(bnlearn)
library(readr)
library(Rgraphviz)
library(tictoc)
# Read the artificial epsilon draw-ups test file, transpose it so that the
# CSV columns become rows, promote the first row to column names, and turn
# numeric columns into factors.
# NOTE: the last statement is an assignment, so the data frame is returned
# invisibly.
read_test <- function(){
  ep_draw = as.data.frame(t(read_csv('artificial_eps.csv')))
  # first row of the transposed table holds the column names
  colnames(ep_draw) = as.character(unlist(ep_draw[1,]))
  ep_draw = ep_draw[-1,]
  # discretise: numeric columns become factors, others are kept as-is
  ep_draw <- as.data.frame(lapply(ep_draw, function(x) if(is.numeric(x)) factor(x) else x))
}
# Return the first slice-directory name that starts with 'date' for the
# given (percent, wind, mov) parameters.
# NOTE(review): 'unpack[, files] <- ...' is a destructuring assignment that
# is not base R (presumably a custom/zeallot-style operator defined
# elsewhere) — confirm it is in scope; the path element is discarded.
get_folder_date <- function(date,percent, wind, mov){
  unpack[, files] <- lista_files(percent, wind, mov)
  # which(...)[1] picks the first match; NA if no directory starts with 'date'
  File = files[which(startsWith(files,date))[1]]
  return(File)
}
# Build the slice directory path for the given (percent, wind, mov)
# parameters and list its contents.
# Returns a two-element list: the path and the character vector of entries
# (empty when the directory does not exist).
lista_files <- function(percent, wind, mov){
  base_dir = 'C:/Users/Javier/Documents/MEGA/Universitattt/Master/Thesis/CDS_data/Sliced_ep_draw_new'
  percent_dir = paste('Slice', as.character(percent), sep = '_')
  slice_dir = paste('Slice', as.character(wind), as.character(mov), sep = '_')
  full_path = file.path(base_dir, percent_dir, slice_dir)
  list(full_path, list.files(path = full_path))
}
# Read the epsilon draw-up tables for two dates from the same slice
# configuration and return them as a two-element list.
# NOTE(review): relies on the non-base 'unpack[...]' destructuring operator.
read_dates <- function(date1, date2, percent, wind, mov, time_para, delay){
  unpack[path, files] <- lista_files(percent, wind, mov)
  # first directory whose name starts with each requested date
  firstFile = files[which(startsWith(files,date1))[1]]
  secondFile = files[which(startsWith(files,date2))[1]]
  data1 = reading_v2(time_para, delay, path, firstFile)
  data2 = reading_v2(time_para, delay, path, secondFile)
  return(list(data1, data2))
}
# Read the epsilon draw-up table for a single date from the slice
# configuration given by (percent, wind, mov).
# NOTE(review): relies on the non-base 'unpack[...]' destructuring operator.
read_one_date <- function(date, percent, wind, mov, time_para, delay){
  unpack[path, files] <- lista_files(percent, wind, mov)
  firstFile = files[which(startsWith(files,date))[1]]
  data = reading_v2(time_para, delay, path, firstFile)
  return(data)
}
# Read one epsilon draw-up CSV (plain or delayed variant) located under
# path/directory/, transpose it so counterparties become columns, promote
# the first row/column to dimnames, drop stale factor levels, and remove
# counterparties whose column is constant (a single factor level), which
# would otherwise break downstream network estimation.
reading_v2 <- function(time_para, delay, path, directory){
  # build the file name; delayed variants use '_'-separated components
  if (delay == 0){
    filename = paste(directory,'_raw_ep_drawups',as.character(time_para),'.csv',sep='')
  } else{
    filename_without_extension = paste(directory,'ep','drawups',as.character(time_para),
                                       'delay',as.character(delay), sep = '_')
    filename = paste(filename_without_extension,'csv', sep = '.')
  }
  path_to_file = paste(path,directory,filename,sep = '/')
  # FIX: the original passed cols(.dfault = col_integer()) — a typo for
  # readr's '.default' sentinel. readr interpreted it as a parser for a
  # column literally named '.dfault' (which never exists): a no-op plus a
  # warning, with all types guessed anyway. Forcing integers would break
  # the name row/column, so the dead spec is simply dropped.
  ep_draw = as.data.frame(t(read_csv(path_to_file, col_names = FALSE)))
  colnames(ep_draw) = as.character(unlist(ep_draw[1,]))
  ep_draw = ep_draw[-1,]
  rownames(ep_draw) = as.character(unlist(ep_draw[,1]))
  ep_draw = ep_draw[,-1]
  # Re-level the factors: even though the name row/column was deleted, its
  # values would linger as factor levels and corrupt the network.
  ep_draw <- as.data.frame(lapply(ep_draw, function(x) if(is.factor(x)) factor(x) else x))
  # Remove counterparties with a single level (constant series).
  single_level <- vapply(ep_draw, function(col) length(levels(col)) == 1, logical(1))
  ep_draw <- ep_draw[, !single_level, drop = FALSE]
  return(ep_draw)
}
# Same as reading_v2 but for the original file naming scheme (the file name
# does not carry the directory prefix). Reads, transposes, sets dimnames,
# re-levels factors and removes constant (single-level) counterparties.
reading_original <- function(time_para, delay, path, directory){
  if (delay == 0){
    filename = paste('raw_ep_drawups',as.character(time_para),'.csv',sep='')
  } else{
    filename_without_extension = paste('ep','drawups',as.character(time_para),
                                       'delay',as.character(delay), sep = '_')
    filename = paste(filename_without_extension,'csv', sep = '.')
  }
  path_to_file = paste(path,directory,filename,sep = '/')
  # FIX: dropped the misspelled cols(.dfault = col_integer()) spec — a typo
  # for readr's '.default' sentinel that matched no column and was a no-op
  # (all column types were guessed anyway).
  ep_draw = as.data.frame(t(read_csv(path_to_file, col_names = FALSE)))
  colnames(ep_draw) = as.character(unlist(ep_draw[1,]))
  ep_draw = ep_draw[-1,]
  rownames(ep_draw) = as.character(unlist(ep_draw[,1]))
  ep_draw = ep_draw[,-1]
  # Re-level the factors so values from the deleted name row/column do not
  # linger as levels.
  ep_draw <- as.data.frame(lapply(ep_draw, function(x) if(is.factor(x)) factor(x) else x))
  # Remove counterparties with a single level (constant series).
  single_level <- vapply(ep_draw, function(col) length(levels(col)) == 1, logical(1))
  ep_draw <- ep_draw[, !single_level, drop = FALSE]
  return(ep_draw)
}
# Drop the column belonging to 'counterparty' from the data frame.
# Preserves the original base-R subsetting semantics, including the
# implicit drop to a vector when only one column remains.
delete_counterparty <- function(df, counterparty){
  idx <- which(colnames(df) == counterparty)
  df[, -idx]
}
# NOTE(review): unfinished stub — the loop body is empty and several
# parameters (time_para, delay, name_directory, all) are unused. Also note
# lista_files() takes three arguments (percent, wind, mov) but is called
# here with only two, which would error if this function were ever run.
reading_split <- function(time_para, delay, wind, move, name_directory, all){
  unpack[path_ep, lista_dirs] = lista_files(wind, move)
  for (file in lista_dirs){
  }
}
# Read an epsilon draw-up matrix for the given std-dev window and delay
# from the fixed epsilon_drawups directory, transpose it so counterparties
# become columns, set dimnames from the first row/column, drop the two
# trailing (unused) columns and refresh factor levels.
reading <- function(time_para, delay){
  path = 'C:/Users/Javier/Documents/MEGA/Universitattt/Master/Thesis/CDS_data/epsilon_drawups'
  directory = paste(path, paste('stddev_',as.character(time_para),sep = ''), sep = '/')
  if (delay == 0){
    file_name = paste('raw_ep_drawups_',as.character(time_para),'.csv',sep='')
  } else{
    file_name = paste('ep_drawups_', as.character(time_para), '_delay_', as.character(delay),'.csv',sep='')
  }
  path = paste(directory,file_name, sep = '/')
  # FIX: dropped the misspelled cols(.dfault = col_integer()) spec — a typo
  # for readr's '.default' sentinel that matched no column and was a no-op
  # (all column types were guessed anyway).
  ep_draw = as.data.frame(t(read_csv(path, col_names = FALSE)))
  colnames(ep_draw) = as.character(unlist(ep_draw[1,]))
  ep_draw = ep_draw[-1,]
  rownames(ep_draw) = as.character(unlist(ep_draw[,1]))
  ep_draw = ep_draw[,-1]
  # drop the last two columns (unused trailing columns in the export)
  aux = ncol(ep_draw)
  ep_draw[,(aux-1):aux] = NULL
  # Re-level the factors so values from the deleted name row/column do not
  # linger as levels.
  ep_draw <- as.data.frame(lapply(ep_draw, function(x) if(is.factor(x)) factor(x) else x))
  return(ep_draw)
}
name_var <- function(v){
  # Return the name of the expression passed as `v`, as a string
  # (non-standard evaluation: the argument itself is never evaluated).
  deparse(substitute(v))
}
name_var_csv <- function(v){
  # Name of the expression passed as `v` with a '.csv' extension appended,
  # e.g. name_var_csv(results) == "results.csv".
  paste0(deparse(substitute(v)), ".csv")
}
direct_net <- function(bn, used_score, Data){
  # Turn a partially directed network (e.g. from averaged.network) into a
  # fully directed one: repeatedly let choose.direction() orient the first
  # undirected arc under `used_score`; if that makes no progress, pick an
  # orientation at random (which makes the result non-deterministic).
  aux = bn
  undir_arcs = undirected.arcs(aux)
  while (nrow(undir_arcs) != 0){
    aux2 = choose.direction(aux, data = Data, arc = undir_arcs[1,], debug = FALSE, criterion = used_score)
    undir_arcs_2 = undirected.arcs(aux2)
    if (nrow(undir_arcs)==nrow(undir_arcs_2)){
      # Tie: undirected arcs appear as symmetric row pairs, so sampling row 1
      # or 2 of the list randomly chooses one of the two orientations.
      arc_to_change = undir_arcs[sample(1:2,1),]
      aux2 = set.arc(aux, from = arc_to_change[1], to = arc_to_change[2])
    }
    undir_arcs = undir_arcs_2
    aux = aux2
  }
  # if (nrow(undir_arcs) != 0){
  #   print('funciona')
  #   for (i in nrow(undir_arcs)){
  #     aux2 = choose.direction(aux, data = ep_draw, arc = undir_arcs[i,], debug = TRUE, criterion = used_score)
  #     aux = aux2
  #   }
  # }
  return(aux)
}
compute_prob_v2 <- function(network, inst_def, data){
  # For every node, estimate P(node in distress | inst_def in state '1.0')
  # by sampling with cpquery, where "distress" sums the probabilities of
  # states '1.0' and '0.5'. Returns a one-column ('SD') data frame sorted
  # descending, with `inst_def` itself removed. Results are Monte Carlo
  # estimates (n = 4e5 samples per query) and vary between calls.
  node_names = nodes(network)
  parameters = bn.fit(network, data)
  CPGSD <- as.data.frame(matrix(0.0,length(node_names),1), row.names = node_names)
  colnames(CPGSD) = c('SD')
  param = parameters
  for (name_node in node_names){
    # cpquery() captures event/evidence as unevaluated expressions, so the
    # queries are assembled as text and eval(parse())'d with the node names
    # spliced in.
    st_SD = paste("(", as.character(inst_def), "=='","1.0')", sep = "")
    st = paste("(", name_node, "=='1.0')", sep = "")
    cmd_SD = paste("cpquery(param, event = ", st, ", evidence = ", st_SD, ", n = 4 * 10**5)", sep = "")
    CPGSD[name_node,'SD'] <- eval(parse(text = cmd_SD))
    st = paste("(", name_node, "=='0.5')", sep = "")
    cmd_SD = paste("cpquery(param, event = ", st, ", evidence = ", st_SD, ", n = 4 * 10**5)", sep = "")
    CPGSD[name_node,'SD'] <- CPGSD[name_node,'SD'] + eval(parse(text = cmd_SD))
    # st_NSD = paste("(", as.character(inst_def), "=='", "0.0')", sep = "")
    # cmd_NSD = paste("cpquery(param, ", st, ", ", st_NSD, ", n = 10**5)", sep = "")
    # CPGSD[name_node,'NSD'] <- eval(parse(text = cmd_NSD))
  }
  CPGSD <- CPGSD[!(row.names(CPGSD) %in% c(inst_def)),,drop = FALSE]
  CPGSD <- CPGSD[order(-CPGSD$SD), , drop = FALSE]
  # arrange(cbind(row.names(CPGSD),CPGSD),desc(SD))
  return(CPGSD)
}
compute_prob_inv_v2 <- function(network, inst_def, data){
  # Inverse of compute_prob_v2: for every node, estimate the probability that
  # `inst_def` is in distress ('1.0' plus '0.5') GIVEN that the node is in
  # state '1.0'. Same cpquery text-assembly mechanism, same output shape
  # (one 'SD' column, sorted descending, `inst_def` removed).
  node_names = nodes(network)
  parameters = bn.fit(network, data)
  CPGSD <- as.data.frame(matrix(0.0,length(node_names),1), row.names = node_names)
  colnames(CPGSD) = c('SD')
  param = parameters
  for (name_node in node_names){
    # Evidence is the node, event is inst_def — roles swapped vs compute_prob_v2.
    st_SD = paste("(", name_node, "=='","1.0')", sep = "")
    st = paste("(", as.character(inst_def), "=='1.0')", sep = "")
    cmd_SD = paste("cpquery(param, event = ", st, ", evidence = ", st_SD, ", n = 4 * 10**5)", sep = "")
    CPGSD[name_node,'SD'] <- eval(parse(text = cmd_SD))
    st = paste("(", as.character(inst_def), "=='0.5')", sep = "")
    cmd_SD = paste("cpquery(param, event = ", st, ", evidence = ", st_SD, ", n = 4 * 10**5)", sep = "")
    CPGSD[name_node,'SD'] <- CPGSD[name_node,'SD'] + eval(parse(text = cmd_SD))
    # st_NSD = paste("(", as.character(inst_def), "=='", "0.0')", sep = "")
    # cmd_NSD = paste("cpquery(param, ", st, ", ", st_NSD, ", n = 10**5)", sep = "")
    # CPGSD[name_node,'NSD'] <- eval(parse(text = cmd_NSD))
  }
  CPGSD <- CPGSD[!(row.names(CPGSD) %in% c(inst_def)),,drop = FALSE]
  CPGSD <- CPGSD[order(-CPGSD$SD), , drop = FALSE]
  # arrange(cbind(row.names(CPGSD),CPGSD),desc(SD))
  return(CPGSD)
}
compute_prob_group <- function(network, data, group){
  # Like compute_prob_v2, but conditions on a *set* of nodes (`group`) all
  # being in state '1.0' simultaneously. Nodes in the group are assigned
  # probability 1.0 by definition. Uses logic sampling ('ls', n = 1e6).
  node_names = nodes(network)
  parameters = bn.fit(network, data)
  nodes_event = node_names[-match(group,node_names)]
  CPGSD <- as.data.frame(matrix(0.0,length(node_names),1), row.names = node_names)
  colnames(CPGSD) = c('SD')
  param = parameters
  # Build the conjunctive evidence string "((A=='1.0') & (B=='1.0') & ...)".
  st_SD = paste('((',group[1]," == '1.0')",sep = '')
  for (g in group[-1]){
    st_SD = paste(st_SD," & (",g," == '1.0')",sep = '')
  }
  st_SD = paste(st_SD,')',sep = '')
  print(st_SD)
  for (name_node in nodes_event){
    # Distress probability = P(node=='1.0') + P(node=='0.5') given the group.
    st = paste("(", name_node, "=='1.0')", sep = "")
    cmd_SD = paste("cpquery(param, event = ", st, ", evidence = ", st_SD, ", method='ls', n = 10**6)", sep = "")
    CPGSD[name_node,'SD'] <- eval(parse(text = cmd_SD))
    st = paste("(", name_node, "=='0.5')", sep = "")
    cmd_SD = paste("cpquery(param, event = ", st, ", evidence = ", st_SD, ", method='ls', n = 10**6)", sep = "")
    CPGSD[name_node,'SD'] <- CPGSD[name_node,'SD'] + eval(parse(text = cmd_SD))
    # st_NSD = paste("(", as.character(inst_def), "=='", "0.0')", sep = "")
    # cmd_NSD = paste("cpquery(param, ", st, ", ", st_NSD, ", n = 10**5)", sep = "")
    # CPGSD[name_node,'NSD'] <- eval(parse(text = cmd_NSD))
  }
  for (name_node in group){
    CPGSD[name_node,'SD'] <-1.0
  }
  # CPGSD <- CPGSD[!(row.names(CPGSD) %in% c(inst_def)),,drop = FALSE]
  CPGSD <- CPGSD[order(-CPGSD$SD), , drop = FALSE]
  # arrange(cbind(row.names(CPGSD),CPGSD),desc(SD))
  return(CPGSD)
}
uncer_group <- function(network, data, group, M){
  # Quantify Monte Carlo uncertainty of compute_prob_group(): run it M times
  # and return the per-node mean and standard deviation, sorted by mean.
  # Uses tic()/toc() (tictoc package, presumably loaded elsewhere) for timing.
  node_names = nodes(network)
  parameters = bn.fit(network, data)
  nodes_event = node_names[-match(group,node_names)]
  # NOTE(review): this preallocated matrix is immediately overwritten by the
  # next assignment; `parameters`/`nodes_event` above are likewise unused here.
  CPGSD <- as.data.frame(matrix(0.0,length(node_names),M), row.names = node_names)
  CPGSD <- compute_prob_group(network, data, group)
  for (i in 2:M){
    tic(i)
    # merge(by = 0) joins on row names but returns them as a 'Row.names'
    # column, hence the restore-rownames/drop-column dance below.
    CPGSD_aux <- compute_prob_group(network, data, group)
    CPGSD <- merge(CPGSD,CPGSD_aux, by = 0)
    rownames(CPGSD) = as.character(unlist(CPGSD[,1]))
    CPGSD = CPGSD[,-1]
    toc()
  }
  # Row-wise mean/sd across the M replications.
  m = as.data.frame(apply(CPGSD,1,mean),row.names = rownames(CPGSD))
  s = as.data.frame(apply(CPGSD,1,sd),row.names = rownames(CPGSD))
  aux = merge(m,s,by = 0)
  rownames(aux) = as.character(unlist(aux[,1]))
  aux = aux[,-1]
  colnames(aux) = c('mean','std')
  aux <- aux[order(-aux$mean),,drop = FALSE]
  return(aux)
}
compare_prob <- function(net1, net2, data, inst){
  # Side-by-side conditional distress probabilities given a default of `inst`,
  # computed under two candidate networks and sorted by the first network's
  # values. Returns a data frame with columns 'net1' and 'net2'.
  probs_first <- compute_prob_v2(net1, inst, data)
  probs_second <- compute_prob_v2(net2, inst, data)
  # merge(by = 0) joins on row names but emits them as the first column,
  # so restore them as rownames and drop that column.
  combined <- merge(probs_first, probs_second, by = 0)
  rownames(combined) <- as.character(unlist(combined[, 1]))
  combined <- combined[, -1]
  colnames(combined) <- c('net1', 'net2')
  combined[order(-combined$net1), , drop = FALSE]
}
uncer_prob <- function(net,data, inst_def, M){
  # Monte Carlo uncertainty of the pairwise conditional probabilities: call
  # cond_prob() M times for each node (excluding `inst_def` itself) and
  # return per-node mean and standard deviation, sorted by mean.
  para = bn.fit(net,data)
  node_names = nodes(net)
  node_names = node_names[-match(inst_def,node_names)]
  # M = 100
  prob = data.frame(matrix(0.0,length(node_names),M), row.names = node_names)
  for (m in 1:M){
    for (node in node_names){
      # Each call re-samples, so columns are independent replications.
      prob[node,m] = cond_prob(para, inst_def, node)
    }
  }
  tot_prob = data.frame()
  m = apply(prob,1,mean)
  s = apply(prob,1,sd)
  for (n in rownames(prob)){
    tot_prob[n,'mean']=m[n]
    tot_prob[n,'std'] = s[n]
  }
  tot_prob <- tot_prob[order(-tot_prob$mean),,drop=FALSE]
  return(tot_prob)
}
compute_prob <- function(net_information, inst_def, parameters){
  # For every node name in `net_information` (callers pass nodes(net)),
  # estimate P(node in distress | inst_def == '1.0') via cpquery sampling,
  # where "distress" sums states '1.0' and '0.5'. Returns a one-column
  # ('SD') data frame sorted descending, with `inst_def` removed.
  # BUG FIX: the body previously iterated over a free variable `node_names`
  # picked up from the global environment instead of the first argument.
  node_names = net_information
  CPGSD <- as.data.frame(matrix(0.0,length(node_names),1), row.names = node_names)
  colnames(CPGSD) = c('SD')
  param = parameters
  for (name_node in node_names){
    # cpquery() captures event/evidence unevaluated, so the queries are
    # assembled as text with the node names spliced in and eval(parse())'d.
    st_SD = paste("(", as.character(inst_def), "=='","1.0')", sep = "")
    st = paste("(", name_node, "=='1.0')", sep = "")
    cmd_SD = paste("cpquery(param, event = ", st, ", evidence = ", st_SD, ", n = 4 * 10**5)", sep = "")
    CPGSD[name_node,'SD'] <- eval(parse(text = cmd_SD))
    st = paste("(", name_node, "=='0.5')", sep = "")
    cmd_SD = paste("cpquery(param, event = ", st, ", evidence = ", st_SD, ", n = 4 * 10**5)", sep = "")
    CPGSD[name_node,'SD'] <- CPGSD[name_node,'SD'] + eval(parse(text = cmd_SD))
  }
  CPGSD <- CPGSD[!(row.names(CPGSD) %in% c(inst_def)),,drop = FALSE]
  CPGSD <- CPGSD[order(-CPGSD$SD), , drop = FALSE]
  return(CPGSD)
}
compute_inv_prob <- function(node_names, inst_def, parameters){
  # Inverse of compute_prob: for every node name, estimate the probability
  # that `inst_def` is in distress ('1.0' plus '0.5') GIVEN the node is in
  # state '1.0'. Same output shape: one 'SD' column, sorted descending,
  # `inst_def` removed. Sampling-based (n = 4e5 per query).
  CPGSD <- as.data.frame(matrix(0.0,length(node_names),1), row.names = node_names)
  colnames(CPGSD) = c('SD')
  param = parameters
  for (name_node in node_names){
    # Evidence is the node, event is inst_def — roles swapped vs compute_prob.
    st = paste("(", name_node, "=='1.0')", sep = "")
    st_SD = paste("(", as.character(inst_def), "=='","1.0')", sep = "")
    cmd_SD = paste("cpquery(param, event = ", st_SD, ", evidence = ", st, ", n = 4 * 10**5)", sep = "")
    CPGSD[name_node,'SD'] <- eval(parse(text = cmd_SD))
    st_SD = paste("(", as.character(inst_def), "=='0.5')", sep = "")
    cmd_SD = paste("cpquery(param, event = ", st_SD, ", evidence = ", st, ", n = 4 * 10**5)", sep = "")
    CPGSD[name_node,'SD'] <- CPGSD[name_node,'SD'] + eval(parse(text = cmd_SD))
    # st_NSD = paste("(", as.character(inst_def), "=='", "0.0')", sep = "")
    # cmd_NSD = paste("cpquery(param, ", st, ", ", st_NSD, ", n = 10**5)", sep = "")
    # CPGSD[name_node,'NSD'] <- eval(parse(text = cmd_NSD))
  }
  CPGSD <- CPGSD[!(row.names(CPGSD) %in% c(inst_def)),,drop = FALSE]
  CPGSD <- CPGSD[order(-CPGSD$SD), , drop = FALSE]
  # arrange(cbind(row.names(CPGSD),CPGSD),desc(SD))
  return(CPGSD)
}
matrix_prob <- function(network, data){
  # Pairwise conditional distress probabilities under the fitted network:
  # entry [evidence, event] is cond_prob(event | evidence); the diagonal is
  # left at zero.
  fitted <- bn.fit(network, data)
  node_list <- nodes(network)
  n_nodes <- length(node_list)
  mat <- as.data.frame(matrix(0, n_nodes, n_nodes))
  colnames(mat) <- as.character(unlist(node_list))
  rownames(mat) <- as.character(unlist(node_list))
  for (ev in rownames(mat)){
    for (target in colnames(mat)){
      if (ev != target){
        mat[ev, target] <- cond_prob(param = fitted, evidence = ev, event = target)
      }
    }
  }
  mat
}
cond_prob <- function(param, evidence, event){
  # P(event in distress | evidence == '1.0') under a fitted network, where
  # "distress" sums the probabilities of states '1.0' and '0.5'. Queries are
  # assembled as text because cpquery() captures its arguments unevaluated;
  # uses likelihood weighting ('lw') with 4e5 samples, so results are
  # stochastic estimates.
  st_SD = paste("(", as.character(evidence), "=='","1.0')", sep = "")
  st = paste("(", as.character(event), "=='1.0')", sep = "")
  cmd_SD = paste("cpquery(param, event = ", st, ", evidence = ", st_SD, ", n = 4 * 10**5, method = 'lw')", sep = "")
  result <- eval(parse(text = cmd_SD))
  st = paste("(", event, "=='0.5')", sep = "")
  cmd_SD = paste("cpquery(param, event = ", st, ", evidence = ", st_SD, ", n = 4 * 10**5, method = 'lw')", sep = "")
  result <- result + eval(parse(text = cmd_SD))
  return(result)
}
plot_net <- function(bayesian_net){
  # Render a bnlearn network with Rgraphviz: convert to a graphAM object and
  # plot with uniform light-green nodes and black edges.
  g1 = as.graphAM(bayesian_net)
  # colors = c()
  # for (n in nodes(net_bic_not)){
  #   colors = append(colors, rgb(255, 255, 0, maxColorValue=255))
  # }
  plot(g1, attrs=list(node=list(fillcolor="lightgreen", fontsize = 30),edge=list(color="black")))
}
plot_net_colors <- function(bayesian_net, prob, darkness){
  # Plot the network with node fill colours scaled by the first column of
  # `prob` (rows indexed by node name): higher probability -> darker node.
  # With darkness == 0 the shade is a plain grey scale; otherwise the red
  # channel is rescaled by `darkness` to emphasise high-probability nodes.
  # BUG FIX: removed a dead loop over undefined variables (`nod`, `p`, `l`)
  # and replaced the global `net_bic_not` with the `bayesian_net` argument.
  col = colnames(prob)[1]
  g1 = as.graphAM(bayesian_net)
  node_names = nodes(bayesian_net)
  names(node_names) = node_names
  nAttrs <- list()
  nAttrs$label = node_names
  colors = c()
  for (n in node_names){
    if (darkness == 0){
      colors = append(colors, rgb(255*(1-prob[n,col]), 255*(1-prob[n,col]), 255*(1-prob[n,col]), maxColorValue=255))
    }else{
      colors = append(colors, rgb(255*(darkness-prob[n,col])/darkness, 255*(1-prob[n,col]), 255*(1-prob[n,col]), maxColorValue=255))
    }
  }
  names(colors) = node_names
  nAttrs$fillcolor = colors
  plot(g1, nodeAttrs = nAttrs, attrs=list(node=list(fillcolor=colors, fontsize = 30),edge=list(color="black")))
}
compute_scores <- function(bnet, dataset){
  # Evaluate the network under five scoring criteria and return a one-row
  # data frame (rowname 'net') with one column per criterion.
  score_types <- c('loglik', 'bic', 'k2', 'bds', 'bde')
  values <- vapply(
    score_types,
    function(s) bnlearn::score(bnet, dataset, type = s),
    numeric(1)
  )
  scores <- as.data.frame(t(values))
  rownames(scores) <- 'net'
  scores
}
score_table <- function(net, data){
  # Print the five network scores as a LaTeX-ready " & value" row fragment.
  score_row <- compute_scores(net, data)
  for (criterion in colnames(score_row)){
    cat(paste(' &', format(score_row['net', criterion], digits = 6)))
  }
}
table_prob_v2 <- function(prob){
  # Emit the probability table as LaTeX tabular rows:
  # "<rank> & <name> & <value> ... \\ \hline" per row, in the given order.
  all_rows <- row.names(prob)
  for (rank in seq_along(all_rows)){
    row_name <- all_rows[rank]
    cat(paste(rank, ' & ', row_name, sep = ''))
    for (column in colnames(prob)){
      cat(paste(' & ', as.numeric(format(prob[row_name, column], digits = 4))))
    }
    cat('\\\\\n\\hline\n')
  }
}
del_cp <- function(df,cp){
  # Drop the row named `cp` from `df`; if the name is absent, return `df`
  # unchanged.
  i = which(rownames(df) == cp)
  # BUG FIX: when `cp` is absent, which() returns integer(0) and
  # df[-integer(0), ] selects *zero* rows, silently emptying the frame.
  if (length(i) == 0) return(df)
  return(df[-i,,drop = F])
}
new_tableprob <- function(lista_df){
  # Print a full LaTeX table comparing several probability data frames
  # (each with an 'SD' column): the first data frame keeps its own row
  # order/names, later ones are cbind'ed with their names as extra columns,
  # and a final 'Mean' row shows each data frame's average probability.
  df = lista_df[[1]]
  m = array(0,0)
  m = append(m, mean(df$SD))
  b = 1
  for (c in lista_df[-1]){
    # cbind adds a 'names*' column (that frame's own row names) and its SD.
    df = cbind(df,names = rownames(c), SD = c)
    m = append(m,mean(c$SD))
  }
  # Table preamble: one column for the rank + two per data frame.
  cat('\\begin{center}\n\\begin{tabular}{')
  for (k in 1:(ncol(df)+2)){
    cat('|c')
  }
  cat(paste('|}\n\\cline{2-', as.character(ncol(df)+2) , '}\n'),'')
  cat('\\multicolumn{1}{c|}{ }')
  for (k in 1:length(lista_df)){
    cat(' & \\multicolumn{2}{|c|}{ SCORE }')
  }
  cat('\\\\\n\\hline\nnr.')
  for (k in 1:length(lista_df)){
    cat(' & Company & Prob')
  }
  cat('\\\\\n')
  cat('\\hline\n')
  co = colnames(df)
  i = 0
  for (r in rownames(df)){
    # print(r)
    i = i+1
    cat(paste(i, ' & ', r, sep = ''))
    for (c in 1:ncol(df)){
      # print(c)
      # 'names*' columns print verbatim; numeric columns as percentages.
      if (startsWith(co[c], 'names')){
        cat(paste(' & ', df[r,c]))
      }else{
        cat(paste(' & ', as.numeric(format(df[r,c]*100, digits = 4)), '\\%', sep = ''))
      }
    }
    cat('\\\\\n\\hline\n')
  }
  # Closing 'Mean' row: one mean percentage per input data frame.
  cat('Mean')
  for (a in m){
    cat(' & - & ')
    cat(as.numeric(format(a*100, digits = 4)))
    cat('\\%')
  }
  cat('\\\\\n\\hline\n')
  cat('\\end{tabular}\n\\end{center}\n')
}
table_times <- function(df){
  # Print a timing-comparison data frame as a LaTeX table with rows grouped
  # in (k2, bds) pairs sharing a \multirow period cell.
  # NOTE(review): the period label is hard-coded to "1" for every k2 row —
  # confirm whether it should increment per pair.
  cat('\\begin{center}\n\\begin{tabular}{|c|c|c|c|c|}\n\\hline\n')
  cat('Period & Score & HC & Tabu 50 & Tabu 100 \\\\\n')
  cat('\\hline\n\\hline\n')
  for (r in rownames(df)){
    if (startsWith(r,'k2')){
      cat('\\multirow{2}{*}{1} & k2 ')
    }else{
      cat('& bds')
    }
    for(c in colnames(df)){
      cat(' & ')
      cat(df[r,c])
    }
    cat('\\\\\n\\hline\n')
  }
  cat('\\end{tabular}\n\\end{center}')
}
print_prob_as_dic <- function(b){
  # Emit the 'SD' column as the body of a Python-style dict literal:
  # 'name': value, 'name': value, ... (including a trailing comma-space).
  for (company in rownames(b)){
    cat("'", company, "': ", b[company, 'SD'], ", ", sep = "")
  }
}
print_edges_list <- function(b){
  # Print the arcs of a bnlearn network as a Python-style list of
  # ['from','to'] pairs. arcs(b) is a 2-column character matrix, so
  # length(arcs(b))/2 is the number of arcs.
  for (i in 1:(length(arcs(b))/2)){
    a = arcs(b)[i,]
    cat("['")
    cat(a[1])
    cat("','")
    cat(a[2])
    cat("'],")
  }
}
# table_prob <- function(prob){
# i = 0
# for (r in row.names(prob)){
# i = i +1
# cat(paste(i, ' & ', r,' & ', as.numeric(format(prob[r,'SD'], digits = 6)), '\\\\', sep = ''))
# cat('\n\\hline\n')
# }
# }
BN <- function(Data, used_score, RR, algthm, parameter){
  # One full structure-learning run: bootstrap RR networks with `algthm`
  # (tabu search takes its tuning constants tabu/max.tabu/max.iter from
  # `parameter`), average them, force a fully directed graph via direct_net,
  # then fit parameters by both 'bayes' and 'mle' and compute each node's
  # distress probability conditional on a RUSSIA default.
  # NOTE(review): the conditioning node 'RUSSIA' is hard-coded here.
  if (algthm == 'tabu'){
    boot = boot.strength(data = Data, R = RR, algorithm = algthm, algorithm.args=list(score=used_score,
                                                                                     tabu = parameter[1], max.tabu = parameter[2], max.iter = parameter[3]))
  } else{
    boot = boot.strength(data = Data, R = RR, algorithm = algthm, algorithm.args=list(score=used_score))
  }
  avg_net = averaged.network(boot)
  final_net = direct_net(avg_net, used_score, Data)
  # print(nodes(final_net))
  param.bayes = bn.fit(final_net, data = Data, method = 'bayes')
  CPGSD_bayes = compute_prob(nodes(final_net), 'RUSSIA', param.bayes)
  # CPGSD_inv_bayes = compute_inv_prob(nodes(final_net), 'Russian.Fedn', param.bayes)
  param.mle = bn.fit(final_net, data = Data, method = 'mle')
  CPGSD_mle = compute_prob(nodes(final_net), 'RUSSIA', param.mle)
  # CPGSD_inv_mle = compute_inv_prob(nodes(final_net), 'Russian.Fedn', param.mle)
  return(list(CPGSD_bayes, CPGSD_mle, final_net))
}
# Poor-man's multiple assignment: `unpack[a, b] = f(...)` binds the elements
# of the list returned by f() to `a` and `b` in the caller's environment.
# Works by defining a subscript-replacement method on the sentinel `unpack`
# object (class "result"); the names inside [ ] are captured unevaluated.
unpack <- structure(NA,class="result")
"[<-.result" <- function(x,...,value) {
  args <- as.list(match.call())
  # Drop the function, the `unpack` object, and `value` — keep only the
  # target names that appeared inside [ ].
  args <- args[-c(1:2,length(args))]
  length(value) <- length(args)
  for(i in seq(along=args)) {
    a <- args[[i]]
    # Skipped (missing) slots allow e.g. unpack[, b] to ignore a position.
    if(!missing(a)) eval.parent(substitute(a <- v,list(a=a,v=value[[i]])))
  }
  x
}
mont.carl.BN <- function(Data, score, bootstrap, algthm, param, iter_mont){
  # Monte Carlo wrapper around BN(): repeat the bootstrap structure-learning
  # pipeline `iter_mont` times and aggregate (a) the per-counterparty
  # conditional default probabilities given a RUSSIA default (mean/sd over
  # runs, for both the Bayes and MLE parameter fits) and (b) an
  # arc-frequency adjacency matrix. Returns
  # list(prob_bayes, prob_mle, adjmat, final_net), where final_net is the
  # network learned in the *last* iteration.
  counterparties = colnames(Data)
  num = length(counterparties)
  # Exclude RUSSIA itself from the probability tables (it is the evidence).
  index_RUSSIA = which(counterparties == 'RUSSIA')
  count_wo_rus = counterparties[-index_RUSSIA]
  tot_bayes = as.data.frame(matrix(0.0,num-1,iter_mont))
  rownames(tot_bayes) = as.character(unlist(count_wo_rus))
  colnames(tot_bayes) = 1:iter_mont
  tot_mle = as.data.frame(matrix(0.0,num-1,iter_mont))
  rownames(tot_mle) = as.character(unlist(count_wo_rus))
  colnames(tot_mle) = 1:iter_mont
  adjmat = as.data.frame(matrix(0, num, num))
  colnames(adjmat)= as.character(unlist(counterparties))
  rownames(adjmat)= as.character(unlist(counterparties))
  for (i in 1:iter_mont){
    print(i)
    tic('BN')
    # BUG FIX: previously called BN() with globals (used_score, R, algorithm,
    # tabu_param) instead of this function's own arguments.
    unpack[CPGSD_bayes, CPGSD_mle, final_net] = BN(Data = Data, used_score = score, RR = bootstrap, algthm = algthm, param)
    for (c in count_wo_rus){
      tot_mle[c,i] = CPGSD_mle[c,]
      tot_bayes[c,i] = CPGSD_bayes[c,]
    }
    # Tally arc occurrences across iterations.
    # BUG FIX: use a separate index (and seq_len, which is safe when the
    # network has zero arcs) instead of reusing the outer loop variable `i`.
    arcs_net = arcs(final_net)
    for (j in seq_len(nrow(arcs_net))){
      adjmat[arcs_net[j,1],arcs_net[j,2]] = adjmat[arcs_net[j,1],arcs_net[j,2]] + 1
    }
    toc()
  }
  # Mean and standard deviation of probabilities over the iterations.
  # Bayes parameter fit:
  prob_bayes = as.data.frame(matrix(0.0,num-1,2))
  rownames(prob_bayes) = as.character(unlist(count_wo_rus))
  colnames(prob_bayes) = c('mean','std')
  prob_bayes[,'mean'] = apply(tot_bayes,1,mean)
  prob_bayes[,'std'] = apply(tot_bayes,1,sd)
  prob_bayes = prob_bayes[order(-prob_bayes$mean), , drop = FALSE]
  # MLE parameter fit:
  prob_mle = as.data.frame(matrix(0.0,num-1,2))
  rownames(prob_mle) = as.character(unlist(count_wo_rus))
  colnames(prob_mle) = c('mean','std')
  prob_mle[,'mean'] = apply(tot_mle,1,mean)
  prob_mle[,'std'] = apply(tot_mle,1,sd)
  prob_mle = prob_mle[order(-prob_mle$mean), , drop = FALSE]
  # Normalise arc counts to frequencies in [0, 1].
  adjmat = adjmat/iter_mont
  return(list(prob_bayes, prob_mle, adjmat, final_net))
}
|
40f00ffe871af3c15d4863a2117ada790deb7eb8
|
763668f33f32f807b20d8877b8c1037348b693f1
|
/compare.narr.wrf.r
|
feda8171ed992e8d0ca10ff03157db2851fba58b
|
[] |
no_license
|
xenos-code/RWrfUtils
|
4504b4d31e92a7f9147009eee3d7536aa663d9da
|
0b395e6be16a87c457ba8988d822dd3ea825376d
|
refs/heads/master
| 2023-03-15T13:09:38.611447
| 2016-10-19T22:56:24
| 2016-10-19T22:56:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,061
|
r
|
compare.narr.wrf.r
|
# Compare WRF model output against NARR reanalysis on a common rectilinear
# grid: read both, aggregate to `res`, bilinearly interpolate each onto a
# ~WRF-resolution lat/lon grid, and return the (optionally percentage)
# difference as a time series ("ASIS"), time-mean ("MEAN"), or its
# standard deviation ("RMSE").
compare.narr.wrf <- function(varname, vardim, subset, path, pattern, res = c("3-hourly","6-hourly","daily","monthly"), calc = c("ASIS","MEAN","RMSE"), pct = FALSE, narrpath, narrfile = NULL, prlevs = c(500, 700, 850), return_tstamp = FALSE){
  # Calculate the difference between NARR and WRF output on common 0.25x0.25 grids (the same as CPC)
  # subset = POSIXt objects or c("%Y-%m-%d %H:%M:%d", "%Y-%m-%d %H:%M:%d")
  # calc = "ASIS" - return time series
  #        "MEAN" - return mean(WRF-NARR)
  #        "SD"   - return rmse
  # prlevs - used only for 4D variables
  # Issue: Need yet to change the interpolation for precipitation to IDW; currently using bilinear for all
  require("RNetCDF")
  require("akima")
  source("get.wrf.tcon.r")
  source("get.wrf.tvar.r")
  source("get.narr.tcon.r")
  source("get.narr.tvar.r")
  source("condense.r")
  # lookup table between WRF and NARR variables
  WRFVAR = c("hgt","U","V","U10","V10","PSFC","TT","T2","RH","RH2","SH","SH2","RAINNC") # "PR","SST"
  NARFILE = c("hgt.yyyymm.nc","uwnd.yyyymm.nc","vwnd.yyyymm.nc","uwnd.10m.yyyy.nc","vwnd.10m.yyyy.nc","pres.sfc.yyyy.nc","air.yyyymm.nc","air.2m.yyyy.nc","shum.yyyy.nc","rhum.2m.yyyy.nc","shum.yyyymm.nc","shum.2m.yyyy.nc","apcp.yyyy.nc") # narr file name pattern
  NARVAR = c("hgt","uwnd","vwnd","uwnd","vwnd","pres","air","air","shum","rhum","shum","shum","apcp") # narr variable lat(y,x), lon(y,x), var(time, (level,) y, x), level [hPa], apcp [kg/m^2]
  # Precipitation accumulates, so aggregate by SUM; everything else by MEAN.
  if (varname == "RAINNC"){
    calcC = "SUM"
  } else {
    calcC = "MEAN"
  }
  # Read the WRF variable, then aggregate it to the requested resolution.
  vvv = get.wrf.tvar(varname, vardim, subset, path, pattern, calc = "ASIS", prlevs = prlevs, return_tstamp = TRUE)
  # convert the WRF variable to specified resolution
  wrfvar = condense(vvv$var, vvv$timestamp, calc = calcC, toscale = res, return_tstamp = TRUE)
  # get the WRF lat & lon to subset the NARR data to a more reasonable range covered by WRF
  wrflat = get.wrf.tcon("XLAT", path, pattern)
  wrflon = get.wrf.tcon("XLONG", path, pattern)
  wrflatrng = c(min(wrflat,na.rm=TRUE), max(wrflat,na.rm=TRUE))
  wrflonrng = c(min(wrflon,na.rm=TRUE), max(wrflon,na.rm=TRUE))
  # read the NARR variable (3-hourly)
  narrvarname = NARVAR[ which(WRFVAR == varname) ]
  if (is.null(narrfile)){
    narrfile = NARFILE[ which(WRFVAR == varname) ]
  }
  rrr = get.narr.tvar(narrvarname, subset, sublat = wrflatrng, sublon = wrflonrng, narrpath = narrpath, narrfile = narrfile, calc = "ASIS", prlevs = prlevs, return_tstamp = TRUE)
  # convert rhum to shum because pressure-level rhum is not available
  if (varname == "RH"){ # should have 4 dimensions
    Rd = 287 # [J kg-1 K-1]
    Rv = 462 # [J kg-1 K-1]
    e0 = 6.11 # [hPa]
    T0 = 273.15 # [K]
    L = 2.5 * 10^6 # [J kg-1]
    # NOTE(review): "TT" is the WRF name; the lookup table maps temperature to
    # NARR's "air" — confirm get.narr.tvar accepts "TT" here.
    tt = get.narr.tvar("TT", subset, sublat = wrflatrng, sublon = wrflonrng, narrpath, narrfile, calc = "ASIS", prlevs = prlevs, return_tstamp = TRUE) + 273.15 # convert from oC back to K
    # Clausius-Clapeyron saturation vapour pressure.
    es = e0 * exp( L / Rv * (1/T0 - 1/tt) )
    for (i in 1:length(prlevs)){
      # NOTE(review): rrr$var[,,i,] is 3-D (x, y, time) but es[,,i] indexes a
      # presumably 4-D array with 3 subscripts — verify the intended slicing.
      rrr$var[,,i,] = rrr$var[,,i,] * Rv * prlevs[i] / ( rrr$var[,,i,]*Rv + Rd ) / es[,,i] * 100
    }
  }
  # convert NARR variable to specified resolution
  narrvar = condense(rrr$var, rrr$timestamp, calc = calcC, toscale = res, return_tstamp = TRUE)
  # check that number of dimensions match
  wrfndim = length(dim(wrfvar$var))
  narrndim = length(dim(narrvar$var))
  if (wrfndim != narrndim){
    stop("The WRF variable and NARR variable are not the same number of dimensions!")
  }
  # create rectilinear grids that are approx. the same as WRF's resolution
  latCONUS = seq(from = 20., to = 50., by = mean( wrflat[,2:dim(wrflat)[2]] - wrflat[,1:(dim(wrflat)[2]-1)] ))
  lonCONUS = seq(from = -120, to = -55., by = mean( wrflon[2:dim(wrflat)[1],] - wrflon[1:(dim(wrflat)[1]-1),] ))
  # interplate WRF variable
  # - subset latCONUS, lonCONUS
  lato = latCONUS[ latCONUS >= wrflatrng[1] & latCONUS <= wrflatrng[2] ]
  lono = lonCONUS[ lonCONUS >= wrflonrng[1] & lonCONUS <= wrflonrng[2] ]
  if (wrfndim == 3){
    wrfvar_intrp = array(data=NA, dim=c( length(lono), length(lato), dim(wrfvar$var)[3] ))
    for (i in 1:dim(wrfvar$var)[3]){
      # missing values not allowed in interp
      a = as.vector(wrflon)
      b = as.vector(wrflat)
      c = as.vector(as.vector(wrfvar$var[,,i]))
      a = a[ !is.na(c) ]
      b = b[ !is.na(c) ]
      c = c[ !is.na(c) ]
      wrfvar_intrp[,,i] = interp(a,b,c, xo = lono, yo = lato)$z
    }
  } else if (wrfndim == 4){
    wrfvar_intrp = array(data=NA, dim=c( length(lono), length(lato), dim(wrfvar$var)[3], dim(wrfvar$var)[4] ))
    for (i in 1:dim(wrfvar$var)[3]){
      for (j in 1:dim(wrfvar$var)[4]){
        # missing values not allowed in interp
        a = as.vector(wrflon)
        b = as.vector(wrflat)
        c = as.vector(as.vector(wrfvar$var[,,i,j]))
        a = a[ !is.na(c) ]
        b = b[ !is.na(c) ]
        c = c[ !is.na(c) ]
        wrfvar_intrp[,,i,j] = interp(a,b,c, xo = lono, yo = lato)$z
      }
    }
  }
  # interpolate NARR variable to 0.25x0.25
  narrlat = get.narr.tcon("lat", narrpath, "*.nc", sublat = wrflatrng, sublon = wrflonrng)
  narrlon = get.narr.tcon("lon", narrpath, "*.nc", sublat = wrflatrng, sublon = wrflonrng)
  if (narrndim == 3){
    narrvar_intrp = array(data=NA, dim=c( length(lono), length(lato), dim(narrvar$var)[3] ))
    for (i in 1:dim(narrvar$var)[3]){
      # missing values not allowed in interp
      a = as.vector(narrlon)
      b = as.vector(narrlat)
      c = as.vector(narrvar$var[,,i])
      a = a[ !is.na(c) ]
      b = b[ !is.na(c) ]
      c = c[ !is.na(c) ]
      narrvar_intrp[,,i] = interp(a, b, c, xo = lono, yo = lato)$z
    }
  } else if (narrndim == 4){
    narrvar_intrp = array(data=NA, dim=c( length(lono), length(lato), dim(narrvar$var)[3], dim(narrvar$var)[4] ))
    for (i in 1:dim(narrvar$var)[3]){
      for (j in 1:dim(narrvar$var)[4]){
        # missing values not allowed in interp
        a = as.vector(narrlon)
        b = as.vector(narrlat)
        c = as.vector(narrvar$var[,,i,j])
        a = a[ !is.na(c) ]
        b = b[ !is.na(c) ]
        c = c[ !is.na(c) ]
        narrvar_intrp[,,i,j] = interp(a, b, c, xo = lono, yo = lato)$z
      }
    }
  }
  # check that the dimensions match
  if (sum( dim(narrvar_intrp) - dim(wrfvar_intrp) ) != 0){
    stop("Interpolated dimension mismatch?")
  }
  # calculate the difference
  diff = wrfvar_intrp - narrvar_intrp
  if (pct){
    diff = diff / narrvar_intrp * 100
  }
  # ASIS, MEAN, RMSE, PCTASIS, PCTMEAN, PCTRMSE
  # "MEAN"/"RMSE" collapse the time axis; "ASIS" returns the full series.
  if (calc == "ASIS"){
    if (return_tstamp){
      return( list(diff = diff, lat = lato, lon = lono, timestamp = narrvar$timestamp) )
    } else {
      return( list(diff = diff, lat = lato, lon = lono) )
    }
  } else if (calc == "MEAN"){
    if (return_tstamp){
      return( list(diff = apply(diff, MARGIN = c(1:(wrfndim-1)), FUN = mean, na.rm = TRUE), lat = lato, lon = lono, timestamp = narrvar$timestamp) )
    } else {
      return( list(diff = apply(diff, MARGIN = c(1:(wrfndim-1)), FUN = mean, na.rm = TRUE), lat = lato, lon = lono) )
    }
  } else if (calc == "RMSE"){
    # NOTE(review): "RMSE" is computed with sd(), i.e. it is the standard
    # deviation of the differences, not a true root-mean-square error.
    if (return_tstamp){
      return( list(diff = apply(diff, MARGIN = c(1:(wrfndim-1)), FUN = sd, na.rm = TRUE), lat = lato, lon = lono, timestamp = narrvar$timestamp) )
    } else {
      return( list(diff = apply(diff, MARGIN = c(1:(wrfndim-1)), FUN = sd, na.rm = TRUE), lat = lato, lon = lono) )
    }
  }
}
|
5e8fe46a8042bb17c7954049fe4257a6c1a4c080
|
72f0d2c6059e515c0cf2a683d108aa3774ee7a72
|
/man/natVelocity.Rd
|
0f563c27c9c886cc39ff89013b738ceeba2727a3
|
[
"MIT"
] |
permissive
|
dkesada/natPSOHO
|
e9c3b0a32ba75deec3f65991263a2832a8dfe4be
|
b26b1969ad6d4503795122cc3207eed7b1a72a02
|
refs/heads/master
| 2023-08-16T22:16:01.494415
| 2021-10-06T08:56:22
| 2021-10-06T08:56:22
| 317,532,661
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,265
|
rd
|
natVelocity.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/velocity.R
\name{natVelocity}
\alias{natVelocity}
\title{R6 class that defines velocities in the PSO}
\arguments{
\item{ordering}{a vector with the names of the nodes in t_0}
\item{ordering_raw}{a vector with the names of the nodes without the appended "_t_0"}
\item{max_size}{maximum number of timeslices of the DBN}
\item{n}{the new number of operations that the velocity performs}
\item{probs}{the weight of each value {-1,0,1}. They define the probability that each of them will be picked}
\item{p}{the parameter of the geometric distribution}
\item{ps1}{the origin natPosition object}
\item{ps2}{the objective natPosition object}
\item{vl}{a Velocity object}
\item{k}{a real number}
}
\value{
A new 'natVelocity' object
the natVelocity that gets the ps1 to ps2
}
\description{
Constructor of the 'natVelocity' class. Only difference with the
natCauslist one is that it has a negative cl attribute.
Getter of the abs_op attribute.
Returns the number of operations that the velocity performs.
Setter of the abs_op attribute. Intended for inside use only.
This should be a 'protected' function in Java-like OOP, but there's no
such thing in R6. This function should not be used from outside the
package.
Randomizes the Velocity's directions.
Given two positions, returns the velocity that gets the first position to the
other one.
Add both velocities directions
Multiply the Velocity by a constant real number
This function multiplies the Velocity by a constant real number.
It is non deterministic by definition. When calculating k*|V|, the
result will be floored and bounded to the set [-max_op, max_op], where max_op
is the maximum number of arcs that can be present in the network.
}
\details{
The velocities will be defined as two natural vectors where each element in
them represents the arcs from a temporal family of nodes to a receiving
node. 1-bits in the binary representation of this number represent arc
additions/deletions
}
\section{Fields}{
\describe{
\item{\code{abs_op}}{Total number of operations 1 or -1 in the velocity}
\item{\code{max_size}}{Maximum number of timeslices of the DBN}
\item{\code{cl_neg}}{Negative part of the velocity}
}}
|
bb2d512dbe9e6940c0afc2a3d3d653355ab716ca
|
523890f39f70682ee627707c1ac899667d1cea66
|
/man/summary_gp_mcmc.Rd
|
80fc7e951e23664831078b90899bc4985d90ad85
|
[
"CC0-1.0"
] |
permissive
|
davharris/nonparametric-bayes
|
1644e35f1add72e547cc542df215726358c63e5b
|
9b0473ad0e18d59311eb403453ee0aa29c8f56a8
|
refs/heads/master
| 2021-01-17T12:43:24.015511
| 2014-01-26T06:36:46
| 2014-01-26T06:36:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 636
|
rd
|
summary_gp_mcmc.Rd
|
\name{summary_gp_mcmc}
\alias{summary_gp_mcmc}
\title{Summary plots showing the trace and posteriors for the gp_mcmc estimates}
\usage{
summary_gp_mcmc(gp, burnin = 0, thin = 1)
}
\arguments{
\item{gp}{a fit of the gaussian process from gp_mcmc}
\item{burnin}{length of sequence to discard as a
transient}
\item{thin}{frequency of sub-sampling (make posterior
distribution smaller if necessary)}
}
\value{
two ggplot2 objects, one plotting the trace and one
plotting the posteriors in black with priors overlaid in
red.
}
\description{
Summary plots showing the trace and posteriors for the
gp_mcmc estimates
}
|
216cba7137205d1efb49d25c39a46891a4bf1ca5
|
5106017243dc74a8b2285ae1a93bec1135a0887e
|
/data-raw/mrc_ex_data.R
|
6d20060d7f810558f7a5fb818956e0016f564653
|
[] |
no_license
|
jvcasillas/ds4ling
|
1c2bc5d045c8a67a55cb4b05b6d6421da6a2d590
|
85176892ffc101137c1d6545de99756072d31576
|
refs/heads/main
| 2023-03-31T05:11:00.656207
| 2021-04-05T02:59:11
| 2021-04-05T02:59:11
| 329,095,207
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 435
|
r
|
mrc_ex_data.R
|
# Simulated teaching data for a multiple-regression example: age drives both
# height and test score with a common slope but different intercepts and
# noise levels. The rnorm() call order must not change, or the seeded values
# (and the saved package data) would differ.
set.seed(20210322)
n <- 1e+04
a_h <- 35    # height intercept
a_s <- 50    # score intercept
b <- 1.25    # common slope on age
sigma_h <- rnorm(n = n, mean = 0, sd = 3)   # height noise
sigma_s <- rnorm(n = n, mean = 0, sd = 2)   # score noise
age <- round(seq(5, 18, length.out = n), 1)
height <- a_h + (age * b) + sigma_h
score <- a_s + (age * b) + sigma_s
# Includes mean-centered copies of age and height alongside the raw columns.
mrc_ex_data <- data.frame(
  age,
  height,
  score,
  age_c = age - mean(age),
  height_c = height - mean(height)
)
# Save as package data (overwriting any previous version).
usethis::use_data(mrc_ex_data, overwrite = TRUE)
|
01e48b3a6d1acb303841ff863ca4de22741c0617
|
c36842d81ca5df57da61b263dd639fb8ac9ae096
|
/src/main/R/archive/R Scripts For Experiment 1.r
|
d64fb8f87bf221466643e628b98ef0fe563811a0
|
[] |
no_license
|
jimbarritt/bugsim
|
ebbc7ee7fb10df678b6c3e6107bf90169c01dfec
|
7f9a83770fff9bac0d9e07c560cd0b604eb1c937
|
refs/heads/master
| 2016-09-06T08:32:19.941440
| 2010-03-13T10:13:50
| 2010-03-13T10:13:50
| 32,143,814
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,166
|
r
|
R Scripts For Experiment 1.r
|
# Scripts for processing output of the simulation
# -------------------------
# This script assumes you have just a summary file which includes each
# iteration changing the release boundary. You could combine different
# experiments as long as they have the right structure.
source("~/Work/codemsc/bugsim/resource/R/Bugsim R Library.r", echo=FALSE)
# Experiment identifiers used to locate the summary/plan files on disk.
experimentNumber<-1
trialId<-"TrF"
baseFilename<-"edge-exp01a"
rm(inp.df)
inp.df <- importExperiment1aSummary(baseFilename, experimentNumber, trialId)
plan.df<-importExperimentPlan(baseFilename, experimentNumber, trialId)
#Some interesting description of the experiment
#str(inp.df)
levels(inp.df$iteration)
levels(inp.df$B)
levels(inp.df$L)
levels(inp.df$A)
# Replicates per iteration = total rows / number of iterations.
numReplicates<-length(inp.df$replicant)/length(levels(inp.df$iteration))
numReplicates
# Compare centre-ratio traces at the two extreme release boundaries (B).
#t-test of means of centre ratios
attach(inp.df)
subset.df<-data.frame(iteration, replicant, B, CENTRE_RATIO, CORNER_RATIO, TOTAL_RATIO, CENTRE_EGGS, CORNER_EGGS, TOTAL_EGGS)
detach(inp.df)
subset.df
attach(subset.df)
B0.df<-subset(subset.df, subset.df$B==0)
B800.df<-subset(subset.df, subset.df$B==800)
plot(B0.df$replicant,B0.df$CENTRE_RATIO, type="l", ylim=c(0.36, 0.49), col="blue")
lines(B800.df$replicant,B800.df$CENTRE_RATIO, col="green")
plot(replicant,CENTRE_RATIO, type="l")
detach(subset.df)
attach(subset.df)
# Two-factor plots of the main response variables (helper from the library).
plotTwoFactors("CENTRE_RATIO", "Centre Ratio", inp.df, baseFilename, experimentNumber, trialId, numReplicates, newPlot=FALSE, plotReverse=FALSE)
plotTwoFactors("TOTAL_EGGS", "Total Eggs", inp.df, baseFilename, experimentNumber, trialId, numReplicates, newPlot=FALSE, plotReverse=FALSE)
plotTwoFactors("ESCAPED_BUTTERFLIES", "Escaped Butterflies", inp.df, baseFilename, experimentNumber, trialId, numReplicates, newPlot=FALSE, plotReverse=FALSE)
#--------------------------------------------------------------------------------------------------------------------------------------------
# Older exploratory/scratch code kept for reference: builds per-block summary
# stats by iteration range, persists them, then hand-plots total-eggs curves.
str(inp.df$B[[1]])
# Each block of `itrBlock` iterations is summarised separately (A..E).
stats.A.df <- getSummaryStats(inp.df, 1, itrBlock, inp.df$iteration, inp.df$B, "B", "Release Distance", factorName)
stats.B.df <- getSummaryStats(inp.df, ((itrBlock*1)+1), (itrBlock*2), inp.df$iteration, inp.df$B, "B", "Release Distance", factorName)
stats.C.df <- getSummaryStats(inp.df, ((itrBlock*2)+1), (itrBlock*3), inp.df$iteration, inp.df$B, "B", "Release Distance", factorName)
stats.D.df <- getSummaryStats(inp.df, ((itrBlock*3)+1), (itrBlock*4), inp.df$iteration, inp.df$B, "B", "Release Distance", factorName)
stats.E.df <- getSummaryStats(inp.df, ((itrBlock*4)+1), (itrBlock*5), inp.df$iteration, inp.df$B, "B", "Release Distance", factorName)
#stats.F.df <- getSummaryStats(inp.df, ((itrBlock*5)+1), (itrBlock*6), inp.df$iteration, inp.df$B, "B", "Release Distance", factorName)
#Put the list of the stats you want into the all.stats array ....
all.stats<-list(stats.A.df, stats.B.df, stats.C.df, stats.D.df, stats.E.df)
#This is just to do the first four
#all.stats<-list(stats.A.df, stats.B.df, stats.C.df, stats.D.df)
#all.stats<-list(stats.B.df)
#Writes out CSV files for all the stats so we dont have to process them again each time.
writeStats(all.stats, baseFilename, experimentNumber, trialId)
# This can be used to read them back in later.
all.stats<-readStats(baseFilename, experimentNumber, trialId)
# Manual plotting: open a PDF device and draw the B=levels[1] curve first.
directory <- createDirectoryName(baseFilename, experimentNumber, trialId)
filename<-sprintf("%s/TRIAL %s - N-1000 - reps-%d - %s.pdf", directory, trialId, numReplicates, factorTitle)
print("Writing Graph to '%s'")
pdf(filename, width=8.75, height=8.25)
lineCols<-rainbow(length(levels(inp.df$B)))
iB<-1
eggStats.df$TOTAL_EGGS<-0
eggStats.df<-data.frame("L"=as.numeric(levels(inp.df$L)))
# Collect the block means (A..E) for the current B level into one column.
eggStats.df$TOTAL_EGGS[[1]]<-stats.A.df$MEAN[[iB]]
eggStats.df$TOTAL_EGGS[[2]]<-stats.B.df$MEAN[[iB]]
eggStats.df$TOTAL_EGGS[[3]]<-stats.C.df$MEAN[[iB]]
eggStats.df$TOTAL_EGGS[[4]]<-stats.D.df$MEAN[[iB]]
eggStats.df$TOTAL_EGGS[[5]]<-stats.E.df$MEAN[[iB]]
plot(eggStats.df, type="l", lty=2, ylim=c(0, 250),xlim=c(1, 85) ,col=lineCols[iB],ylab="Total Eggs")
title("Total Eggs Vs L And B, A=20, S=10, N=1000, REPS=100")
points(eggStats.df$L, eggStats.df$TOTAL_EGGS, pty=2, col=lineCols[iB])
for (iB in 2:5) {
eggStats.df$TOTAL_EGGS<-0
eggStats.df<-data.frame("L"=as.numeric(levels(inp.df$L)))
eggStats.df$TOTAL_EGGS[[1]]<-stats.A.df$MEAN[[iB]]
eggStats.df$TOTAL_EGGS[[2]]<-stats.B.df$MEAN[[iB]]
eggStats.df$TOTAL_EGGS[[3]]<-stats.C.df$MEAN[[iB]]
eggStats.df$TOTAL_EGGS[[4]]<-stats.D.df$MEAN[[iB]]
eggStats.df$TOTAL_EGGS[[5]]<-stats.E.df$MEAN[[iB]]
lines(eggStats.df$L, eggStats.df$TOTAL_EGGS, lty=2, col=lineCols[iB])
points(eggStats.df$L, eggStats.df$TOTAL_EGGS, pty=2, col=lineCols[iB])
}
legends2<-levels(inp.df$L)
legends2<-createFactorLegend("B=%s", inp.df$B)
legend("topleft",horiz=TRUE, legend=legends2, fill=lineCols, inset=0.05)
dev.off()
#===============================================================================================================================================================
#Experimental
help(errbar)
#From here is all old stuff with variations on the theme.
experimentId<-16
directory<-sprintf("edge-effect-exp01a-%03d", experimentId)
summaryFilename<-sprintf("%s/summary-edge-effect-exp01a-%03d-001.csv", directory, experimentId)
expSummary.df <- importIterationSummary(summaryFilename)
expSummary.df
is.factor(expSummary.df$iteration)
length(expSummary.df$iteration)
#Experiment 1a : plotting effect of release boundary distance against centre effect...
#quartz(width=11.75, height=8.25)
pdf("experiment 1a summary - L=5 S=10.pdf" , width=11.75, height=8.25)
par(mfrow=c(1, 2))
maxIt <- length(levels(expSummary.df$iteration))
plot2FactorsWithStdErr(expSummary.df, 1, maxIt, expSummary.df$iteration, expSummary.df$B, "B", "Release Distance", "CENTRE_RATIO")
plot2FactorsWithStdErr(expSummary.df, 1, maxIt, expSummary.df$iteration, expSummary.df$B, "B", "Release Distance", "timesteps")
dev.off()
#DRAW BOTH TOGETHER:
baseFilename<-"summary-edge-effect-exp01a-015"
filenameA<-sprintf("%s-001.csv", baseFilename)
filenameB<-sprintf("%s-002.csv", baseFilename)
filenameA<-summaryFilename
inp.A.df <- importIterationSummary(filenameA)
inp.B.df <- importIterationSummary(filenameB)
str(inp.A.df)
levels(inp.A.df$iteration)
levels(inp.A.df$B)
stats.L01.A20.df <- getSummaryStats(inp.A.df, 1, 5, inp.A.df$iteration, inp.A.df$B, "B", "Release Distance", "CENTRE_RATIO")
stats.L05.A20.df <- getSummaryStats(inp.A.df, 6, 10, inp.A.df$iteration, inp.A.df$B, "B", "Release Distance", "CENTRE_RATIO")
stats.L10.A20.df <- getSummaryStats(inp.A.df, 11, 15, inp.A.df$iteration, inp.A.df$B, "B", "Release Distance", "CENTRE_RATIO")
stats.L20.A20.df <- getSummaryStats(inp.A.df, 16, 20, inp.A.df$iteration, inp.A.df$B, "B", "Release Distance", "CENTRE_RATIO")
stats.L40.A20.df <- getSummaryStats(inp.A.df, 21, 25, inp.A.df$iteration, inp.A.df$B, "B", "Release Distance", "CENTRE_RATIO")
stats.L80.A20.df <- getSummaryStats(inp.A.df, 26, 30, inp.A.df$iteration, inp.A.df$B, "B", "Release Distance", "CENTRE_RATIO")
all.stats<-list(stats.L01.A20.df, stats.L05.A20.df, stats.L10.A20.df, stats.L20.A20.df, stats.L40.A20.df, stats.L80.A20.df)
legends<-c("L=1", "L=5","L=10","L=20","L=40","L=80")
all.stats<-list(stats.L20.A20.df, stats.L40.A20.df, stats.L80.A20.df)
legends<-c("L=20","L=40","L=80")
stats.L01.A20.df <- getSummaryStats(inp.A.df, 1, 2, inp.A.df$iteration, inp.A.df$B, "B", "Release Distance", "CENTRE_RATIO")
stats.L05.A20.df <- getSummaryStats(inp.A.df, 3, 4, inp.A.df$iteration, inp.A.df$B, "B", "Release Distance", "CENTRE_RATIO")
stats.L80.A20.df <- getSummaryStats(inp.A.df, 5, 6, inp.A.df$iteration, inp.A.df$B, "B", "Release Distance", "CENTRE_RATIO")
all.stats<-list(stats.L01.A20.df, stats.L05.A20.df, stats.L80.A20.df)
legends<-c("L=1", "L=5","L=80")
all.stats<-list(stats.L01.A20.df)
legends<-c("L=1")
df.list<-all.stats
title<-"Release boundary vs L (A=20, S=10)"
xaxisLabel<-"B - Release Distance"
yaxisLabel<-"Centre Ratio"
pdf("Varying L and B S=10 A=20.pdf" , width=11.75, height=8.25)
#levels(expSummary.df$L)
#levels(expSummary.df$iteration)
stats.A1.df <- getSummaryStats(inp.A.df, 1, 5, inp.A.df$iteration, inp.A.df$B, "B", "Release Distance", "L")
stats.B.df <- getSummaryStats(inp.B.df, 10, 18, inp.B.df$iteration, inp.B.df$B, "B", "Release Distance", "CENTRE_RATIO")
stats.B1.df <- getSummaryStats(inp.B.df, 10, 18, inp.B.df$iteration, inp.B.df$B, "B", "Release Distance", "timesteps")
plot(stats.A.df$X, stats.A.df$MEAN)
quartz(width=11.75, height=8.25)
outputFilename <- "experiment 1a summary - L=1 S=10 vs L=60 S=10 (A=20)"
pdf(sprintf("%s.pdf", outputFilename) , width=11.75, height=8.25)
#png(file=sprintf("%s.png", outputFilename))
par(mfrow=c(1, 2))
plot2Results1A(stats.A.df, stats.B.df, "Release Distance", "Centre Ratio", "L=1, S=10, A=20", "L=60, S=10, A=20")
plot2Results1A(stats.A1.df, stats.B1.df, "Release Distance", "Timesteps", "L=1, S=10, A=20", "L=60, S=10, A=20")
dev.off()
# Plot summary of border distance effects
quartz(width=10, height=7)
par(mfrow=c(1, 2))
plot2Factors(expSummary.df, "Border Distance", "Timesteps for 100 eggs", expSummary.df$B, expSummary.df$timesteps)
plot2Factors(expSummary.df, "Border Distance", "Centre Ratio", expSummary.df$B, expSummary.df$CENTRE_RATIO)
# Plot Interedge separation for varying values of L + A
outputGraphs(expSummary.df, "S", "L", "A", expSummary.df$S, expSummary.df$L, expSummary.df$A)
# Plot MoveLength vs Angle of turn and S
outputGraphs(expSummary.df, "L", "S","A", expSummary.df$L, expSummary.df$S, expSummary.df$A)
#This is the most useful view .... Plotting angle of turn on each graph with Step length accross the top and separation down.
outputGraphs(expSummary.df, "A", "S","L", expSummary.df$A, expSummary.df$S, expSummary.df$L)
radiusSubset.df <- subset(expSummary.df, expSummary.df$R=="5")
outputGraphs(radiusSubset.df, "A", "S","L", radiusSubset.df$A, radiusSubset.df$S, radiusSubset.df$L, makePDF=TRUE, pdfFilename="Summary Graphs-exp01-009-R5.pdf")
radiusSubset.df <- subset(expSummary.df, expSummary.df$R=="10")
outputGraphs(radiusSubset.df, "A", "S","L", radiusSubset.df$A, radiusSubset.df$S, radiusSubset.df$L, makePDF=TRUE, pdfFilename="Summary Graphs-exp01-009-R10.pdf")
#Calculations for proportional Border where the last S doesnt work.
IS <- c(0, 1, 2, 3)
IS
S.subset <- factor(IS, levels=sort(unique.default(IS)), labels=c("0", "5", "10", "20"))
S.subset
radiusSubset.df <- subset(expSummary.df, expSummary.df$R=="5")
outputGraphs(radiusSubset.df, "A", "S","L", radiusSubset.df$A, S.subset, radiusSubset.df$L, makePDF=TRUE, pdfFilename="Summary Graphs-exp01-011-PROP-R5.pdf")
radiusSubset.df <- subset(expSummary.df, expSummary.df$R=="10")
outputGraphs(radiusSubset.df, "A", "S","L", radiusSubset.df$A, S.subset, radiusSubset.df$L, makePDF=TRUE, pdfFilename="Summary Graphs-exp01-011-PROP-R10.pdf")
#Plot Radius vs A and L for S=10
T1.df <- subset(expSummary.df, expSummary.df$S=="20")
outputGraphs(T1.df, "A", "R","L", T1.df$A, T1.df$R, T1.df$L)
zeroS.df <- subset(expSummary.df, expSummary.df$S=="0")
zeroS.df
str(expSummary$R)
radiusSubset.df
|
5aa833f114d6df19f849a00949314aafe5b1701f
|
4128114972680b1af5687fedeca78ab8b82d4c91
|
/Lectures/ClassNotes_20181105.R
|
7558dc562e1e435cc0c4bfa916e7348abb2ac568
|
[] |
no_license
|
invertdna/SMEA_Rcourse_2019
|
80cc7ff47e9c1e98a342d6d7562a065012129be3
|
a4525bf452150eed1f5c1a82f881a0c48b09a0b4
|
refs/heads/master
| 2021-12-15T01:17:00.107941
| 2021-12-06T17:34:46
| 2021-12-06T17:34:46
| 207,862,551
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 393
|
r
|
ClassNotes_20181105.R
|
# Class notes: writing plots to image files and project-relative paths.
#class notes 20181205
# NOTE(review): jpeg() interprets width/height in pixels by default, so a
# 10 x 3 canvas is almost certainly too small to draw on — confirm whether
# units = "in" was intended.  Also note the inline comment below says "pdf"
# although jpeg() is being used.
jpeg("myplot.jpeg", width = 10, height = 3) #tell R you want to make a pdf, and (optionally) tell it what size you want the canvas to be
plot(mpg~hp, data = mtcars) # Then make the plot
dev.off() #then tell R you are done writing the plot
# here::here() resolves paths relative to the project root, independent of
# the current working directory.
library(here)
here()
setwd(here("Analysis"))
# NOTE(review): hard-coded absolute path; only valid on the original
# author's machine.
setwd("/Users/rpk")
getwd()
here()
|
2ea1042c3c8a127a10de44552db848f00bae42aa
|
9dc99f6a37c52dd23f88f0d8f450abda50665db1
|
/RScripts_and_Data/CVheartDiseaseGBMvsRF.R
|
cf7665762837c5b57f5d447a650955fa013f0cec
|
[] |
no_license
|
AxilleasMoukoulis/Predictive-Analytics-Techniques-for-the-administration-of-patients-with-chronic-heart-rate-diseases
|
d5c8a7f0a64bc6c543b4dc2ca21992ff4ec7c523
|
c42b3479f66d6981d6279762107822f46e09682f
|
refs/heads/master
| 2020-11-29T23:33:31.355979
| 2017-08-02T11:25:46
| 2017-08-02T11:25:46
| 96,367,297
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,945
|
r
|
CVheartDiseaseGBMvsRF.R
|
# k-fold cross-validation comparing a random forest and a GBM on the
# Cleveland heart-disease data.  Column 14 ("num") is the binary outcome;
# columns 1-13 are predictors.  Reports per-fold AUC and the average AUC
# for each model.
#rm(list = ls())
require(gbm)
require(dplyr)
require(caret)
require(verification)
require(randomForest)
#Read data from csv file
# "?" marks missing values in this dataset; rows with any NA are dropped.
df <- read.csv("cleveland.csv", sep = ",", na.strings = "?")
s <- sum(is.na(df))
df <- na.omit(df)
dim(df)
#Transform to Binomial attribute
# Original "num" codes disease severity 0-4; collapse 1-4 into 1 (disease).
df$num[df$num >0] <- 1
############# Load and transform data #################
df$cp <- factor(df$cp)
df$sex <- factor(df$sex)
df$thal <- factor(df$thal)
# NOTE(review): three labels are assigned to the two-level sex factor; the
# trailing "" becomes an unused level — confirm this is intentional.
levels(df$sex) <- c("female", "male", "")
levels(df$cp) <- c("typical angina", "atypical angina", "non-anginal pain", "asymptomatic")
levels(df$thal) <- c("normal", "fixed defected", "reversable defect")
#Split data to training set (70%) and test set (30%)
set.seed(10)
inTrainRows <- createDataPartition(df$num, p = 0.7, list = FALSE)
train <- df[inTrainRows, ]
test <- df[-inTrainRows, ]
nrow(train) / (nrow(test) + nrow(train))
head(train)
summary(train)
#################### partition the data #####################
#there's a function in plyr that will do this, but it's easy to do your own
#for k-fold CV, you create k different partitions in the data
#my data are already in a random order
k = 10
# Fold size; because of floor(), up to k-1 trailing rows of `train` are
# never used as a test fold.
n = floor(nrow(train)/k) #n is the size of each fold
#I rounded down to avoid going out of bounds on the last fold
err.vect = rep(NA,k) #store the error in this vector
#how to partition the first fold
# Worked example of the fold indexing used in the loops below (i = fold id).
i = 1
s1 = ((i - 1) * n+1) #the start of the subset
s2 = (i * n) #the end of the subset
subset = s1:s2 #the range of the subset
#because of rounding, the end of the subset may be slighly out of range
cv.train = train[-subset,] #train the model using this data
cv.test = train[subset,] #test the model's performance on this data
#to do "standard" CV, we could just run the model on the cv.train data
#and test it on the cv.test data
#k-fold CV allows us to use all of the data for the final model
#but still have realistic model performance estimates
#next, move to the second fold:
i = 2
#...
##############################################################
########################### CV for random forest ############################
#need to loop over each of the folds
for(i in 1:k){
  s1 = ((i - 1) * n+1) #the start of the subset
  s2 = (i * n) #the end of the subset
  subset = s1:s2 #the range of the subset
  cv.train = train[-subset,] #train the model using this data
  cv.test = train[subset,] #test the model's performance on this data
  #run the random forest on the train set
  fit = randomForest(x = cv.train[,-14], y = as.factor(cv.train[,14]))
  #make predictions on the test set
  # Class-1 probability column, used as the score for the ROC curve.
  prediction = predict(fit, newdata = cv.test[,-14], type = "prob")[,2]
  #calculate the model's accuracy for the ith fold
  err.vect[i] = roc.area(cv.test[,14], prediction)$A
  print(paste("AUC for fold", i, ":", err.vect[i]))
}
print(paste("Average AUC:", mean(err.vect)))
#each fold has a different error rate,
#and that's why we do k-fold CV!
##############################################################################
########################### CV for gbm ############################
# err.vect is reused (overwritten fold by fold) for the GBM results.
ntrees = 5000 #the default is only 100
for(i in 1:k){
  s1 = ((i - 1) * n+1) #the start of the subset
  s2 = (i * n) #the end of the subset
  subset = s1:s2 #the range of the subset
  cv.train = train[-subset,]
  cv.test = train[subset,] #test the model's performance on this data
  #estimate the gbm on the cv.train set
  fit = gbm.fit(x = cv.train[,-14], y = cv.train[,14],
                n.trees = ntrees, verbose = FALSE, shrinkage = 0.01,
                interaction.depth = 6, n.minobsinnode = 10, distribution = "bernoulli")
  #use bernoulli or adaboost for classification problems
  #make predictions on the test set
  # predict.gbm returns link-scale scores here; roc.area only uses their
  # ranking, so the AUC is unaffected.
  prediction = predict(fit, newdata = cv.test[,-14], n.trees = ntrees)
  err.vect[i] = roc.area(cv.test[,14], prediction)$A
  print(paste("AUC for fold", i, ":", err.vect[i]))
}
print(paste("Average AUC:", mean(err.vect)))
|
24cf10b95c37e7593b28cbeeabf077c77dcce010
|
6c9134f160944a72d8d60bb4ef9be336a972ac26
|
/setup.R
|
04b52e082061211c42142253e6b7a47990f1324a
|
[] |
no_license
|
clemente-lab/mmeds-meta
|
123354955f439aeb34b23f3ed1c36c397b9bd252
|
6425c70d40cb82700840c16e9a974151333c0d70
|
refs/heads/master
| 2023-08-07T13:46:56.564982
| 2023-01-11T14:01:32
| 2023-01-11T14:01:32
| 111,951,207
| 3
| 1
| null | 2023-07-20T13:17:39
| 2017-11-24T19:50:13
|
Python
|
UTF-8
|
R
| false
| false
| 120
|
r
|
setup.R
|
#!/usr/bin/env R
# One-off setup: install the plotting packages this project depends on.
install.packages(c('GGally', 'ggplot2', 'RColorBrewer', 'ggrepel'), repos="http://cran.r-project.org")
|
63a1f6508cc51b433d78a0ed48cbb7916af8f422
|
9c4a346af265a744f11738fd1e1e22757ac5d7c5
|
/plot1.R
|
96a784de911aa397a0296a63868a24e177edcf58
|
[] |
no_license
|
CrysBlank/ExData_Plotting1
|
bd11cf1f934bacb2a5cf7538d4df363c86aa61bc
|
ac0ba9710d3611ad09eab60a62285555ed3f7e4c
|
refs/heads/master
| 2021-01-18T01:48:34.394812
| 2015-06-07T17:11:03
| 2015-06-07T17:11:03
| 37,017,770
| 0
| 0
| null | 2015-06-07T13:35:07
| 2015-06-07T13:35:07
| null |
UTF-8
|
R
| false
| false
| 608
|
r
|
plot1.R
|
# plot1.R — histogram of Global Active Power over 2007-02-01/02 from the
# UCI "Individual household electric power consumption" dataset.
PowerConsump <- read.table("household_power_consumption.txt",sep=";",header=TRUE)
PowerConsump["Date"] <- as.Date(PowerConsump$Date,"%d/%m/%Y")
#PowerConsump["Time"] <- strptime(PowerConsump$Time,"%H:%M:%S")
# Keep only the two days of interest.
dataToUse <- PowerConsump[(PowerConsump$Date >= "2007-02-01" & PowerConsump$Date <= "2007-02-02"), ]
GlobalAcPowerDS <- dataToUse[dataToUse$Global_active_power != "?",]
# BUG FIX: the column is read as a factor (missing values are coded "?"),
# so as.numeric() alone returns factor *level codes*, not the measurements.
# Convert via as.character() first.  The values are already in kilowatts,
# so the previous *.001 rescale (which only "worked" on level codes) is
# dropped.
GlobalAcPowerDS$Global_active_power <- as.numeric(as.character(GlobalAcPowerDS$Global_active_power))
png("plot1.png")
hist(GlobalAcPowerDS$Global_active_power,col="darkblue",main="Global Active Power",xlab="Global Active Power(kilowatts)")
# BUG FIX: close the device so plot1.png is actually flushed to disk.
dev.off()
|
8bdb972b7844cf673a6d764f57ee27b2e47aef92
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/lakemorpho/examples/lakeShorelineDevelopment.Rd.R
|
c3aef7249d2f9cbcfe3939c99e929112880b4d99
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 217
|
r
|
lakeShorelineDevelopment.Rd.R
|
# Extracted example code from the lakemorpho package's
# lakeShorelineDevelopment help page.
library(lakemorpho)
### Name: lakeShorelineDevelopment
### Title: Function to calculate shoreline development
### Aliases: lakeShorelineDevelopment
### ** Examples
# data(lakes) loads the package example data, which provides the 'inputLM'
# lakeMorpho object used below.
data(lakes)
lakeShorelineDevelopment(inputLM)
|
48eb62cb5a6a1944dbabddc172278ab1b7d2b9dc
|
b0610f09e3a9dff68d0f547b7ae50469e4672f41
|
/man/tv_summaries.Rd
|
280e31b71d90c658a6c67473b08e235628af23e8
|
[] |
no_license
|
cenux413/gdeltr2
|
2c927997bef56aab3475884cdc958234f3f2eca9
|
c13b2fa56308cd7e2b0e9d733ef0caafaa539813
|
refs/heads/master
| 2020-09-10T00:11:30.308363
| 2019-11-11T18:07:21
| 2019-11-11T18:07:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 669
|
rd
|
tv_summaries.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tv_api.R
\name{tv_summaries}
\alias{tv_summaries}
\title{GDELT TV Show Summary}
\usage{
tv_summaries(date_start = "2018-02-02", date_end = Sys.Date(),
return_message = T)
}
\arguments{
\item{date_start}{Start date of data, cannot be before June 16th, 2009}
\item{date_end}{End date of data, cannot exceed current date}
\item{return_message}{if \code{TRUE} returns a message}
}
\value{
a \code{tibble}
}
\description{
Acquires daily summary data for the television shows
that GDELT monitors
}
\examples{
tv_summaries(date_start = "2018-02-02", date_end = Sys.Date(), return_message = T)
}
|
6060d724d46ee6836ecd52e3200f42f0b882a5e9
|
0a3805db86a688d351c00801029f0ed2c6010cbf
|
/homeworks/homework_04/src/DAAG06_08.R
|
be3c2a712cad99869245483e1db4b4ddc5ca7b1c
|
[
"MIT"
] |
permissive
|
wilsonjefferson/DSSC_SMDS
|
9b2c9d94f86ee28c11ec561617dda07bd494f905
|
580f8e9f5620197a321040737bdb5b9236963145
|
refs/heads/master
| 2022-12-27T11:08:28.718305
| 2020-09-30T10:28:39
| 2020-09-30T10:28:39
| 297,358,392
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 65
|
r
|
DAAG06_08.R
|
/home/emaballarin/DSSC/statmeth/HOMEWORK/rexthor/src/DAAG_06_08.R
|
cea29335c937cfe1bd43a0b85fa2cb485da0764b
|
8c1333fb9fbaac299285dfdad34236ffdac6f839
|
/foundations-of-inference/inference.R
|
c5793539446428f4929930b870e06ffb5ac633d1
|
[
"MIT"
] |
permissive
|
cassiopagnoncelli/datacamp-courses
|
86b4c2a6d19918fc7c6bbf12c51966ad6aa40b07
|
d05b74a1e42b119efbbf74da3dfcf71569c8ec85
|
refs/heads/master
| 2021-07-15T03:24:50.629181
| 2020-06-07T04:44:58
| 2020-06-07T04:44:58
| 138,947,757
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,163
|
r
|
inference.R
|
# Course notes on statistical inference: permutation tests, p-values,
# hypothesis-testing errors, and bootstrap confidence intervals.
# rep_sample_n() comes from the oilabs/infer packages loaded below.
# PARLANCE.
#
# Null hypothesis (H_0): The claim that is not interesting.
#
# Alternative hypothesis (H_a): The claim corresponding to the research
# hypothesis.
#
# Example 1.
#
# Research: Compare the speed of two different species of cheetah.
# (Population measure.)
#
# H_0: Asian and African cheetah run at the same speed. (Not interesting)
# H_a: African > Asian, on average. (Research)
# Example 2.
#
# Research: Candidate X will win.
# (Population measure.)
#
# H_0: Candidate X will get half of the votes.
# H_a: Candidate X gets more than half of the votes.
# Exploratory inference.
library('dplyr')
library('ggplot2')
library('NHANES')
library('infer')
library('oilabs')
data('NHANES')
names(NHANES)
ggplot(NHANES, aes(x = Gender, fill = HomeOwn)) +
  geom_bar(position = "fill") +
  ylab("Relative frequencies")
ggplot(NHANES, aes(x = SleepHrsNight, col = SleepTrouble)) +
  geom_density(adjust = 2) +
  facet_wrap(~ HealthGen)
# Study 1.
# Home ownership example.
homes <- NHANES %>%
  select(Gender, HomeOwn) %>%
  filter(HomeOwn %in% c("Own", "Rent"))
# One permutation: shuffle HomeOwn to break any Gender/HomeOwn link, then
# compare the male-female difference in ownership rates to the observed one.
homes %>%
  mutate(HomeOwn_perm = sample(HomeOwn)) %>%
  group_by(Gender) %>%
  summarize(prop_own_perm = mean(HomeOwn_perm == "Own"),
            prop_own = mean(HomeOwn == "Own")) %>%
  summarize(diff_perm = diff(prop_own_perm),
            diff_orig = diff(prop_own))
homeown_perm <- homes %>%
  rep_sample_n(size = nrow(homes), reps = 10) %>%
  mutate(HomeOwn_perm = sample(HomeOwn)) %>%
  group_by(replicate, Gender) %>%
  summarize(prop_own_perm = mean(HomeOwn_perm == "Own"),
            prop_own = mean(HomeOwn == "Own")) %>%
  summarize(diff_perm = diff(prop_own_perm),
            diff_orig = diff(prop_own)) # male - female
homeown_perm
ggplot(homeown_perm, aes(x = diff_perm)) +
  geom_dotplot(binwidth = .001)
homeown_perm <- homes %>%
  rep_sample_n(size = nrow(homes), reps = 100) %>%
  mutate(HomeOwn_perm = sample(HomeOwn)) %>%
  group_by(replicate, Gender) %>%
  summarize(prop_own_perm = mean(HomeOwn_perm == "Own"),
            prop_own = mean(HomeOwn == "Own")) %>%
  summarize(diff_perm = diff(prop_own_perm),
            diff_orig = diff(prop_own)) # male - female
ggplot(homeown_perm, aes(x = diff_perm)) +
  geom_dotplot(binwidth = 1e-3)
# Full null distribution from 1000 permutations.
homeown_perm <- homes %>%
  rep_sample_n(size = nrow(homes), reps = 1000) %>%
  mutate(HomeOwn_perm = sample(HomeOwn)) %>%
  group_by(replicate, Gender) %>%
  summarize(prop_own_perm = mean(HomeOwn_perm == "Own"),
            prop_own = mean(HomeOwn == "Own")) %>%
  summarize(diff_perm = diff(prop_own_perm),
            diff_orig = diff(prop_own)) # male - female
ggplot(homeown_perm, aes(x = diff_perm)) +
  geom_density()
# Here, 228 out of 1000 differences are more extreme than the
# observed differences.
#
# This represents only 23% of the null statistics.
#
# In other words, 228 permutations are smaller than the original
#
# Thus, the observed difference is consistent with the permuted
# difference.
#
# So we failed to reject the null hypothesis,
#
# H_0: proportions are equal.
# H_a: proportions are not equal.
#
# That means proportions are likely to be equal.
#
# That means if gender played no role in home ownership we'd
# likely get data similar to those observed. However this does
# not mean that we know for sure gender does not play a role,
# it is possible the true difference in home ownership rates is
# 0.1 and surely our population would be consistent with that
# population as well.
#
# We fail to reject the null hypothesis: there is no evidence
# that our data is inconsistent with the null hypothesis.
#
# There is no claim to generalize in larger population,
# there is nothing to report.
#
ggplot(homeown_perm, aes(x = diff_perm)) +
  geom_density() +
  geom_vline(aes(xintercept = diff_orig),
             col = "red")
homeown_perm %>%
  summarize(sum(diff_orig >= diff_perm))
# Why 0.05?
#
# "It is a common practice to judge a result significant if
# it is of such a magnitude that it would have been produced
# by chance not more frequently than once in twenty trials.
#
# "This is an arbitrary, but convenient, level of significance
# for the practical investigator, but it doesn't mean he allows
# himself to be deceived once in every twenty experiments.
#
# "The test of significance only tells him what to ignore,
# namely all experiments in which significant results are not
# obtained.
#
# "He should only claim that a phenomenon is experimentally
# demonstrable when he knows how to design an experiment so
# so that it will rarely fail to give a significant result.
#
# "Consequently, isolated significant results which he doesn't
# know how to reproduce are left in suspense pending further
# investigation."
#
# R.A. Fischer, 1929.
#
# p-value.
#
# Probability of observing data as or more extreme than what
# we actually got given that the null hypothesis is true.
#
# In other words, suppose null hypothesis is true. Then p-value
# is the likelihood that we find data as or more extreme than
# that observed.
#
# It is the proportion of times the observed difference is less
# than or equal to the permuted difference.
#
# Study 2.
# Gender discrimination on promotion.
#
# H_0: gender and promotion are unrelated variables.
# H_a: men are more likely to be promoted.
#
# Using rep_sample_n(), you took 5 repeated samples
# (i.e. Replications) of the disc data, then shuffled
# these using sample() to break any links between gender
# and getting promoted. Then for each replication, you
# calculated the proportions of promoted males and females
# in the dataset along with the difference in proportions.
#
# Probability of observing a difference of 0.2917 or greater
# in promotion rates do not vary across gender is 0.03 = p-value.
#
# Because 0.03 < 0.05 we REJECT H_0 in favour of H_a.
#
# Hypothesis testing errors: opportunity cost.
#
# Two control groups of students are each given a set of
# options about buying something.
#
# Group 1: (1) buy, or (2) not buy.
# Group 2: (1) buy, or (2) not buy, save $20 for next purchase.
#
# Then we get the conversion rates (prop_buy) for each group. We
# want to know whether there is a difference in conversion rates
# by using messaging in Group 2, or messaging in Group 1 is better.
#
# Here we have hypothesis
#
# H_0: Difference in conversion rates is zero.
# H_a: Difference in conversion rates is not zero.
#
# In other words,
#
# H_0: Reminding students that they can save money for later
# purchases will not have any impact on students' spending
# decisions.
# H_a: Reminding students that they can save money for later
# purchases will change the chance they will continue with
# a purchase.
#
# Requires the course's opportunity.csv (columns: decision, group).
opportunity = read.csv("foundations-of-inference/opportunity.csv", header=T)
opp_perm <- opportunity %>%
  rep_sample_n(size = nrow(opportunity), reps = 1000) %>%
  mutate(dec_perm = sample(decision)) %>%
  group_by(replicate, group) %>%
  summarize(prop_buy_perm = mean(dec_perm == "buyDVD"),
            prop_buy = mean(decision == "buyDVD")) %>%
  summarize(diff_perm = diff(prop_buy_perm),
            diff_orig = diff(prop_buy)) # treatment - control
opp_perm
ggplot(opp_perm, aes(x = diff_perm)) +
  geom_histogram(binwidth = .005) +
  geom_vline(aes(xintercept = diff_orig), col = "red")
# p-value, or the proportion of permuted differences less than or
# equal to the observed difference.
#
# Conclusion is we can confidently say the different messaging
# caused the students to change their buying habits, since they
# were randomly assigned to treatment and control groups.
#
opp_perm %>% summarize(onesided.pvalue = mean(diff_perm <= diff_orig))
opp_perm %>% summarize(twosided.pvalue = 2 * mean(diff_perm <= diff_orig))
# ERRORS IN HYPOTHESIS TESTING.
#
#                 +-----------------------+-------------------+
#                 |   Don't reject H_0    |    Reject H_0     |
# +---------------|-----------------------+-------------------+
# | H_0 is true   |                       |   type I error    |
# |               |                       |                   |
# | H_a is true   |    type II error      |                   |
# +---------------+-----------------------+-------------------+
#
# type II error: false negative
# type I error: false positive
#
# Here, if you always claim there is a difference in proportions,
# you'll always reject the null hypothesis, so you'll only make
# type I errors, if any.
# In other words, always rejecting H_0 will cease type II errors.
#
# Moreover,
#
# Type I: There is a difference in proportions, and the observed
# difference is big enough to indicate that the proportions are
# different.
#
# Type II: There is not a difference in proportions, and the
# observed difference is not large enough to indicate that the
# proportions are different.
#
# PARAMETERS AND CONFIDENCE INTERVALS.
#
# Research questions examples.
#
# Hypothesis test (comparative):
# - under which diet plan will participants lose more weight on avg?
# - which of two car manufacturers more likely to recommend to friends?
# - are education level and avg income linearly related?
#
# Confidence interval (estimation):
# - how much should participants expect to lose on avg?
# - what % of users are likely to recommend Subaru to friends?
# - for each additional year of education what is predicted income?
#
#
# Confidence interval.
# "We are 95% sure that between 12% and 34% of US population
# recommends Subaru."
#
# The population parameter has to do with *all* the population.
#
# BOOTSTRAPPING: resampling with replacement.
#
# Variability of p-hat from the population.
#
# Typically the researcher has only one sample from the population.
# Turns out bootstrapping several re-samples (with repetition,
# of course) from the sample yields awesome results.
# Actually, we know the size of the sample we need to have to give
# a certain standard deviation.
#
# Goal here is to find the parameter when all we have is the statistic,
# never knowing whether the sample really contains the true parameter.
#
# Technical conditions apply:
# - Sampling distribution of the statistic is reasonably symmetric and
#   bell-shaped.
# - Sample size is reasonably large.
#
# Simulated sample: 50 binomial counts out of 100 trials with p = 0.3.
samp = data.frame(a = rbinom(50, size = 100, prob = 0.3))
head(samp)
# NOTE(review): resamples here are of size 10, not nrow(samp) = 50;
# a textbook bootstrap would resample the full sample size — confirm
# whether size 10 is intentional for this exercise.
samp_boot = samp %>% rep_sample_n(10, replace = T, reps = 5000)
head(samp_boot)
props = samp_boot %>% summarize(prop_yes = mean(a)) # p-hat.
head(props)
props %>% summarize(sd(prop_yes)) # sd of p-hat.
# Distribution of p-hat.
#
# Approximately 95% of samples will produce p-hats within ±2 sd,
# in other words 95% confident that the true parameter is within
# the calculated confidence interval.
#
ggplot() +
  geom_density(
    data = props,
    aes(x = prop_yes),
    col = "blue",
    bw = .1
  )
# 30 below is the assumed true statistic on the count scale
# (0.3 probability x 100 trials) — the CI is centred on it.
props %>%
  mutate(lower = 30 - 2 * sd(prop_yes),
         upper = 30 + 2 * sd(prop_yes),
         in_CI = prop_yes > lower & prop_yes < upper) %>%
  summarize(mean(in_CI))
# Percentile-based 95% interval as an alternative to the ±2 sd one.
props %>%
  summarize(
    q025 = quantile(prop_yes, 0.025),
    q925 = quantile(prop_yes, 0.975)
  )
|
3bb5e7290f50ae312151fe58662e41f0d7684823
|
921a06923a4add7202f07c2dcf4dd3ac87215fb2
|
/NextText App Files/ui.R
|
71e391f59b77094c963c9fc9935cd4a042d65461
|
[] |
no_license
|
ermckinnon/CapstoneProject
|
8c8c149002b2d1872e02348b667d95f778fb910c
|
326abf7443f815ae0405c9e8994451089a00124d
|
refs/heads/master
| 2020-12-04T05:11:21.602946
| 2016-09-30T22:33:58
| 2016-09-30T22:33:58
| 67,367,589
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,467
|
r
|
ui.R
|
# User interface code for Next-texT
# Shiny UI: a text input plus two verbatim outputs ("text1"/"text2") that the
# server fills with the top-5 and top-5-non-stop-word predictions.
library(shiny)
shinyUI(fluidPage(
  h1("Next-texT - The Next Word Predictor",style = "color:blue"),
  #img(src = "logo.png", height = 72, width = 72),
  p("Welcome to Next-texT the application which predicts the next word you are going to type in a sentence."),
  p("Type your words into the text box below and click submit.
    Next-texT then predicts the top five words with the highest probability of occuring after
    your text. The highest probability words are often 'stop-words' like 'the', 'to' and 'I'.
    A novel aspect of Next-texT is that it also predicts the next five highest probability words which
    are not 'stop-words'. So ten predictions are provided in total."),
  # Input: free text; submitButton defers reactivity until clicked.
  textInput("text", "Please enter your text below and press submit to get your prediction:", value = "Predicting text with Next-texT is so much",width = '100%'),
  submitButton("Submit"),
  br(),
  p("Top 5 predictions:"),
  verbatimTextOutput("text1"),
  p("Next 5 'non-stop word' predictions:"),
  verbatimTextOutput("text2"),
  br(),
  # Static "about" / attribution sections.
  h3("About Next-texT",style = "color:blue"),
  p("Next-texT in based on an n-gram model comprising 1,2,3 and 4 ngrams derived from a publically available corpus. For an understanding
    of Ngram models there are excellent lecture notes available from the University of Illinois at this link:"),
  a("http://l2r.cs.uiuc.edu/~danr/Teaching/CS546-09/Lectures/Lec5-Stat-09-ext.pdf"),
  br(),
  p("Next-texT uses a 'back off' algorithm to handle words you type in that it does not have stored probabilities for.
    In tests on 200,000 test sentences the model acheived an average prediction accuracy rate of 29%,
    taking on average less then half a second computation time per prediction. The application is written in the R
    programming language and all of the code to reproduce this application, as well as a short presentation
    on how it works, is available at:"),
  a("https://github.com/ermckinnon/CapstoneProject"),
  h3("Sources/ Attributions",style = "color:blue"),
  p("Next-texT has been trained and tested on a corpus called HC Corpora (www.corpora.heliohost.org) which is available
    at the following link:"),
  a("http://www.corpora.heliohost.org/aboutcorpus.html"),
  p("In addition Next-texT has been enhanced with word occurance probabilities derived from an additional publically available Corpus
    at the following link:"),
  a("http://corpus.byu.edu/full-text/")
))
|
e2dad562ba41bda40c990df7d1f8f0f519492c18
|
decc750984cfbf1e69405f1416264a9d6a35faa9
|
/inst/dev/objetivos.R
|
265e713cd8c550add9879566a320feed48537a34
|
[] |
no_license
|
randommonkey/ciudatos
|
3d7673b580937a235b5d590221705339b8d4722a
|
a4eae52dcd511c2bc09b4163df9aadb152827e77
|
refs/heads/master
| 2020-09-24T09:41:24.156568
| 2016-11-03T23:51:45
| 2016-11-03T23:51:45
| 66,106,490
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,921
|
r
|
objetivos.R
|
# Dev scratch script: exploratory leaflet maps, time-series charts and a
# gapminder-style bubble chart for Bogota "objetivos" indicators per
# Localidad. Depends on package-local data files via sysfile(); interactive
# use only, not production code.
library(devtools)
load_all()
document()
#install()
# Indicator values per Localidad/year, plus a dictionary describing columns.
d0 <- read_csv(sysfile("data/clean/objetivos/bogota/objetivos-bogota-localidades.csv"))
problems(d0)
dic <- read_csv(sysfile("data/clean/objetivos/bogota/objetivos-bogota-localidades-dic.csv"))
# NOTE(review): this re-checks d0 a second time; presumably problems(dic)
# was intended here — confirm.
problems(d0)
library(leaflet)
library(scales)
library(lazyeval)
names(d0)
selectedYear <- 2011
names(d0)
# Numeric variables (ctype == "Nu") from the dictionary; vector of column
# ids named by their human-readable display names.
numVars <- dic %>% filter(ctype == "Nu") %>% .$id
names(numVars) <- dic %>% filter(ctype == "Nu") %>% .$name
# Pick one numeric variable at random to exercise the map.
selectedVar <- sample(numVars,1)
selectedVarName <- names(selectedVar)
fixedVars <- c("Localidad")
vars <- c(fixedVars,unname(selectedVar))
nm <- unname(selectedVar)
# One row per Localidad for the selected year; drop the city-wide total row.
d <- d0 %>%
filter(Localidad != "Total Bogotá") %>%
filter(Anio == selectedYear) %>% select_(.dots = vars) %>%
rename_("value"= nm)
# Attach lat/long coordinates for each Localidad.
geo <- read_csv(sysfile("data/aux/geoPoints-bogota.csv"))
d <- d %>% left_join(geo[c("name","latitud","longitud")],c("Localidad" = "name"))
# Tooltip text per Localidad: "<variable name>: <value>".
d$info <- pystr_format(pystr_format("{selectedVarName}: {value}",
list(selectedVarName = selectedVarName))
,d)
plotLeafletBog(d)
##
# Time series of the selected variable across years, one series per Localidad.
library(highcharter)
fixedVars <- c("Localidad","Anio")
vars <- c(fixedVars,unname(selectedVar))
d <- d0 %>% select_(.dots = vars) %>% filter(Localidad != "Total Bogotá") %>% rename_("value"=selectedVar)
#d <- na.omit(d)
title <- selectedVar
# Wide format: one column per Localidad, one row per year.
dd <- spread(d,Localidad,value)
plotChronoLocalidades(dd)
plotChronoLocalidades(dd, type = "line")
## Gapminder
# Gapminder-style bubble chart: three random numeric variables for one year.
selectedYear <- 2013
selectedVars <- sample(numVars,3)
# Empresas canceladas
# "v4_ee_005"
# Matricula Privada primera infancia total
# "v4_e_028"
# Ocupaciones ilegales identificadas
# "v7_v_054"
selectedVarNames <- names(selectedVars)
nms <- unname(selectedVars)
fixedVars <- c("Localidad","Anio")
vars <- c(fixedVars,unname(selectedVars))
category <- "Localidad"
# Rename the three selected columns to v1/v2/v3 and coerce them to numeric.
d <- d0 %>% select_(.dots = vars) %>%
filter(Anio == selectedYear) %>%
filter(Localidad != "Total Bogotá") %>%
rename_("v1"=nms[1]) %>%
rename_("v2"=nms[2]) %>%
rename_("v3"=nms[3]) %>%
mutate(v1=as.numeric(v1)) %>%
mutate(v2=as.numeric(v2)) %>%
mutate(v3=as.numeric(v3))
#d <- na.omit(d)
# Sanity check: TRUE if any column is entirely NA.
any(map(d,is.na) %>% map_lgl(all))
# Bubble chart: x = v1, y = v3, bubble size = v2, labelled by Localidad.
hchart(d, "bubble", x = v1, y = v3, size = v2, color=category) %>%
hc_xAxis(title = list(text=selectedVarNames[1])) %>%
hc_yAxis(title = list(text=selectedVarNames[2])) %>%
hc_plotOptions(
series = list(dataLabels = list(enabled = TRUE,format= '{point.Localidad}'))
)
# %>%
# hc_motion(enabled = TRUE,
# labels = 2000:2003,
# series = c(0,1,2))
# hc_legend(enabled= TRUE)
# hc_colorAxis(categories = d$Localidad) %>%
# #hc_yAxis(type = "logarithmic") %>%
# #hc_title(text = "Our nearest Stars") %>%
# #hc_subtitle(text = "In a Hertzsprung-Russell diagram") %>%
# hc_tooltip(useHTML = TRUE, headerFormat = "", pointFormat = tltip)
# NOTE(review): the two trailing lines look like leftovers — hc_legend() is
# called without a chart, and d no longer has a 'value' column at this point
# (it was renamed to v1/v2/v3), so spread() would fail. Confirm intent.
hc_legend()
dd <- spread(d,Localidad,value)
|
dffa94da34d8a21c433fb97c21d7f251e87579e6
|
0ee67d115c47cd3c4e0bb9fd67f644496d0cb6da
|
/assets/lectures/lecture1-intro-regression/simpsons-paradox-sat-scores.R
|
c59810b4e8739cea3e4e1791cd722f139de4aa56
|
[] |
no_license
|
nickreich/applied-regression-2016
|
28a52d05bc1d75fd671f26bb42f1240a625c26f5
|
c715999d2d4047e98888c47b24a3f157d61786f6
|
refs/heads/gh-pages
| 2021-01-10T06:22:43.778147
| 2016-04-18T18:01:18
| 2016-04-18T18:01:18
| 48,695,801
| 0
| 6
| null | 2016-02-09T02:14:28
| 2015-12-28T14:16:17
|
TeX
|
UTF-8
|
R
| false
| false
| 1,146
|
r
|
simpsons-paradox-sat-scores.R
|
# Illustration of Simpson's paradox with the mosaicData SAT data set:
# the raw salary~SAT relationship reverses once states are stratified by
# the fraction of students taking the SAT.
# NOTE(review): qplot(..., method=, se=) arguments are deprecated in current
# ggplot2; this script presumably targets an older ggplot2 release — confirm.
library(mosaic)
library(mosaicData)
# Raw scatterplot and its (negatively sloped) least-squares fit.
qplot(salary, sat, data=SAT)
qplot(salary, sat, data=SAT, geom=c("point", "smooth"), method="lm", se=FALSE)
# Bin states into low/medium/high SAT-participation groups.
SAT$fracgrp = cut(SAT$frac, breaks=c(0, 22, 49, 81),
labels=c("low", "medium", "high"))
# Within each participation group the salary~SAT slope turns positive.
qplot(salary, sat, color=fracgrp, data=SAT)
qplot(salary, sat, color=fracgrp, facets=.~fracgrp, data=SAT)
qplot(salary, sat, color=fracgrp, facets=.~fracgrp, data=SAT, geom=c("point", "smooth"), method="lm", se=FALSE)
# Supporting views: participation vs salary, and other predictors of SAT.
qplot(salary, frac, data=SAT)
qplot(salary, sat, data=SAT)
qplot(frac, sat, data=SAT)
qplot(expend, sat, data=SAT)
qplot(expend, sat, data=SAT, geom=c("point", "smooth"), method="lm", se=FALSE)
qplot(ratio, sat, data=SAT)
qplot(ratio, sat, data=SAT, geom=c("point", "smooth"), method="lm", se=FALSE)
# Combined figure: per-group fits (coloured) vs the overall fit (dashed gray).
ggplot(SAT, aes(x=salary, y=sat)) +
geom_smooth(aes(color=fracgrp), method="lm", se=FALSE) +
geom_point(aes(color=fracgrp)) +
geom_smooth(method="lm", se=FALSE, color="gray", linetype=2) +
xlab("estimated average public school teacher salary") +
ylab("average total SAT score, by state")+
scale_color_discrete("% students\ntaking SAT")
|
e84667d38532d6953f83f02e3daffc0cf59da570
|
83676931d003fc9bb6821509279deb057d669ba3
|
/data-raw/_examples.R
|
c966a9a7a161cb971123f0e677c82ac845e532f6
|
[] |
no_license
|
LunaSare/phunding
|
c70a6fd65314650cfd52471347df1aa7415e0a5d
|
4ea97eebea39c820d0ad0c0e24700c86ab2182d2
|
refs/heads/master
| 2020-04-02T05:30:26.245639
| 2019-02-21T16:18:41
| 2019-02-21T16:18:41
| 154,083,205
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,810
|
r
|
_examples.R
|
# Scratch examples: assembling a dated order-level tree and matching
# recipe-ingredient taxa to it via rotl/datelife/phytools.
# Interactive-session code; several objects (final, cols) come from the
# session rather than this file.
library(devtools)
data(fam_tree)
load_all("~/Desktop/datelife")
# II. use dates from blackrim service for now
data(all_orders_chrono)
ape::Ntip(all_orders_chrono)
# NOTE(review): the next assignment has no right-hand side on its own line,
# so R parses the following expression (ee_tree <- ...) as its value and
# missing_from_chrono ends up identical to ee_tree. Likely unfinished code.
missing_from_chrono <-
# using get_ott_children just takes too long
# ee <- get_ott_children(input=NULL, ott_ids = 93302, ott_rank = "order")
ee_tree <- datelife::get_dated_otol_induced_subtree(input = final) #
# get order tree
data(all_ott_orders)
head(all_ott_orders, 10)
tail(all_ott_orders)
library(rotl)
# Recipe ingredients (plus a couple of animals) to locate on the tree.
taxnames <- c("Alligator mississipiensis", "Allium ameloprasum", "Allium sativum", "Artemisia dracunculus", "Citrus limon", "Thymus vulgaris", "Piper nigrum", "Salvia officinalis", "Triticum aestivum", "Vitis vinifera", "Zea mays", "Bos taurus")
# Resolve names against the Open Tree Taxonomy, then pull full lineages.
taxon_search <- tnrs_match_names(names = taxnames)
taxon_lin <- get_ott_lineage(ott_id = taxon_search$ott_id)
# x <- taxon_lin[[1]]
# For each ingredient, extract the first order-rank ancestor from its lineage.
ingredients_orders <- unname(unique(sapply(taxon_lin, function(x) rownames(x)[grepl("\\border\\b", x[,"ott_ranks"])][1])))
# Positions of the ingredient orders among the chronogram's tip labels.
match(ingredients_orders, all_orders_chrono$tip.label)
# paste(taxon_search$ott_id, collapse = ", ")
# taxnames <- cbind(taxnames, unique_name(taxon_search), taxon_search$ott_id)
# ott_in_tree <- ott_id(taxon_search)[is_in_tree(ott_id(taxon_search))] # get id's
# tr <- tol_induced_subtree(ott_ids = ott_in_tree) # get published and taxonomy
# tree <- tr
# tree$tip.label <- c("Citrus_limon (Lemon)", "Vitis_vinifera (White Wine)", "Salvia_officinalis (Sage)", "Thymus_vulgaris (Thymus)", "Artemisia_dracunculus (Terragon)", "Triticum_aestivum (Wheat Flour)", "Zea_mays (Corn Starch)", "Allium_eremoprasum (Leek)", "Allium_sativum (Garlic)", "Piper_nigrum (White Pepper)", "Bos_taurus (Cream)", "Alligator_mississippiensis (Alligator)")
# NOTE(review): 'cols' is not defined anywhere in this file; this call relies
# on an object from the interactive session — confirm before reuse.
phytools::plotSimmap(all_orders_chrono,type="fan",part=0.5,fsize=0.3, ftype="i", colors = cols)
|
d2dc268edb89e0d13426be850c314ab968b32a76
|
16df226c48c7bdc6e6836174ceabd6b06c20883d
|
/man/eigenmodel_mcmc.Rd
|
6de6f7eea0b400af5dbe323a3b17310f37842c67
|
[] |
no_license
|
pdhoff/eigenmodel
|
2b3d350640ecfd761c264b1812fff7af5958efce
|
2c575e8031fb30bf0479e816d2b36470b6f5ab19
|
refs/heads/master
| 2020-03-18T16:55:17.882509
| 2018-05-28T15:01:18
| 2018-05-28T15:01:18
| 134,993,380
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,749
|
rd
|
eigenmodel_mcmc.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eigenmodel_mcmc.R
\name{eigenmodel_mcmc}
\alias{eigenmodel_mcmc}
\title{Approximate the posterior distribution of parameters in an eigenmodel}
\usage{
eigenmodel_mcmc(Y, X = NULL, R = 2, S = 1000, seed = 1, Nss = min(S,
1000), burn = 100)
}
\arguments{
\item{Y}{an n x n symmetric matrix with missing diagonal entries.
Off-diagonal missing values are allowed.}
\item{X}{an n x n x p array of regressors}
\item{R}{the rank of the approximating factor matrix}
\item{S}{number of samples from the Markov chain}
\item{seed}{a random seed}
\item{Nss}{number of samples to be saved}
\item{burn}{number of initial scans of the Markov chain to be dropped}
}
\value{
a list with the following components: \item{Z_postmean}{posterior
mean of the latent variable in the probit specification}
\item{ULU_postmean}{posterior mean of the reduced-rank approximating matrix}
\item{Y_postmean}{the original data matrix with missing values replaced by
posterior means} \item{L_postsamp}{samples of the eigenvalues}
\item{b_postsamp}{samples of the regression coefficients} \item{Y}{original
data matrix} \item{X}{original regressor array} \item{S}{number of scans of
the Markov chain}
}
\description{
Construct approximate samples from the posterior distribution of the
parameters and latent variables in an eigenmodel for symmetric relational
data.
}
\examples{
data(YX_Friend)
fit<-eigenmodel_mcmc(Y=YX_Friend$Y,X=YX_Friend$X,R=2,S=50,burn=50)
# in general you should run the Markov chain longer than 50 scans
plot(fit)
#fit<-eigenmodel_mcmc(Y=Y_Gen,R=3,S=10000)
#fit<-eigenmodel_mcmc(Y=Y_Pro,R=3,S=10000)
}
\author{
Peter Hoff
}
\keyword{models}
\keyword{multivariate}
|
5f5aec75a510d2dc3ca4724d0c7e9eb9d91dfc29
|
b8b1502856f6817b4d06c2051cbfd107156f1cef
|
/plot1.R
|
cc59f4c0c7c6e476d0555c9bb1de382f291b1396
|
[] |
no_license
|
ripon08054718/Exploratory_Data_Analysis
|
e2cb1136070a1904b38e5d9703595c0362679208
|
dd61a2ae16daba9b7e5a21a53e6acf5ce9e0da79
|
refs/heads/master
| 2020-04-15T16:12:40.646233
| 2015-01-11T15:38:05
| 2015-01-11T15:38:05
| 29,095,582
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 877
|
r
|
plot1.R
|
############################################################
#### Exploratory Data Analysis - Plot 1                  ###
#### Md. Sahidul Islam                                   ###
############################################################
# Reads the UCI "Individual household electric power consumption" data set,
# restricts it to 2007-02-01 .. 2007-02-02, and writes a histogram of
# Global Active Power to plot1.png.

# Load the full semicolon-delimited file; "?" marks missing values.
# (Uses TRUE/FALSE rather than the reassignable T/F shorthands.)
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                   na.strings = "?", nrows = 2075259, check.names = FALSE,
                   stringsAsFactors = FALSE, comment.char = "", quote = '\"')
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")

## Subset to the two target days...
data <- subset(data, subset = (Date >= "2007-02-01" & Date <= "2007-02-02"))
dim(data)
head(data)

## Combine date and time into a single POSIXct timestamp column.
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)

## Plot 1: histogram of global active power, written to a 480x480 PNG.
# (hist() is called for its side effect; its return value was previously
# captured in an unused variable, which has been dropped.)
png("plot1.png", width = 480, height = 480)
hist(data$Global_active_power, main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)", ylab = "Frequency", col = "Red")
dev.off()
|
1a6f6a722117bc71616471ae02aa8ffa475610e2
|
845b6691de8e989fe0ab314143b767dc50e460b5
|
/Analyse_convergent_recombination.R
|
2ea2c6ade98403b76f9da482ae6c9d2b18d96ab7
|
[] |
no_license
|
yhfteng/Shared-TCRs-in-cancers
|
5b5518e47464c21f7d591fbd6b1d561536ccbc8d
|
f13868eee327e17ab455a108db2d499a29e571a0
|
refs/heads/main
| 2023-06-24T12:10:49.190894
| 2021-07-24T14:17:36
| 2021-07-24T14:17:36
| 389,099,355
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,458
|
r
|
Analyse_convergent_recombination.R
|
# Analyse convergent recombination of shared TCR clonotypes.
#
# Usage: Rscript Analyse_convergent_recombination.R <dir> <binary_df.tsv>
#                <shared_list.tsv> <output_prefix>
#
# Reads every *_Clonotype_df.tsv in <dir>, counts how many distinct
# nucleotide sequences encode each shared clonotype (convergent
# recombination), and writes several summary tables for plotting.
library(readr)
library(dplyr)
library(tidyr)
library(stringr)
library(ggplot2)
library(purrr)
args <- commandArgs(trailingOnly = TRUE)
filepath <- args[1]
setwd(filepath)
binary.df <- read_tsv(args[2]) # input remove healthy binary df
shared <- read_tsv(args[3]) # input shared30 remove healthy list
savefilepath <- args[4]
################ put all the files of interest into a named list ###################
filenames <- list.files(path = filepath, pattern = "_Clonotype_df.tsv")
fileID <- substring(filenames, 1, 6) # first 6 characters identify the sample
filelist <- lapply(filenames, read_tsv)
names(filelist) <- fileID
############################# extract shared CDR3 ##################################
shared_clonotype <- unlist(shared)
# Build the Clonotype key ("C<CDR3aa>F_<V>_<J>") for one sample's table and,
# for shared clonotypes only, count rows per (Clonotype, CDR3 nucleotide seq).
# Reads shared_clonotype from the enclosing environment.
get.ntCount.eachCDR3 <- function(dat){
selected_df <- dat %>%
mutate(Clonotype= paste0('C',`CDR3-IMGT.x`,'F', '_', V, '_', J)) %>%
filter(Clonotype %in% shared_clonotype) %>%
group_by(Clonotype, `CDR3-IMGT.y`) %>%
summarise(Counts_of_nt_per_Clonotype=n())
return (selected_df)
}
######### execute function on all files in filelist ################################
shared_clonotype_nt <- lapply(filelist, get.ntCount.eachCDR3)
# BUGFIX: purrr::map_df's id-column argument is `.id`, not `id`. With
# `id='FileNum'` the argument was silently swallowed by the lambda's `...`
# and the FileNum provenance column never appeared in the output.
ans <- map_df(shared_clonotype_nt, ~as.data.frame(.x), .id = 'FileNum')
write.table(ans, paste0(savefilepath, '_sharedClonotype_NtList_Counts.tsv'), sep='\t', row.names = FALSE)
# For each (Clonotype, nt sequence): number of patients with that exact nt.
num_pat_withsameNt_perCDR3 <- ans %>%
group_by(Clonotype,`CDR3-IMGT.y`) %>%
summarise(Num_Pat_sameNt_perClonotype=n())
write.table(num_pat_withsameNt_perCDR3, paste0(savefilepath,'_num_Pat_withsameNt_persharedClonotype.tsv'), sep='\t', row.names = FALSE)
# For each Clonotype: number of distinct nt sequences (degree of convergence).
num_Nt_perCDR3 <- num_pat_withsameNt_perCDR3 %>% group_by(Clonotype) %>% summarise(num_Nt_persharedClonotype = n())
colnames(num_Nt_perCDR3) <- c('Clonotype', 'num_Nt_persharedClonotype')
write.table(num_Nt_perCDR3, paste0(savefilepath,'_num_NT_persharedClonotype.tsv'), sep='\t', row.names = FALSE)
############ join NTList_Counts with annotated clonotype list (num_of_samples_present) to plot ####
sharedCDR3_numSamples <- binary.df %>% select(Clonotype, Num_Samples_Present, Freq_Samples_Present) %>% filter(Clonotype %in% shared_clonotype)
join <- left_join(num_Nt_perCDR3, sharedCDR3_numSamples, by = c('Clonotype'='Clonotype'))
write.table(join, paste0(savefilepath,'_df_to_plot_convergentRecombination_removeHealthy.tsv'), sep='\t', row.names = FALSE)
|
00592846729e7a26524ef464e53799305facf59d
|
1de57d5207a15f52203b366ef07be5d59017da98
|
/scripts/exploration/intervenor_QA_explore.r
|
a75eed5d91c14cefd08df5038f9f19a26ce7d9f2
|
[] |
no_license
|
cybera/policy-browser
|
f5c298af1e9f695b4c6636e983d329efd7c77635
|
6a78c117d2176751acd165ed1647d77e995b9e1b
|
refs/heads/master
| 2021-09-20T18:28:04.962246
| 2018-08-13T22:11:59
| 2018-08-13T22:11:59
| 103,988,806
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,159
|
r
|
intervenor_QA_explore.r
|
# Script analyzing the questions and answers that took place during CRTC
# consultation 2015-134.
# Top part of script is general analysis; bottom is for chord diagrams.
library(tidyverse)
library(plotly)
library(cowplot)
library(forcats)
library(chorddiag)
library(lubridate)
library(RColorBrewer)
# Raw Q&A log: one row per interrogatory, with questioner/responder names,
# their categories, and one dated column per question/answer round.
interrogs <- read.csv("data/processed/intervenors_Q_A_raw_from_Middleton_group.csv")
# Keep only the columns that are needed (selected by position).
interrogs_filter <- interrogs %>% select(3,4,5,6,8,11,14,17,21,24,28,31)
# Gather the date columns - from a wide to a long (tidy) format: one row per
# (interrogatory, round) with its Type and Date.
interrogs_gathered <- interrogs_filter %>% gather(key=Type, value=Date, c(-1,-2, -3,-4)) %>%
arrange(Questioner_reformat) %>% filter(Date != "")
# Fix the one inconsistently formatted date, then parse all dates.
interrogs_gathered <- interrogs_gathered %>% mutate(Date = replace(Date, Date == "10-Dec-15", "2015/12/10"))
interrogs_gathered$Date <- as.Date(interrogs_gathered$Date)
# Relabel the raw column names into Q.Phase1..4 / A.Phase1..4.
interrogs_gathered <- interrogs_gathered %>%
mutate(Type = replace(Type, Type == "Interrogatory.Date", "Q.Phase1")) %>%
mutate(Type = replace(Type, Type == "Follow.Up.1", "Q.Phase2")) %>%
mutate(Type = replace(Type, Type == "Follow.up.2", "Q.Phase3")) %>%
mutate(Type = replace(Type, Type == "Follow.up.3", "Q.Phase4")) %>%
mutate(Type = replace(Type, Type == "Response.Date", "A.Phase1")) %>%
mutate(Type = replace(Type, Type == "Reply.2", "A.Phase2")) %>%
mutate(Type = replace(Type, Type == "Reply.3", "A.Phase3")) %>%
mutate(Type = replace(Type, Type == "Reply.4", "A.Phase4"))
# Convenience vector of the four question-round labels.
q_list <- (c("Q.Phase1", "Q.Phase2", "Q.Phase3", "Q.Phase4"))
################################
#End data prep
################################
#Summary stats
#How many questions were asked and in which rounds?
interrogs_gathered %>% filter(Type %in% q_list) %>%
group_by(Type) %>% dplyr::summarise(Qs_asked = n()) %>% arrange(desc(Qs_asked))
# Timeline plots: submissions per day (all, questions only, answers only).
q_a_dates <- interrogs_gathered %>% group_by(Date) %>% dplyr::summarise(Sub_count = n()) %>%
ggplot(aes(x=Date, y=Sub_count)) + geom_point() + labs(x = "Date", y = "Number of submissions") +
scale_x_date(date_labels ="%m-%Y", limits = c(dmy("01-08-2015"), dmy("01-03-2016")), date_breaks = "1 month") +
ggtitle("Date all Q & As were submitted")
q_dates <- interrogs_gathered %>% filter(Type %in% q_list) %>%
group_by(Date) %>% dplyr::summarise(Sub_count = n()) %>%
ggplot(aes(x=Date, y=Sub_count)) + geom_point() + labs(x = "Date", y = "Number of submissions") +
scale_x_date(date_labels ="%m-%Y", limits = c(dmy("01-08-2015"), dmy("01-03-2016")), date_breaks = "1 month") +
ggtitle("Date questions were submitted")
r_dates <- interrogs_gathered %>% filter(!(Type %in% q_list)) %>%
group_by(Date) %>% dplyr::summarise(Sub_count = n()) %>%
ggplot(aes(x=Date, y=Sub_count)) + geom_point() + labs(x = "Date", y = "Number of submissions") +
scale_x_date(date_labels ="%m-%Y", limits = c(dmy("01-08-2015"), dmy("01-03-2016")), date_breaks = "1 month") +
ggtitle("Date answers were submitted")
plot_grid(q_a_dates, q_dates, r_dates, align="h", ncol=1)
# Same timeline, coloured by phase (fct_relevel interleaves Q and A phases).
interrogs_gathered %>% group_by(Date, Type) %>% dplyr::summarise(Sub_count = n()) %>%
mutate(Type = fct_relevel(Type, "Q.Phase1")) %>%
mutate(Type = fct_relevel(Type, "Q.Phase2", after = 2)) %>%
mutate(Type = fct_relevel(Type, "Q.Phase3", after = 4)) %>%
mutate(Type = fct_relevel(Type, "Q.Phase4", after = 6)) %>%
ggplot(aes(x=Date, y=Sub_count)) + geom_point(aes(colour = factor(Type)), size=4) + labs(x = "Date", y = "Number of submissions") +
scale_x_date(date_labels ="%m-%Y", limits = c(dmy("01-08-2015"), dmy("01-03-2016")), date_breaks = "1 month") +
labs(colour = "Q&A Phase") + ggtitle("Date all Q & As were submitted")
#Date questions and responses came in
dates_submitted.plot <- interrogs_gathered %>% group_by(Date) %>%
ggplot(aes(x=Date, y=Type)) + geom_point() + labs(x = "Date", y = "Submission Type") + scale_x_date(date_labels ="%m-%Y")
ggplotly(dates_submitted.plot)
#Count of how many Q & A there were per round and from whom
interrogs_gathered %>% filter(Type %in% q_list) %>%
ggplot(aes(Type)) + geom_bar(aes(fill=Q_category)) +
ggtitle("Question phases and count") + labs(x = "Question Round", fill = "Intervenor Category")
interrogs_gathered %>% filter(!(Type %in% q_list)) %>%
filter(R_category != "") %>%
ggplot(aes(Type)) + geom_bar(aes(fill=R_category)) +
ggtitle("Response phases and count") + labs(x = "Question Round", fill = "Intervenor Category")
# Normalized (100% stacked) bars for easier comparison of who participates
# in each round.
q <- interrogs_gathered %>% filter(Type %in% q_list) %>%
ggplot(aes(Type)) + geom_bar(aes(fill=Q_category), position = position_fill())
r <- interrogs_gathered %>% filter(!(Type %in% q_list)) %>%
filter(R_category != "") %>%
ggplot(aes(Type)) + geom_bar(aes(fill=R_category), position = position_fill())
plot_grid(q, r)
ggplotly(q)
ggplotly(r)
#Which orgs ask most of the questions?
interrogs_gathered %>% filter(Type %in% q_list) %>%
group_by(Questioner_reformat, Q_category) %>% dplyr::summarise(Qs_asked = n()) %>%
arrange(desc(Qs_asked))
#Questions by category
interrogs_gathered %>% filter(Type %in% q_list) %>%
group_by(Q_category) %>% dplyr::summarise(Qs_asked = n()) %>% arrange(desc(Qs_asked))
#Top askers for each category
interrogs_gathered %>% filter(Type %in% q_list) %>%
group_by(Questioner_reformat, Q_category) %>% dplyr::summarise(Qs_asked = n()) %>%
dplyr::ungroup() %>% group_by(Q_category) %>% dplyr::mutate(Category_total = sum(Qs_asked)) %>%
filter(Qs_asked == max(Qs_asked)) %>%
arrange(desc(Qs_asked))
# Who has the largest imbalance on questioner and receiver side?
# delta = questions asked - questions received, per organization.
questions_received <- interrogs_gathered %>% filter(Type %in% q_list) %>% group_by(Responder_reformat, R_category) %>% dplyr::summarise(Qs_received = n()) %>% arrange(desc(Qs_received))
questions_asked <- interrogs_gathered %>% filter(Type %in% q_list) %>% group_by(Questioner_reformat, Q_category) %>% dplyr::summarise(Qs_asked = n()) %>% arrange(desc(Qs_asked))
Qs_asked_received <- full_join(questions_received, questions_asked, by=c("Responder_reformat"="Questioner_reformat")) %>% replace_na(list(Qs_received = 0, Qs_asked = 0)) %>%
mutate(delta = Qs_asked - Qs_received) %>% arrange(desc(delta)) %>% mutate(org_category = ifelse(is.na(R_category), as.character(Q_category), as.character(R_category))) %>%
select(-Q_category, -R_category)
Qs_asked_received %>% group_by(org_category) %>% summarise(total_delta = sum(delta)) %>% arrange(desc(total_delta))
#Largest imbalance on received side
tail(Qs_asked_received, n=10)
#Largest imbalance on questioner side
head(Qs_asked_received, n=10)
Qs_asked_received %>% group_by(org_category) %>% summarise(total_delta = sum(delta)) %>% arrange(desc(total_delta))
#What rounds do intervenors ask questions?
interrogs_gathered %>% filter(Type %in% q_list) %>%
ggplot(aes(Questioner_reformat)) + geom_bar(aes(fill=fct_rev(Type)), position = position_stack()) +
theme(axis.text = element_text(angle = 60, vjust = 0.9, hjust = 1)) +
labs(fill = "Question Round", x = "", y = "Number of Questions")
interrogs_gathered %>% filter(Type %in% q_list) %>%
ggplot(aes(Responder_reformat)) + geom_bar(aes(fill=fct_rev(Type)), position = position_stack()) +
theme(axis.text = element_text(angle = 60, vjust = 0.9, hjust = 1)) +
labs(fill = "Question Round", x = "")
################################
################################
# Chord diagrams
################################
################################
# Convert a long (from, to, value) data frame into a square numeric matrix
# for chorddiag(): rows are 'from' categories, columns are 'to' categories,
# with both dimensions sharing the same sorted union of factor levels.
chord_prep <- function(df){
  # Harmonize factor levels so rows and columns cover identical categories.
  df$to <- droplevels(df$to)
  df$from <- droplevels(df$from)
  shared_levels <- sort(union(levels(df$from), levels(df$to)))
  df$from <- factor(df$from, levels = shared_levels)
  df$to <- factor(df$to, levels = shared_levels)
  # Fill every missing from/to pair with a zero flow, then widen on 'to'.
  wide <- df %>%
    tidyr::complete(from, to, fill = list(value = 0)) %>%
    as.data.frame() %>%
    spread(key = to, value = value, fill = 0)
  out <- as.matrix(wide[, setdiff(names(wide), "from"), drop = FALSE])
  rownames(out) <- wide$from
  return(out)
}
# Transposed counterpart of chord_prep(): spreads on 'from' so the receiving
# ('to') side forms the rows. Used for the "who was asked" chord views.
# Note: intentionally mirrors chord_prep() line for line apart from the
# spread key and row labels.
chord_prep_asked <- function(df){
  df$to <- droplevels(df$to)
  df$from <- droplevels(df$from)
  shared_levels <- sort(union(levels(df$from), levels(df$to)))
  df$from <- factor(df$from, levels = shared_levels)
  df$to <- factor(df$to, levels = shared_levels)
  # Complete the from/to grid with zeros, then widen on 'from'.
  wide <- df %>%
    tidyr::complete(from, to, fill = list(value = 0)) %>%
    as.data.frame() %>%
    spread(key = from, value = value, fill = 0)
  out <- as.matrix(wide[, setdiff(names(wide), "to"), drop = FALSE])
  rownames(out) <- wide$to
  return(out)
}
# Standardize a 3-column count table to the names from/to/value and drop
# rows whose 'to' field is blank. Returns a plain data.frame.
chord_pre_prep <- function(count_input){
  names(count_input) <- c("from", "to", "value")
  cleaned <- data.frame(count_input) %>% filter(to != "")
  as.data.frame(cleaned)
}
# Look up a plot colour for every category appearing in count.df.
# col_list maps category name ('all_levels') to a colour ('cols'); returns
# a one-column data.frame of colours in sorted category order, so colour
# assignments stay consistent across chord diagrams.
get_cols <- function(count.df, col_list){
  count.df$from <- droplevels(count.df$from)
  count.df$to <- droplevels(count.df$to)
  categories_present <- sort(union(levels(count.df$from), levels(count.df$to)))
  org_levels <- data.frame(categories = categories_present)
  dplyr::left_join(org_levels, col_list, by = c("categories" = "all_levels")) %>%
    select(cols)
}
# Draw a chord diagram of org-to-org question counts.
# type = "asked" transposes the matrix so receivers form the rows;
# any other value keeps questioners on the rows.
individual_chord <- function(df, type=""){
  cleaned <- chord_pre_prep(df)
  mat <- if (type == "asked") chord_prep_asked(cleaned) else chord_prep(cleaned)
  chorddiag(mat, groupnameFontsize = 10, groupnamePadding = 20, tickInterval = 5)
}
# Chord diagram at the intervenor-category level, with one consistent colour
# per category taken from colourdict (see get_cols()).
# type = "asked" transposes so the receiving side forms the rows.
category_chord <- function(interrog_count, colourdict, type=""){
  chord_input <- chord_pre_prep(interrog_count)
  plot_cols <- get_cols(chord_input, colourdict)
  mat <- if (type == "asked") {
    chord_prep_asked(chord_input)
  } else {
    chord_prep(chord_input)
  }
  chorddiag(mat, groupnameFontsize = 10, groupnamePadding = 20,
            groupColors = plot_cols$cols, tickInterval = 5)
}
# Set up one colour per intervenor category (shared across all diagrams):
lq <- levels(interrogs_gathered$Q_category)
lr <- levels(interrogs_gathered$R_category)
all_levels <- sort(union(lq,lr))
cols <- brewer.pal(n = 12, name = 'Set3')
colourdict <- cbind(all_levels, cols)
colourdict <- data.frame(colourdict)
#####################
###
#Look at all questions together & the different phases:
interrog_count <- interrogs_gathered %>% filter(Type %in% q_list) %>% group_by(Q_category, R_category) %>% dplyr::summarise(count= n())
category_chord(interrog_count, colourdict)
#Rounds 1-4:
interrog_count_rd1 <- interrogs_gathered %>% filter(Type %in% "Q.Phase1") %>% group_by(Q_category, R_category) %>% dplyr::summarise(count= n())
category_chord(interrog_count_rd1, colourdict)
interrog_count_rd2 <- interrogs_gathered %>% filter(Type %in% "Q.Phase2") %>% group_by(Q_category, R_category) %>% dplyr::summarise(count= n())
category_chord(interrog_count_rd2, colourdict)
interrog_count_rd3 <- interrogs_gathered %>% filter(Type %in% "Q.Phase3") %>% group_by(Q_category, R_category) %>% dplyr::summarise(count= n())
category_chord(interrog_count_rd3, colourdict)
interrog_count_rd4 <- interrogs_gathered %>% filter(Type %in% "Q.Phase4") %>% group_by(Q_category, R_category) %>% dplyr::summarise(count= n())
category_chord(interrog_count_rd4, colourdict)
###
# Same views transposed: look at who is being asked, overall & per phase:
interrog_count <- interrogs_gathered %>% filter(Type %in% q_list) %>% group_by(Q_category, R_category) %>% dplyr::summarise(count= n())
category_chord(interrog_count, colourdict, type="asked")
#Rounds 1-4:
interrog_count_rd1 <- interrogs_gathered %>% filter(Type %in% "Q.Phase1") %>% group_by(Q_category, R_category) %>% dplyr::summarise(count= n())
category_chord(interrog_count_rd1, colourdict, type="asked")
interrog_count_rd2 <- interrogs_gathered %>% filter(Type %in% "Q.Phase2") %>% group_by(Q_category, R_category) %>% dplyr::summarise(count= n())
category_chord(interrog_count_rd2, colourdict, type="asked")
interrog_count_rd3 <- interrogs_gathered %>% filter(Type %in% "Q.Phase3") %>% group_by(Q_category, R_category) %>% dplyr::summarise(count= n())
category_chord(interrog_count_rd3, colourdict, type="asked")
interrog_count_rd4 <- interrogs_gathered %>% filter(Type %in% "Q.Phase4") %>% group_by(Q_category, R_category) %>% dplyr::summarise(count= n())
category_chord(interrog_count_rd4, colourdict, type="asked")
#NB: Can change labels under groupNames to make them fit the graphic better
# NOTE(review): 'all_Q_A.mat' is not defined anywhere in this file — this
# call appears to rely on an object from an interactive session and would
# error if the script were run top to bottom. Confirm before reuse.
chorddiag(all_Q_A.mat, groupnameFontsize = 10, groupnamePadding = 20,
groupNames = c("Advocacy orgs","Chamber of commerce",
"Consumer advocacy orgs", "Government", "Individual", "#N/A",
"Network op: Cable co", "Network op: other",
"Network op: Telco Incumbents", "Other", "Small incumbents"))
###################
# Questions by individual organizations (overall, then per round)
interrog_individual <- interrogs_gathered %>% filter(Type %in% q_list) %>% group_by(Questioner_reformat, Responder_reformat) %>% dplyr::summarise(count= n())
individual_chord(interrog_individual)
interrog_individ_rd1 <- interrogs_gathered %>% filter(Type %in% "Q.Phase1") %>% group_by(Questioner_reformat, Responder_reformat) %>% dplyr::summarise(count= n())
individual_chord(interrog_individ_rd1)
interrog_individ_rd2 <- interrogs_gathered %>% filter(Type %in% "Q.Phase2") %>% group_by(Questioner_reformat, Responder_reformat) %>% dplyr::summarise(count= n())
individual_chord(interrog_individ_rd2)
interrog_individ_rd3 <- interrogs_gathered %>% filter(Type %in% "Q.Phase3") %>% group_by(Questioner_reformat, Responder_reformat) %>% dplyr::summarise(count= n())
individual_chord(interrog_individ_rd3)
interrog_individ_rd4 <- interrogs_gathered %>% filter(Type %in% "Q.Phase4") %>% group_by(Questioner_reformat, Responder_reformat) %>% dplyr::summarise(count= n())
individual_chord(interrog_individ_rd4)
# Investigate which orgs got asked in each round (transposed views)
interrog_individual_asked <- interrogs_gathered %>% filter(Type %in% q_list) %>% group_by(Questioner_reformat, Responder_reformat) %>% dplyr::summarise(count= n())
individual_chord(interrog_individual_asked, type="asked")
interrog_individ_rd1_asked <- interrogs_gathered %>% filter(Type %in% "Q.Phase1") %>% group_by(Questioner_reformat, Responder_reformat) %>% dplyr::summarise(count= n())
individual_chord(interrog_individ_rd1_asked, type="asked")
interrog_individ_rd2_asked <- interrogs_gathered %>% filter(Type %in% "Q.Phase2") %>% group_by(Questioner_reformat, Responder_reformat) %>% dplyr::summarise(count= n())
individual_chord(interrog_individ_rd2_asked, type="asked")
interrog_individ_rd3_asked <- interrogs_gathered %>% filter(Type %in% "Q.Phase3") %>% group_by(Questioner_reformat, Responder_reformat) %>% dplyr::summarise(count= n())
individual_chord(interrog_individ_rd3_asked, type="asked")
interrog_individ_rd4_asked <- interrogs_gathered %>% filter(Type %in% "Q.Phase4") %>% group_by(Questioner_reformat, Responder_reformat) %>% dplyr::summarise(count= n())
individual_chord(interrog_individ_rd4_asked, type="asked")
|
30a84830c39b97f4f0932ca25d139dcd0e97fcbd
|
cff808c3d0a27336639339cf10f4e4922c8ea81b
|
/MAP2/old/elr_pre_map_fix_gts_scratch.R
|
a5902c54730e642144309b2e82a82da954c5249e
|
[] |
no_license
|
jthmiller/QTL_remap
|
e3f9544d0d9bf02c24706da9b2526a35934a7857
|
fa84f0927631e15d2bd20bc53710f1340626ee3d
|
refs/heads/master
| 2020-03-27T23:59:09.869077
| 2019-12-16T17:16:37
| 2019-12-16T17:16:37
| 147,251,773
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,923
|
r
|
elr_pre_map_fix_gts_scratch.R
|
#!/bin/R
### Map QTLs 1 of 3
# Scratch script: per-chromosome genotype cleaning for the ELR cross prior
# to linkage-map construction with R/qtl.
debug.cross <- T
pop <- 'ELR'
# Project-specific setup (paths and helpers such as read.cross.jm).
source("/home/jmiller1/QTL_Map_Raw/ELR_final_map/CODE/control_file.R")
library('qtl')
mpath <- '/home/jmiller1/QTL_Map_Raw/ELR_final_map'
# Chromosome index from the command line, kept only if it matches 1..24
# (comparison coerces both sides to character).
i <- commandArgs(TRUE)[commandArgs(TRUE) %in% c(1:24)]
#cross <- read.cross.jm(file = file.path(indpops, paste0(pop, ".unphased.f2.csvr")),
#format = "csvr", geno = c(1:3), estimate.map = FALSE)
fl <- file.path(mpath,'ELR_unmapped_filtered_added_markers.csv')
# Read the unphased F2 cross from CSV; map estimation is deferred.
cross.all <- read.cross(
file = fl,
format = "csv", genotypes=c("AA","AB","BB"), alleles=c("A","B"),
estimate.map = FALSE
)
################################################################################
################################################################################
################################################################################
# Per-chromosome cleanup/mapping loop: order markers by physical position,
# drop high-crossover markers, clean genotypes, thin markers, then re-order
# and re-estimate the genetic map, writing diagnostics and cross files out.
for (i in 3:24){
cross <- subset(cross.all,chr=i)
nmars <- nmar(cross)
## initial order
# Marker names look like "chr:pos"; order by the numeric position suffix.
ord <- order(as.numeric(gsub(".*:","",names(pull.map(cross)[[1]]))))
cross <- switch.order(cross, chr = i, ord, error.prob = 0.01, map.function = "kosambi",
maxit = 10, tol = 0.001, sex.sp = F)
################################################################################
################################################################################
# Keep only individuals with a non-missing phenotype.
cross <- subset(cross,ind=!is.na(cross$pheno$Pheno))
cross <- calc.errorlod(cross, err=0.01)
png(paste0('~/public_html/ELR_gts_preclean',i,'.png'),height=2500,width=4500)
plotGeno(cross)
dev.off()
png(paste0('~/public_html/ELR_xo_a',i,'.png'))
hist(sort(table(unlist(locateXO(cross)))),breaks=30)
dev.off()
# Count crossovers at each location and map them back to marker names.
loc.xocount <- table(unlist(locateXO(cross)))
marker <- sapply(as.numeric(names(loc.xocount)),function(X){
find.marker(cross, chr=i, pos=X) })
dropdf <- data.frame(loc.xocount,marker,stringsAsFactors=F)
dropdf$tot <- sapply(dropdf$mark, function(X){ sum(table(pull.geno(cross,i)[,X]))})
# Drop markers where more than 10% of genotyped individuals show a crossover.
drops <- unique(dropdf[dropdf$Freq/dropdf$tot > 0.10,'marker'])
cross <- drop.markers(cross,drops)
cross <- calc.genoprob(cross)
cross <- sim.geno(cross)
cross <- calc.errorlod(cross, err=0.01)
png(paste0('~/public_html/ELR_gts_preclean_droppedmark',i,'.png'),height=2500,width=4500)
plotGeno(cross)
dev.off()
# Custom genotype-cleaning passes (cleanGeno_jm/_jm_2 defined elsewhere).
cross <- cleanGeno_jm(cross, chr=i, maxdist=100, maxmark=8, verbose=TRUE)
cross <- calc.errorlod(cross, err=0.025)
cross <- removeDoubleXO(cross)
cross <- calc.errorlod(cross, err=0.025)
cross <- cleanGeno_jm_2(cross, chr=i, maxdist=50, maxmark=4, verbose=TRUE)
cross <- calc.errorlod(cross, err=0.025)
png(paste0('~/public_html/ELR_clean.png'),height=2500,width=4000)
plotGeno(cross,cex=3)
dev.off()
png(paste0('~/public_html/ELR_RF_clean',i,'.png'))
plotRF(cross)
dev.off()
fl <- file.path(mpath,paste0(i,'ELR_unmapped_unfiltered'))
write.cross(cross,filestem=fl,format="csv")
################################################################################
### THIN MARKERS IF NEEDED #####################################################
# Replace the map with physical positions, then pick a weighted marker subset
# (markers with less missing data get higher weight).
mp <- as.numeric(gsub(".*:",'',markernames(cross)))
names(mp) <- markernames(cross)
mp <- list(mp)
names(mp) <- i
cross <- replace.map(cross,mp)
gts <- geno.table(cross)
weight <- 1 - gts$missing/rowSums(gts[,c(3:5)])*10
dwnsmpl <- pickMarkerSubset(pull.map(cross)[[1]],2000, weights=weight)
drops <- markernames(cross)[! markernames(cross) %in% dwnsmpl]
cross.dwn <- drop.markers(cross,drops)
cross.dwn <- calc.genoprob(cross.dwn)
cross.dwn <- sim.geno(cross.dwn)
cross.dwn <- calc.errorlod(cross.dwn, err=0.01)
png(paste0('~/public_html/ELR_gts_CHR',i,'_downsmpl.png'),height=1500,width=4500)
plotGeno(cross.dwn ,cex=3)
dev.off()
#####MAP ########################################################################
# Exclude known-bad individuals, then order markers and estimate the map.
cross.dwn <- subset(cross.dwn,ind=!cross$pheno$ID %in% c('ELR_10869','ELR_ER1124F','ELR_10977','ELR_10988','BLI_BI1124M'))
cross.dwn <- orderMarkers(cross.dwn, window=7,verbose=FALSE,chr=i,
use.ripple=TRUE, error.prob=0.025, sex.sp=FALSE,
map.function="kosambi",maxit=500, tol=1e-4)
cross.dwn <- calc.genoprob(cross.dwn)
cross.dwn <- sim.geno(cross.dwn)
cross.dwn <- calc.errorlod(cross.dwn, err=0.01)
##cross.dwn <- read.cross(
## file = filename,
## format = "csv", genotypes=c("AA","AB","BB"), alleles=c("A","B"),
## estimate.map = FALSE
##)
##
cross.dwn_map <- est.map(cross.dwn, error.prob=0.025,
map.function="kosambi",
maxit=10000, tol=1e-6, sex.sp=FALSE,
verbose=FALSE, omit.noninformative=TRUE, n.cluster=6)
cross.dwn_map <- shiftmap(cross.dwn_map, offset=0)
# NOTE(review): replace.map is applied to `cross`, not `cross.dwn` -- the
# estimated map comes from the thinned cross; confirm this is intended.
cross.dwn <- replace.map(cross, cross.dwn_map)
filename <- paste0('/home/jmiller1/QTL_Map_Raw/ELR_final_map/ELR_gts_CHR',i,'_downsmpl_map')
write.cross(cross.dwn,chr=i,filestem=filename,format="csv")
}
################################################################################
################################################################################
################################################################################
### THIN MARKERS IF NEEDED #####################################################
# Replace the genetic map with physical positions parsed from marker names.
mp <- as.numeric(gsub(".*:",'',markernames(cross)))
names(mp) <- markernames(cross)
# FIX: the original `mp <- list(get(i)=mp)` is a parse error (the LHS of `=`
# inside list() must be a literal name). Name the single-element list by the
# chromosome id held in `i` instead.
mp <- setNames(list(mp), i)
cross <- replace.map(cross,mp)
# Weight markers by completeness and pick a thinned subset.
gts <- geno.table(cross)
weight <- 1 - gts$missing/rowSums(gts[,c(3:5)])*10
dwnsmpl <- pickMarkerSubset(pull.map(cross)[[1]],2000, weights=weight)
drops <- markernames(cross)[! markernames(cross) %in% dwnsmpl]
cross <- drop.markers(cross,drops)
cross <- calc.genoprob(cross)
cross <- sim.geno(cross)
cross <- removeDoubleXO(cross)
cross <- calc.errorlod(cross, err=0.01)
png(paste0('~/public_html/ELR_gts_c',i,'.png'),width=1000)
plotGeno(cross)
dev.off()
################################################################################
mpath <- '/home/jmiller1/QTL_Map_Raw/ELR_final_map'
# NOTE(review): `cross.ss` is not defined anywhere in this file -- presumably a
# subsetted cross from an interactive session; verify before sourcing.
write.table(markernames(cross.ss),file.path(mpath,'ER_markers_subst.table'))
fl <- file.path(mpath,'ELR_subsetted')
write.cross(cross.ss,filestem=fl,format="csv")
################################################################################
# Profile the map log-likelihood over a grid of genotyping-error rates.
# NOTE(review): `mapthis` is not defined in this file -- confirm its origin.
loglik <- err <- c(0.001, 0.005, 0.01, 0.015, 0.02)
for(i in seq_along(err)) {
cat(i, "of", length(err), "\n")
tempmap <- est.map(mapthis, error.prob=err[i])
loglik[i] <- sum(sapply(tempmap, attr, "loglik"))
}
lod <- (loglik - max(loglik))/log(10)
#### Further improve chr1
# Identify individuals with both heavy missingness and excess crossovers.
cross <- subset(cross.all,chr=1)
lots_missing <- names(sort(nmissing(cross))[sort(nmissing(cross)) > 25])
lots_xo <- names(sort(countXO(cross))[sort(countXO(cross)) > 100])
both <- intersect(lots_missing, lots_xo)
table(pull.geno(cross)[which(cross$pheno$ID=="ELR_10991"),])
table(pull.geno(cross)[which(cross$pheno$ID=="ELR_10974"),])
# FIX: the lines below were bare prose (e.g. `"ELR_10991", set all 3 to 2`),
# which is not valid R and made the whole file fail to parse/source.
# They are manual genotype-fix notes for chromosome 1, now kept as comments:
# "ELR_10991": set all 3 to 2
# "ELR_10871": set all 3 to 2
# "ELR_10989": set all to 2
# "ELR_10974": 1 and 3 markers after 16000 should be all 2
# "ELR_10953": small region of transition from 1 to 2 to 3. change the 1/2 mix to 2
# "ELR_10998": before 10000, change all to hets (or drop)
# "ELR_10882": just before 10000, is 3, rest should be 2
# "ELR_10969": after stretch of 3, change to 2
# "ELR_10924": change all 3 to 2
# "ELR_10981": change all to 2
# "ELR_10990": change all to 2
# "ELR_10980": change all to 2
# "ELR_10967": change all to 2 until reigion of 3 starts to end
# "ELR_10869": change all to 2 (really bad samp)
# "ELR_10971": change all to 2 (really bad samp)
# "ELR_11593": after stretch of 3, change to 2 for the rest
# "ELR_11592": change all except stretch of 1
# Individuals whose chromosome-1 genotypes are set entirely to heterozygous (2),
# per the manual notes above.
all_to_2 <- c("ELR_10991","ELR_10871","ELR_10989","ELR_10981","ELR_10990","ELR_10980","ELR_10967","ELR_10869","ELR_10971")
### ALL HET
for(id in all_to_2){
indv <- cross$pheno$ID==id
tozero <- which(cross$geno[[1]]$data[indv,]!=2)
cross$geno[[1]]$data[indv,tozero] <- 2
}
### 1 to 2
id <- "ELR_10974"
indv <- cross$pheno$ID==id
###pull.geno(cross)[indv,]
start <- which(names(pull.geno(cross)[indv,])=='1:31892692')
end <- length(names(pull.geno(cross)[indv,]))
# TODO(review): c(start,end) touches only the two endpoint markers; the note
# above suggests the whole range c(start:end) may have been intended.
cross$geno[[1]]$data[indv,c(start,end)] <- 2
### 1 to 2
id <- "ELR_10953"
indv <- cross$pheno$ID==id
pull.geno(cross)[indv,]
start <- which(names(pull.geno(cross)[indv,])=='1:14891665')
end <- which(names(pull.geno(cross)[indv,])=='1:20393972')
chng <- names(pull.geno(cross)[indv,c(start:end)]==1)
# TODO(review): same endpoint-vs-range question as above; `chng` is computed
# but never used.
cross$geno[[1]]$data[indv,c(start,end)] <- 2
# Individuals whose genotype-1 calls are converted to heterozygous.
# NOTE(review): "ELR_11593" is listed twice (harmless duplicate).
all_1_2 <- c("ELR_10974","ELR_10998","ELR_10882","ELR_10869","ELR_11593","ELR_11593","ELR_10969")
for(id in all_1_2){
indv <- cross$pheno$ID==id
tozero <- which(cross$geno[[1]]$data[indv,]==1)
cross$geno[[1]]$data[indv,tozero] <- 2
}
# Individuals whose genotype-3 calls are converted to heterozygous.
all_3_2 <- c("ELR_10998","ELR_10882","ELR_10869","ELR_11593","ELR_10924","ELR_11592")
# FIX: this loop iterated `all_1_2` in the original, so `all_3_2` was never
# used and the 3->2 conversion was applied to the wrong set of individuals.
for(id in all_3_2){
indv <- cross$pheno$ID==id
tozero <- which(cross$geno[[1]]$data[indv,]==3)
cross$geno[[1]]$data[indv,tozero] <- 2
}
### 3 to 2
id <- "ELR_10974"
indv <- cross$pheno$ID==id
###pull.geno(cross)[indv,]
start <- which(names(pull.geno(cross)[indv,])=='1:31564972')
end <- length(names(pull.geno(cross)[indv,]))
cross$geno[[1]]$data[indv,c(start:end)] <- 2
######## PLOT #####################
cross <- calc.genoprob(cross)
# NOTE(review): `indo` is used in this filename before it is assigned below
# (L+5); running this file top-to-bottom would error here.
png(paste0('~/public_html/ELR_gts_fix_these_genos',indo,'.png'),height=1000,width=4000)
plotGeno(subset(cross,ind=both),cex=3)
abline(v=2.862)
dev.off()
########################
indo <- "ELR_10871"
table(pull.geno(cross)[which(cross$pheno$ID==indo),])
png(paste0('~/public_html/ELR_gts_c',indo,'.png'),width=3000)
plotGeno(subset(cross,ind=indo))
dev.off()
# Imputed genotypes via the argmax (Viterbi-style) method.
fake.f2 <- argmax.geno(cross, step=2, off.end=5, err=0.05)
png(paste0('~/public_html/ELR_gts_c',indo,'.png'),width=3000)
plotGeno(subset(cross,ind=both))
dev.off()
# NOTE(review): this assigns the bare name `reduce` (a function object, if one
# is in scope) to fake.f2 -- almost certainly a truncated leftover line.
fake.f2 <- reduce
png(paste0('~/public_html/ELR_gts_fix_these_genos',indo,'.png'),height=1000,width=4000)
plotGeno(subset(cross,ind=both),cex=3)
dev.off()
fake.f2 <- fill.geno(fake.f2, method=c("argmax"))
# NOTE(review): `par` shadows graphics::par(); rename if this scratch code is
# ever promoted.
par <- subset(cross.all,chr=1,ind="BLI_BI1124M")
png(paste0('~/public_html/ELR_gts_c',indo,'.png'),height=1000,width=4000)
plotGeno(cross.all,chr=1,cex=3)
dev.off()
|
8aa3326945038e1489e9b4a56c1d7a41db7ecdd3
|
34d9e4c3ebbbe98bf109420f81f44be70f5f72d0
|
/global.R
|
78b37f01d18e5710c9240bf317305e02364f6d49
|
[] |
no_license
|
YuanzhiQi/mta_turnstile_shiny_app
|
af48c656dd5996266174c10820f98f3f56c86b11
|
0117793d712a547bb923d43e2a48403d1e8b5b3d
|
refs/heads/master
| 2021-01-20T20:36:04.687178
| 2016-07-18T20:33:21
| 2016-07-18T20:33:21
| 63,631,256
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 182
|
r
|
global.R
|
# global.R: packages shared by ui.R/server.R for the MTA turnstile Shiny app.
# FIX: library(leaflet) was listed twice in the original; deduplicated.
library(shinydashboard)
library(shiny)
library(shinyjs)
library(stringr)
library(data.table)
library(leaflet)
library(DT)
library(ggplot2)
# Pre-computed data objects loaded into the global environment for the app.
load("start.RData")
|
32571c1af3fceb2e9169f5582ae6d5b5e98aaed7
|
2d8409a80bdf7b6bd03d14cbade1659427198d51
|
/code/systematic_calibration/simsets_from_systematic.R
|
42a9d25a297cc942e7ee7d2009958bcd6d96a0e4
|
[] |
no_license
|
joeflack4/ending_hiv
|
1ba6d2d8d4494d10528b97d1a850a44b2cd41fe2
|
7b337e6f978ea0cf6eb17d1a5c55267106952008
|
refs/heads/master
| 2023-07-25T03:07:46.399438
| 2021-08-24T17:20:38
| 2021-08-24T17:20:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,371
|
r
|
simsets_from_systematic.R
|
source('code/systematic_calibration/systematic_settings.R')
source('code/interventions/interventions_for_simset.R')
source('code/targets/target_msas.R')
# Load the MCMC run for a location, extract a burned/thinned simset, prepare
# it for intervention runs, and save it to the appropriate systematic dir.
# `full` controls both the destination folder and how aggressively we thin.
make.and.save.simset.for.location <- function(location,
                                              full=T,
                                              from.dir=file.path(SYSTEMATIC.ROOT.DIR, 'systematic_parallel'))
{
    # Destination folder depends on whether we keep the full simset
    dst.dir <- file.path(SYSTEMATIC.ROOT.DIR,
                         if (full) 'full_simsets' else 'limited_simsets')

    print("Loading mcmc")
    mcmc <- load.mcmc.from.dir(location, from.dir)

    # Shorter chains (720 iterations) get a slightly smaller extra burn-in
    additional.burn <- if (mcmc@n.iter == 720) 220 else 250

    print("Cutting simset")
    thin.by <- if (full) 2 else 10
    simset <- extract.simset(mcmc,
                             additional.burn = additional.burn,
                             additional.thin = thin.by)

    # Set up for interventions
    print("Preparing simset to run interventions")
    simset <- prepare.simset.for.interventions(simset)

    # Save and return invisibly
    print("Saving")
    save(simset, file = file.path(dst.dir, paste0(location, '.Rdata')))
    invisible(simset)
}
|
996ff0bd7a9b94f360044edd56086f8f9cb312cd
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/hutilscpp/tests/testthat/test-000.R
|
c24bbdfbebc5b317f7ece65e30f50214e3fea0ad
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 140
|
r
|
test-000.R
|
# Bump the hutilscpp thread count only on the maintainer's own machines
# (Windows USERNAME starting with "hugh").
if (startsWith(Sys.getenv("USERNAME"), "hugh")) {
options(hutilscpp.nThread = 10)
}
# Smoke test: confirms the testthat harness itself runs before real suites.
test_that("start", {
expect_true(TRUE)
})
|
b5d27521bb9e29aa72d02e27d041edc6235b652e
|
123fc737f01a88e58c6eb3ca7ad0e51e867ff434
|
/bigtime/R/directforecast.R
|
b23afda7faeb4245f27f46711eb12a2d6f301ac1
|
[] |
no_license
|
akhikolla/ClusterTests
|
e12e5486fc1a80609a956fcb7f432b47e8174b0f
|
1c7702ba4035511ffe7d4dd27e9021b6962718fe
|
refs/heads/master
| 2022-12-10T13:00:09.654663
| 2020-09-14T19:12:37
| 2020-09-14T19:12:37
| 295,513,576
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,212
|
r
|
directforecast.R
|
#' Function to obtain h-step ahead direct forecast based on estimated VAR, VARX or VARMA model
#' @param fit Fitted sparse VAR, VARX or VARMA model.
#' @param model Type of model that was estimated: VAR, VARX or VARMA.
#' @param h Desired forecast horizon.
#' @export
#' @return Vector of length k containing the h-step ahead forecasts for the k time series.
#' @examples
#' data(Y)
#' VARfit <- sparseVAR(Y) # sparse VAR
#' VARforecast <- directforecast(fit=VARfit, model="VAR", h=1)
directforecast <- function(fit, model, h=1){
if(h<=0){
stop("Forecast horizon h must be a strictly positive integer.")
}
if(!is.element(model, c("VAR", "VARX", "VARMA"))){
stop("The model needs to be either VAR, VARX or VARMA")
}
# Preliminaries
k <- ncol(fit$Y)
# --- VAR branch: forecast from lags of Y only --------------------------------
if(model=="VAR"){
Y <- fit$Y
p <- fit$p
Phi <- fit$Phihat
phi0 <- fit$phi0hat
if(is.null(Phi) | is.null(phi0)){
stop("Please provide a fitted VAR model")
}
# Build the lagged data matrices aligned for an h-step direct forecast.
VARFIT <- HVARmodel(Y=Y, p=p, h=h)
# Univariate case: keep matrix shapes consistent (k == 1 drops dimensions).
if(k==1){
VARFIT$fullY <- as.matrix(VARFIT$fullY)
Zpred <- c(c(t(VARFIT$fullY[nrow(VARFIT$fullY):(nrow(VARFIT$fullY)-p+1),1:ncol(VARFIT$fullY)])))
Phi <- matrix(Phi, nrow=1)
}else{
Zpred <- c(c(t(VARFIT$fullY[nrow(VARFIT$fullY):(nrow(VARFIT$fullY)-p+1),1:ncol(VARFIT$fullY)])))
}
# Row i.p of Ypred holds the i.p-th most recent observation vector.
Ypred <- matrix(Zpred, nrow=VARFIT$p, byrow=T)
# Forecast = intercept + sum over lags of Phi_lag %*% y_{t-lag+1}.
Yhat <- phi0
for(i.p in 1:p){
if(k==1){
Yhat <- Yhat + matrix(Phi[, ((i.p-1)*k+1):(i.p*k) ], nrow=k)%*%Ypred[i.p,]
}else{
Yhat <- Yhat + Phi[, ((i.p-1)*k+1):(i.p*k) ]%*%Ypred[i.p,]
}
}
}
# --- VARX / VARMA branch: both reduce to a VARX-style forecast ---------------
# (for VARMA the "exogenous" series U are the estimated residual terms).
if(model=="VARX"| model=="VARMA"){
if(model=="VARX"){
m <- ncol(fit$X)
Y <- fit$Y
U <- fit$X
p <- fit$p
q <- fit$s
Phi <- fit$Phihat
Theta <- fit$Bhat
phi0 <- fit$phi0hat
if(is.null(Phi) | is.null(Theta) | is.null(phi0)){
stop("Please provide a fitted VARX model")
}
}
if(model=="VARMA"){
m <- ncol(fit$U)
Y <- fit$Y
U <- fit$U
p <- fit$VARMAp
q <- fit$VARMAq
Phi <- fit$Phihat
Theta <- fit$Thetahat
phi0 <- fit$phi0hat
if(is.null(Phi) | is.null(Theta) | is.null(phi0)){
stop("Please provide a fitted VARMA model")
}
}
VARXFIT <- HVARXmodelFORECAST(Y=Y, X=U, p=p, s=q, h=h)
# Univariate response: restore matrix shapes.
if(k==1){
VARXFIT$fullY <- as.matrix(VARXFIT$fullY)
Phi <- matrix(Phi, nrow=1)
Theta <- matrix(Theta, nrow=1)
}
Zpred <- c(c(t(VARXFIT$fullY[nrow(VARXFIT$fullY):(nrow(VARXFIT$fullY)-p+1), 1:ncol(VARXFIT$fullY)])))
Ypred <- matrix(Zpred, nrow=VARXFIT$p, byrow=T)
# Single exogenous series: restore matrix shape.
if(m==1){
VARXFIT$fullXRESP <- as.matrix(VARXFIT$fullXRESP)
}
Xpred <- c(c(t(VARXFIT$fullXRESP[nrow(VARXFIT$fullXRESP):(nrow(VARXFIT$fullXRESP)-q+1),1:ncol(VARXFIT$fullXRESP)])))
Upred <- matrix(Xpred,nrow=VARXFIT$s,byrow=T)
# Forecast = intercept + AR terms (Phi) + exogenous/MA terms (Theta).
Yhat <- phi0
for(i.p in 1:p){
if(k==1){
Yhat <- Yhat + matrix(Phi[, ((i.p-1)*k+1):(i.p*k) ], nrow=k)%*%Ypred[i.p,]
}else{
Yhat <- Yhat + Phi[, ((i.p-1)*k+1):(i.p*k) ]%*%Ypred[i.p,]
}
}
for(i.q in 1:q){
if(m==1){
Yhat <- Yhat + matrix(Theta[, ((i.q-1)*m+1):(i.q*m) ], nrow=k)%*%Upred[i.q,]
}else{
Yhat <- Yhat + Theta[, ((i.q-1)*m+1):(i.q*m) ]%*%Upred[i.q,]
}
}
}
return("Yhat"=c(Yhat))
}
# Build the lagged response/predictor matrices needed for an h-step-ahead
# direct forecast of a VARX(p, s) model.
#
# Args:
#   Y: T x k matrix of response series.
#   X: T x kX matrix of exogenous series.
#   p: autoregressive order (number of Y lags).
#   s: exogenous order (number of X lags).
#   h: forecast horizon (default 1).
# Returns a list with:
#   fullY     - responses aligned to the lag structure (rows m+h .. T),
#   fullZ     - (k*p) x n transposed matrix of the p Y-lags, shifted h steps,
#   fullX     - (kX*s) x n transposed matrix of the s X-lags, shifted h steps,
#   fullXRESP - contemporaneous X values aligned with fullY,
#   k, kX, p, s - dimensions/orders, echoed back for the caller.
HVARXmodelFORECAST <- function(Y, X, p, s, h=1){
  # Preliminaries
  k <- ncol(Y)
  kX <- ncol(X)
  # embed() stacks lags 0..(m+h-1) column-blockwise; the first k (resp. kX)
  # columns are the contemporaneous values.
  m <- max(s, p)
  DATAY <- embed(Y, dimension=m+h)
  fullY <- DATAY[, 1:k]
  fullZNEW <- as.matrix(as.matrix(DATAY[,-c(1:k)])[,(1:((p+h-1)*k))])
  # Keep the p most distant lag blocks so predictors sit h steps behind Y.
  fullZ <- t(fullZNEW[,(ncol(fullZNEW)-k*p+1):ncol(fullZNEW)])
  DATAX <- embed(X, dimension=m+h)
  fullXRESP <- DATAX[,1:kX]
  fullXNEW <- as.matrix(as.matrix(DATAX[,-c(1:kX)])[,(1:((s+h-1)*kX))])
  fullX <- t(fullXNEW[,(ncol(fullXNEW)-kX*s+1):ncol(fullXNEW)])
  # Fix: return the result list explicitly instead of relying on the
  # (invisible) value of a trailing assignment.
  list("fullY"=fullY, "fullX"=fullX, "fullZ"=fullZ, "k"=k, "kX"=kX,
       "p"=p, "s"=s, "fullXRESP"=fullXRESP)
}
|
b358051eaedb7a99e6a874c893e932717a42f875
|
c8d3eac72924cc8952e6bdf77497cd2c571194df
|
/fluodilution/R/proliferation-cyton.R
|
352783bf17505663bd28a770d5ab50fefe665907
|
[
"MIT"
] |
permissive
|
hchauvin/fluodilution
|
af57ec858aefe41ae4ad92377ba80ec699d8d2b4
|
1fd52c705edfd3a0951152511f3d1b54b8762f4a
|
refs/heads/master
| 2021-04-03T08:19:23.059053
| 2019-01-11T16:05:03
| 2019-01-11T16:05:03
| 125,096,130
| 0
| 0
|
MIT
| 2019-09-24T21:52:03
| 2018-03-13T18:22:18
|
R
|
UTF-8
|
R
| false
| false
| 4,544
|
r
|
proliferation-cyton.R
|
# Copyright (c) 2015-2018 Hadrien Chauvin
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#####################
#' @export
#' @rdname proliferation
#'
#' @include proliferation-.R
# Cyton-style proliferation model plugged into the fd_proliferation framework:
# `new` sets defaults, `model` evaluates the dynamics via model_cyton() and
# reshapes its output into the single-category structure the framework expects,
# `constrain` supplies start/lower/upper parameter constraints.
fd_proliferation_cyton <- fd_proliferation(
name = "cyton",
# Default configuration: mgen generations tracked, length.out time-grid points.
new = function (mgen = 8L, length.out = 500L, ...) {
list(
categories="One",
mgen=mgen,
length.out = length.out,
...
)
},
# Evaluate the model for parameter set theta at the requested times; label the
# single category on counts and population matrices.
model = function (self, theta, times, mgen=NULL) {
if (is.null(mgen)) mgen <- self$mgen
value <- theta[[1L]]
ret <- model_cyton(value, times, mgen,
length.out = self$length.out)
# cbind() promotes the count vectors to 1-column matrices for naming.
ret$Ns <- cbind(ret$Ns)
colnames(ret$Ns) <- self$categories
ret$Ns_lost <- cbind(ret$Ns_lost)
colnames(ret$Ns_lost) <- self$categories
ret$live_pop <- setNames(list(ret$live_pop), self$categories)
ret$lost_pop <- setNames(list(ret$lost_pop), self$categories)
return (ret)
},
# Constraint formulas in the package's catcstr() mini-DSL; the formulas are
# data, not evaluated R code, hence the lint suppression.
constrain = function (self, object, type) {
# nolint start
catcstr(
switch(type,
start = ~ pro:all:({res0 <- 0.2; res <- 0.2} +
(g/g0):{mm <- 5; delta <- 1; ss <- 0.5} +
(f/f0):{mm <- 5; delta <- 0.2; ss <- 0.5}),
lower = ~ pro:all:({res0 <- 0; res <- 0} +
(g/g0/f/f0):{mm <- 0.5; delta <- 0.1; ss <- 0.1} +
(f/g):{delta <- 0.01}),
upper = ~ pro:all:({res0 <- 1; res <- 1} +
(g/g0/f/f0):{mm <- 5; delta <- 1; ss <- 0.5})
),
object) %>% expand_categories(self$categories)
# nolint end
}
)
# Models are implemented on the side for easy customisation
model_cyton <- function (params, times = c(2, 4, 6, 8, 12, 24), mgen = 15L,
length.out = 500L) {
etimes <- sort(unique(c(times, seq(0, max(times), length.out = length.out))))
outtimes <- which(etimes %in% times)
# Precalculate F_g and G_f
dt <- c(etimes[-1], etimes[length(etimes)]) - etimes
F0dist <- fd_pack_dist(params$f0)
G0dist <- fd_pack_dist(params$g0)
F0_g0 <- (1 - fd_pdist(etimes, F0dist)) * fd_ddist(etimes, G0dist)
G0_f0 <- (1 - fd_pdist(etimes, G0dist)) * fd_ddist(etimes, F0dist)
F0_g0[1] <- 0
G0_f0[1] <- 0
Fdist <- fd_pack_dist(params$f)
Gdist <- fd_pack_dist(params$g)
F_g_dt <- (1 - fd_pdist(etimes, Fdist)) * fd_ddist(etimes, Gdist, dt)
G_f_dt <- (1 - fd_pdist(etimes, Gdist)) * fd_ddist(etimes, Fdist, dt)
F_g_dt[1] <- 0
G_f_dt[1] <- 0
# Rates (r: division rate; d: death rate)
r <- matrix(NA, ncol=length(etimes), nrow=mgen + 1)
d <- matrix(NA, ncol=length(etimes), nrow=mgen + 1)
r[1, ] <- (1 - params$res0) * G0_f0
d[1, ] <- (1 - params$res0) * F0_g0
# Ns
N <- matrix(NA, ncol=length(etimes), nrow=mgen + 1)
N_lost <- matrix(NA, ncol=length(etimes), nrow=mgen + 1)
N_lost[1, ] <- cumsum(d[1, ] * dt)
N[1, ] <- pmax(1 - cumsum(r[1, ] * dt) - N_lost[1, ], 0)
for (i in 1:mgen) {
r[i + 1, ] <- 2 * (1 - params$res) * cytonConv(r[i, ], G_f_dt)
d[i + 1, ] <- 2 * (1 - params$res) * cytonConv(r[i, ], F_g_dt)
N_lost[i + 1, ] <- cumsum(d[i + 1, ] * dt)
N[i + 1, ] <-
pmax(cumsum((2 * r[i, ] - r[i + 1, ]) * dt) - N_lost[i + 1, ], 0)
}
N_out <- N[, outtimes]
N_lost_out <- N_lost[, outtimes]
Ns <- colSums(N_out)
Ns_lost <- colSums(N_lost_out)
live_pop <- t(N_out) / Ns
live_pop[!is.finite(live_pop)] <- 0
lost_pop <- t(N_lost_out) / Ns_lost
lost_pop[!is.finite(lost_pop)] <- 0
list(
Ns = Ns,
Ns_lost = Ns_lost,
live_pop = live_pop,
lost_pop = lost_pop
)
}
|
396631c3f04563a88c451370af15f6509322df3c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/assertive.matrices/examples/is_symmetric_matrix.Rd.R
|
07854e0910f671bd522d8951b305f48c3cc4e605
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 349
|
r
|
is_symmetric_matrix.Rd.R
|
library(assertive.matrices)
### Name: assert_is_symmetric_matrix
### Title: Is the input a symmetric matrix?
### Aliases: assert_is_symmetric_matrix is_symmetric_matrix
### ** Examples
# A tiny asymmetry (1e-100) is within the default numeric tolerance, so the
# assertion passes.
m <- diag(3); m[3, 1] <- 1e-100
assert_is_symmetric_matrix(m)
#These examples should fail.
# With tol = 0 the same matrix is no longer "symmetric"; dont_stop() captures
# the error so the example keeps running.
assertive.base::dont_stop(assert_is_symmetric_matrix(m, tol = 0))
|
e6972e2d40160e9d4dad03f646e4892445482a8b
|
552d910aabda4755d6d831e1063e503015037603
|
/R/admm.iters.r
|
72ea8867c6a94e0378d76c9148b2546ae5e19a7c
|
[] |
no_license
|
cran/JGL
|
da88bdca245751169d574e27c0fd846b70594c4c
|
bca5ba36a8070f7b391bbd7a9d69c5de6e9d6007
|
refs/heads/master
| 2021-05-16T03:11:20.349474
| 2018-11-30T22:40:15
| 2018-11-30T22:40:15
| 17,680,067
| 3
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,779
|
r
|
admm.iters.r
|
### ADMM for FGL:
admm.iters = function(Y,lambda1,lambda2,penalty="fused",rho=1,rho.increment=1,weights,penalize.diagonal,maxiter = 1000,tol=1e-5,warm=NULL)
{
K = length(Y)
p = dim(Y[[1]])[2]
n=weights
ns = c(); for(k in 1:K){ns[k] = dim(Y[[k]])[1]}
S = list(); for(k in 1:K){S[[k]] = cov(Y[[k]])*(ns[k]-1)/ns[k]}
# initialize theta:
theta = list()
for(k in 1:K){theta[[k]] = diag(1/diag(S[[k]]))}
# initialize Z:
Z = list(); for(k in 1:K){Z[[k]]=matrix(0,p,p)}
# initialize W:
W = list(); for(k in 1:K) {W[[k]] = matrix(0,p,p) }
# initialize lambdas: (shouldn't need to do this if the function is called from the main wrapper function, JGL)
lam1 = penalty.as.matrix(lambda1,p,penalize.diagonal=penalize.diagonal)
if(penalty=="fused") {lam2 = penalty.as.matrix(lambda2,p,penalize.diagonal=TRUE)}
if(penalty=="group") {lam2 = penalty.as.matrix(lambda2,p,penalize.diagonal=penalize.diagonal)}
# iterations:
iter=0
diff_value = 10
while((iter==0) || (iter<maxiter && diff_value > tol))
{
# reporting
# if(iter%%10==0)
if(FALSE)
{
print(paste("iter=",iter))
if(penalty=="fused")
{
print(paste("crit=",crit(theta,S,n=rep(1,K),lam1,lam2,penalize.diagonal=penalize.diagonal)))
print(paste("crit=",crit(Z,S,n=rep(1,K),lam1,lam2,penalize.diagonal=penalize.diagonal)))
}
if(penalty=="group"){print(paste("crit=",gcrit(theta,S,n=rep(1,K),lam1,lam2,penalize.diagonal=penalize.diagonal)))}
}
# update theta:
theta.prev = theta
for(k in 1:K){
edecomp = eigen(S[[k]] - rho*Z[[k]]/n[k] + rho*W[[k]]/n[k])
D = edecomp$values
V = edecomp$vectors
D2 = n[k]/(2*rho) * ( -D + sqrt(D^2 + 4*rho/n[k]) )
theta[[k]] = V %*% diag(D2) %*% t(V)
}
# update Z:
# define A matrices:
A = list()
for(k in 1:K){ A[[k]] = theta[[k]] + W[[k]] }
if(penalty=="fused")
{
# use flsa to minimize rho/2 ||Z-A||_F^2 + P(Z):
if(K==2){Z = flsa2(A,rho,lam1,lam2,penalize.diagonal=TRUE)}
if(K>2){Z = flsa.general(A,rho,lam1,lam2,penalize.diagonal=TRUE)} # the option to not penalize the diagonal is exercised when we initialize the lambda matrices
}
if(penalty=="group")
{
# minimize rho/2 ||Z-A||_F^2 + P(Z):
Z = dsgl(A,rho,lam1,lam2,penalize.diagonal=TRUE)
}
# update the dual variable W:
for(k in 1:K){W[[k]] = W[[k]] + (theta[[k]]-Z[[k]])}
# bookkeeping:
iter = iter+1
diff_value = 0
for(k in 1:K) {diff_value = diff_value + sum(abs(theta[[k]] - theta.prev[[k]])) / sum(abs(theta.prev[[k]]))}
# increment rho by a constant factor:
rho = rho*rho.increment
}
diff = 0; for(k in 1:K){diff = diff + sum(abs(theta[[k]]-Z[[k]]))}
out = list(theta=theta,Z=Z,diff=diff,iters=iter)
return(out)
}
|
25ce46b2305328aee349c9d449db3d8d44ec277a
|
1ae92230540e7f3bfcc2ae0e81682f3a406ab745
|
/poisson-R/main.R
|
24e48239645953478909353732892f6a2b01d7f0
|
[] |
no_license
|
btatkinson/Poisson
|
0104bd678dade20c9181326a1f4a030b9a358d8c
|
daf3aaa4b6f47177d393840c054e5c49a8a433fe
|
refs/heads/master
| 2020-12-08T06:36:36.814724
| 2020-01-27T15:34:54
| 2020-01-27T15:34:54
| 232,908,851
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,569
|
r
|
main.R
|
library(tidyverse)
library(tidyr)
library(dplyr)
library(na.tools)
# Seasons of nflscrapR regular-season play-by-play data to aggregate.
seasons <- seq(2014, 2019)
# Collect one per-season data frame per iteration and bind them once at the
# end. FIX: the original grew `df` via rbind() inside the loop (O(n^2)) and
# re-sorted the whole frame on every iteration; it also used `=` assignment.
season_dfs <- vector("list", length(seasons))
for (idx in seq_along(seasons)) {
  season <- seasons[idx]
  # load play by play data
  reg_pbp <- read_csv(sprintf("../data/nflscrapR/play_by_play_data/regular_season/reg_pbp_%s.csv", season))
  # only grab needed columns
  reg_pbp <- reg_pbp[, c("passer_player_name", "game_id", "pass_touchdown")]
  # same person, Josh Allen, has two names-- Jos.Allen and J.Allen
  reg_pbp$passer_player_name <- sub("Jos.Allen", "J.Allen", reg_pbp$passer_player_name)
  # total pass touchdowns in season
  sea_td <- reg_pbp %>%
    group_by(passer_player_name) %>%
    summarise(season_pass_touchdown = sum(pass_touchdown))
  # total games played in season
  sea_games <- reg_pbp %>%
    group_by(passer_player_name) %>%
    summarise(season_game_count = n_distinct(game_id))
  # merge games and pass touchdowns
  sea_df <- merge(sea_td, sea_games, by = "passer_player_name")
  # drop small sample QBs
  sea_df <- sea_df %>%
    filter(season_game_count > 7)
  # divide to get touchdowns per game
  sea_df <- transform(sea_df, td_pg = season_pass_touchdown / season_game_count)
  sea_df <- sea_df %>% drop_na()
  # add column denoting season
  sea_df$season <- season
  season_dfs[[idx]] <- sea_df
}
df <- do.call(rbind, season_dfs)
# sort values (once, after all seasons are combined)
df <- df[order(-df$td_pg), ]
print(df)
library(ggplot2)
library(sandwich)
library(msm)
# pasted from python notebook
#There were two defensive projections that stood out to me as bogus, and so I decided to change them.
#Arizona hired Kliff Kingsbury during the offseason, and he installed the fastest offense in the league. He also was not hired because of his defensive prowess. As a result, Arizona would allow significantly more possessions than in the past, and likely either 1) succeed on offense enough to force the other team to pass or 2) fail hard, and let the opponent rack up scores. For this reason I decided to add to their expectation.
#On the other hand, the 49ers have had two straight seasons of devastating injuries, had a historically low, 5 standard deviation off interception rate (2 all year in 2018), and drafted Nick Bosa, a defensive stud, second overall. Having them last in projected pass defense did not seem like a good idea. I subtracted from their expectation.
# qb priors
# load fantasypros data
# NOTE(review): path is relative to the script's working directory; the file
# is expected to hold FantasyPros 2019 QB projections (schema not shown here).
qb_proj <- read_csv("../data/2019_projections.csv")
# defense priors
|
925c0d43a5abadf1850e6bf3e64cd300347ce59f
|
ffeea4267d2ca45fb9c8be1b73cfbd77d5bbf3ae
|
/rmarkdown/acunha.R
|
e642cad43330e2ed5820047ee8bef398847d89d6
|
[] |
no_license
|
charlenopires/houseofcunha
|
8d25fe7f23bdd283889dcd66ab7872d652deeffb
|
eccdb092f2f737dc8aff58edc6cf5393edcd622e
|
refs/heads/master
| 2021-01-18T08:56:32.378639
| 2015-07-23T02:20:25
| 2015-07-23T02:20:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,110
|
r
|
acunha.R
|
library(ggplot2)
library(dplyr)
# Chamber roll-call votes; keep only explicit yes/no ("sim"/"não") votes.
votos <- read.csv("votacoes//votacao.csv", strip.white=TRUE)
votos <- filter(votos, voto %in% c("sim", "não"))
votos$voto <- droplevels(votos$voto)
# NOTE(review): this factor() is immediately overwritten by the paste0()
# below, so it has no lasting effect.
votos$num_pro <- factor(votos$num_pro)
votos$num_pro <- paste0(votos$num_pro, "-", votos$id_votacao)
votos$nome <- paste0(votos$nome, " (", votos$partido, ")")
# Keep only deputies who cast at least 31 yes/no votes.
ativos <- votos %>%
group_by(nome) %>%
summarise(c = n()) %>%
filter(c >= 31) %>%
select(nome)
votos <- filter(votos, nome %in% ativos$nome)
# concorda = 1 when a deputy voted the same way as Cunha on that roll call.
votos = mutate(votos, concorda = ifelse(as.character(voto) == as.character(cunha), 1, 0) )
# Per-deputy share of votes agreeing with Cunha.
acunhamento = votos %>%
group_by(nome, partido) %>%
summarise(prop = sum(concorda) / n())
ac = acunhamento %>%
mutate(nivel = ifelse(prop >= 0.70,'Cunha', ifelse(prop <= 0.6, 'Acunha','Muro'))) %>%
ungroup() %>%
arrange(desc(prop))
### PLOTS ###
png(file="tops-cunhas.png", height = 850, width = 650)
# NOTE(review): `prop != 'NA'` compares against the string "NA"; NA props are
# in practice dropped because NA comparisons are filtered out anyway.
tops = filter(ac, prop >= 0.9 , prop != 'NA')
ggplot(tops, aes(reorder(nome,prop), prop*100)) +
geom_point(alpha = 0.9, size = 4, colour = "darkred") +
theme_bw() +
theme(axis.title = element_text(color="#666666", face="bold", size=16),
axis.text = element_text(size=14),
axis.line = element_blank()) +
xlab("") + ylab("Concordância com Cunha (%)") +
coord_flip()
dev.off()
# Bucket parties: keep the four focal parties, lump everything else as 'outros'.
to_plot <- mutate(ac, cat_partido = ifelse(partido == 'pt' | partido == 'pmdb' | partido == 'psol'
                                           | partido == 'psdb', as.character(partido), 'outros'))
to_plot$cat_partido <- factor(to_plot$cat_partido,
                              levels = c("psol", "psdb", "pt", "pmdb", "outros"),
                              ordered = TRUE)
# FIX: library() instead of require() -- require() only warns on failure.
library(scales)
png(file="cunhometro-por-partido.png", height = 450, width = 600)
ggplot(to_plot, aes(cat_partido, prop * 100, colour = cat_partido)) +
  geom_point(position = position_jitter(width = 0.2), size = 5) +
  scale_colour_manual(values = c(alpha("#E69F00", 0.6),
                                 alpha("#0066CC", 0.6),
                                 alpha("#FF3300", 0.6),
                                 alpha("darkred", 0.6),
                                 alpha("grey70", .3)),
                      guide = guide_legend(title = "partido",
                                           override.aes = list(alpha = 1, size = 4))) +
  # FIX: theme_bw() must come BEFORE theme() -- a complete theme replaces all
  # prior element settings, so the original ordering silently discarded the
  # axis-text customizations (the other plots in this file use this order).
  theme_bw() +
  theme(axis.title = element_text(color="#666666", face="bold", size=18),
        axis.text.y = element_text(size=18),
        axis.line = element_blank()) +
  xlab("Partido") + ylab("Concordância com Cunha (%)") +
  coord_flip()
dev.off()
png(file="tops-acunhas.png", height = 850, width = 650)
tops = filter(ac, prop < 0.4 , prop != 'NA')
ggplot(tops, aes(reorder(nome,prop), prop*100)) +
geom_point(alpha = 0.9, size = 4, colour = "darkgreen") +
theme_bw() +
theme(axis.title = element_text(color="#666666", face="bold", size=16),
axis.text = element_text(size=14),
axis.line = element_blank()) +
xlab("") + ylab("Concordância com Cunha (%)") +
coord_flip()
dev.off()
|
827fb5a868e16e35a9e9d57fd8897110c63e8d11
|
7d120b449fe91d4f49fc68531cf739d45b67edb9
|
/src/STATS_FIRTHLOG.R
|
6e68df4c977e5e515522382c18e4579c4369e9be
|
[
"Apache-2.0",
"GPL-3.0-only"
] |
permissive
|
IBMPredictiveAnalytics/STATS_FIRTHLOG
|
29f50b17b27af1d95ac09a2aec2260e44a583eea
|
05dcfe9a3a44f05c0df9a9a1236b0833fb6978f0
|
refs/heads/master
| 2022-11-05T07:08:00.835410
| 2022-10-21T01:32:27
| 2022-10-21T01:32:27
| 38,112,153
| 0
| 0
|
Apache-2.0
| 2022-04-27T13:41:29
| 2015-06-26T13:18:24
|
R
|
UTF-8
|
R
| false
| false
| 20,123
|
r
|
STATS_FIRTHLOG.R
|
#/***********************************************************************
# * Licensed Materials - Property of IBM
# *
# * IBM SPSS Products: Statistics Common
# *
# * (C) Copyright IBM Corp. 2015, 2019
# *
# * US Government Users Restricted Rights - Use, duplication or disclosure
# * restricted by GSA ADP Schedule Contract with IBM Corp.
# ************************************************************************/
# author__ = "SPSS, JKP"
# version__ = "1.1.1"
# History
# 22-apr-2014 Original Version
# 26-jun-2014 Adapt to incompatible changes in logistf vresion 1.20
# 19-jun-2015 Support for weights and reference category specifications for factors
# 09-dec-2019 Add full and restricted likelihoods to output
# Help text printed when the user runs STATS FIRTHLOG /HELP. (the string
# content is user-facing syntax documentation and must not be edited casually).
helptext="STATS FIRTHLOG DEPENDENT=variable
INDEP=list of variables
/OPTIONS FIRTH=YES or NO ALPHA=sig PPL=PROFILE or WALD
MAXSTEP=number MAXHS=integer MAXIT=integer
LCONV=number XCONV=number
/OUTPUT PLOT=list of integers
/SAVE DATASET=dataset name.
Compute Firth logistic regression.
Example:
STATS FIRTHLOG DEPENDENT=y INDEP=x1 x2 x3
/SAVE DATASET=casewiseresults.
DEPENDENT and INDEP list the dependent (binary) and
independent variables. The dependent variable must
have a scale measurement level and have values 0 and 1.
All other parameters are optional.
FIRTH=YES specifies the use of Firth's penalized maximum likelihood
method. NO specifies standard maximum likelihood.
PPL=PROFILE the use of the profile penalized log likelihood for
the confidence intervals and tests. WALD specifies WALD tests.
CONF specifies the confidence level. It must be a number between
50 and 100.
MAXSTEP through XCONV specify iteration and convergence
criteria. MAXIT is the maximum number of iterations, MAXSTEP
is the maximum step size in beta values per iteration. MAXHS is the
maximum number of step halving in one iteration. LCONV is
the log likelihood convergence criterion and XCONV is the
change criterion for values in the beta vector.
PLOT specifies plots of the likelihood against the parameter value
for selected independent variables. The variables are identified
by their number in the independent variables list starting from 1.
An index greater than the number of variables is ignored. Categorical
variables are not plotted.
DATASET is the name for saving casewise results, which include
the predicted probability of the 1 value and the hat matrix
diagonal element for each case. The first column is the
input case number. The dataset is created even if
convergence is not achieved.
Notes:
Cases with missing data are always deleted.
SPSS weights are not honored.
With R3.1 and logistf 1.21, the hat matrix is not available.
STATS FIRTHLOG /HELP. prints this information and does nothing else.
"
### MAIN ROUTINE ###
dofirth = function(dep, indep=NULL, firth=TRUE, conf=.95, ppl=TRUE,
    dataset=NULL, refcatfactors=NULL, refcats=NULL,
    maxstep=NULL, maxhs=NULL, maxit=NULL, lconv=NULL, gconv=NULL, xconv=NULL,
    plotlist=NULL) {
    # Estimate a Firth (penalized) logistic regression via the R logistf
    # package: fetch data from SPSS, validate the binary dependent variable,
    # fit the model, display summary/coefficient pivot tables and optional
    # profile plots, and optionally save casewise results to a new dataset.
    #
    # Parameters mirror the STATS FIRTHLOG syntax:
    #   dep           - name of the dependent variable (must be scale, 0/1)
    #   indep         - predictor variable names (may include factors)
    #   firth         - TRUE = Firth penalized ML, FALSE = standard ML
    #   conf          - confidence level in percent (syntax allows 50-99.9999)
    #                   NOTE(review): the default .95 looks inconsistent with
    #                   the percent scale used below (alpha = (100-conf)/100
    #                   gives alpha = .9905); presumably the syntax always
    #                   supplies CONF -- confirm.
    #   ppl           - TRUE = profile penalized likelihood CIs, FALSE = Wald
    #   dataset       - name for the output dataset of casewise results
    #   refcatfactors, refcats - parallel lists: factors and their reference
    #                   categories
    #   maxstep..xconv - iteration/convergence controls for logistf.control
    #   plotlist      - 1-based indexes into indep of variables to plot
    setuplocalization("STATS_FIRTHLOG")
    # A warnings proc name is associated with the regular output
    # (and the same omsid), because warnings/errors may appear in
    # a separate procedure block following the regular output
    procname=gtxt("Firth Logistic Regression")
    warningsprocname = gtxt("Firth Logistic Regression: Warnings")
    omsid="STATSFIRTH"
    warns = Warn(procname=warningsprocname,omsid=omsid)
    # logistf is required; fail with a translated message if unavailable
    tryCatch(library(logistf), error=function(e){
        warns$warn(gtxtf("The R %s package is required but could not be loaded.", "logistf"),dostop=TRUE)
        }
    )
    logistfversion = as.numeric(packageDescription("logistf", fields="Version"))
    # NULL control entries are dropped inside makecontrollist
    controllist = makecontrollist(list(maxit=maxit, maxhs=maxhs, maxstep=maxstep, lconv=lconv,
        gconv=gconv, xconv=xconv))
    alpha=(100. - conf)/100.
    wtvar = spssdictionary.GetWeightVariable()
    # Snapshot of all locals so far, passed to displayresults/makedataset.
    # NOTE(review): frml is computed after this snapshot, so allargs has no
    # "frml" entry even though displayresults references allargs[["frml"]]
    # on the legacy (< 1.20) plotting path -- confirm.
    allargs = as.list(environment())
    if (!is.null(dataset)) {
        # the output dataset name must be new, and the active dataset
        # must itself be named for a second dataset to be created
        alldatasets = tolower(spssdata.GetDataSetList())
        if (tolower(dataset) %in% alldatasets) {
            warns$warn(gtxt("The output dataset name is already in use."), dostop=TRUE)
        }
        if ("*" %in% alldatasets) {
            warns$warn(gtxt("The active dataset must have a name to create an output dataset"),
                dostop=TRUE)
        }
    }
    dta = getAndConvertData(dep, indep, refcatfactors, refcats, wtvar, warns)
    # getAndConvertData may have appended factors from refcatfactors;
    # recover the actual predictor list from the returned columns
    indep = names(dta)[-1]
    if (!is.null(wtvar)) {
        # the weight variable is the last column; drop it from predictors
        # and record the weighted case count
        indep = indep[1:(length(indep)-1)]
        allargs$wtsum = sum(dta[wtvar])
    } else {
        allargs$wtsum = nrow(dta)
    }
    frml = buildfrml(dep, indep)
    #dta = spssdata.GetDataFromSPSS(alldata, missingValueToNA=TRUE, factorMode="levels")
    # validate the dependent variable: must be scale and strictly 0/1
    if (is.factor(dta[[1]])) {
        warns$warn(gtxt("The dependent variable must have a scale measurement level"),
            dostop=TRUE)
    }
    depdist = table(dta[[1]], useNA="no")
    depvalues=attr(depdist, "dimnames")[[1]]
    if (length(depvalues) != 2 || length(intersect(depvalues, c(0,1))) != 2) {
        warns$warn(gtxt("The dependent variable has values other than 0 and 1 or is constant"),
            dostop=TRUE)
    }
    allargs["ndtarows"] = nrow(dta)
    # if saving a dataset, remove cases with missing values but record the case numbers
    # so that casewise output can be matched back to the input rows
    if (!is.null(dataset)) {
        cc = complete.cases(dta)
        casenumbers = (1:nrow(dta))[cc]
        dta = dta[cc,]
    } else {
        casenumbers = NULL
    }
    arglist = list(formula=frml, data=dta, firth=firth, pl=ppl,
        alpha=alpha, control=controllist)
    if (!is.null(wtvar)) {
        arglist$weights = dta[[wtvar]]
    }
    # fit the model; any logistf error is surfaced as a warnings table
    res = tryCatch(do.call(logistf, arglist),
        error = function(e) {
            warns$warn(e$message, dostop=TRUE)
            return(NULL)
        }
    )
    displayresults(allargs, dta, res, controllist, warns)
    if (!is.null(dataset)) {
        makedataset(allargs, res, casenumbers, warns)
    }
}
getAndConvertData = function(dep, indep, refvars, refvalues, wtvar, warns) {
    # Fetch the dependent, independent, and weight variables from SPSS,
    # augmented by any factors listed in refvars, and releveled so that
    # each factor's specified reference category comes first.
    #
    # dep       - the dependent variable name
    # indep     - the list of independent variables, which can include factors
    # refvars   - the factors that have a specified reference category
    # refvalues - the reference categories, parallel to refvars
    # wtvar     - the name of the weight variable (or NULL)
    # warns     - the warning/error management object
    #
    # Returns a data frame with columns ordered: dep, indep (possibly
    # extended by refvars), then wtvar if present.  Factors come back as
    # R factors (factorMode="levels"); missing values become NA.
    if (is.null(indep) && is.null(refvars)) {
        warns$warn(gtxt("At least one predictor is required"), dostop=TRUE)
    }
    if (is.null(refvars)) {
        dta = spssdata.GetDataFromSPSS(c(dep, indep, wtvar),
            missingValueToNA=TRUE, factorMode="levels")
    } else {
        if (length(refvars) != length(refvalues)) {
            warns$warn(gtxt("The reference category list is invalid"), dostop=TRUE)
        }
        # reconstruct independent list to include any new factors;
        # set operations are done in lower case so matching is
        # case-insensitive, but the result must be cased as original
        lowerindep = tolower(indep)
        lowerrefvars = tolower(refvars)
        allindep = union(lowerindep, lowerrefvars)
        newindep = setdiff(allindep, lowerindep)
        newindep = match(newindep, lowerrefvars)
        indep = c(indep, refvars[newindep]) # might not be any new indep
        allvars = c(dep, indep, wtvar)
        dta = spssdata.GetDataFromSPSS(allvars, missingValueToNA=TRUE, factorMode="levels")
        # move each specified reference category to the front of its
        # factor's level list so logistf treats it as the base level
        i = 1
        for (v in refvars) {
            if (!is.factor(dta[[v]])) {
                warns$warn(gtxtf(
                    "A variable for which a reference category was specified is not categorical: %s",
                    v), dostop=TRUE)
            }
            lev = levels(dta[[v]])
            m = match(refvalues[[i]], lev)
            # NOTE(review): a reference value that is not an existing level
            # is silently ignored here -- confirm that is intended
            if (!is.na(m)) {
                dta[v] = factor(dta[[v]], levels=c(refvalues[[i]], lev[-m]))
            }
            i = i + 1
        }
    }
    if (!is.null(wtvar)) {
        if (is.factor(dta[[wtvar]])) {
            warns$warn(gtxt("The weight variable must have a scale measurement level"),
                dostop=TRUE)
        }
    }
    return(dta)
}
buildfrml = function(dep, indep) {
    # Assemble the model formula "dep ~ x1+x2+..." from variable names
    # and return it as a formula object.
    # dep   - name of the dependent variable
    # indep - character vector of independent variable names
    rhs <- paste(indep, collapse="+")
    as.formula(paste(dep, "~", rhs, collapse=" "))
}
makecontrollist = function(alist) {
    # Build a logistf.control object from the iteration/convergence
    # settings, first dropping any options the user left unspecified (NULL)
    # so that logistf.control's own defaults apply.
    specified <- Filter(Negate(is.null), alist)
    do.call(logistf.control, specified)
}
displayresults = function(allargs, dta, res, controllist, warns) {
    # Produce pivot tables and charts for a fitted logistf model:
    # a run summary table, an optional reference-category table, the
    # coefficient table, and optional profile-likelihood plots.
    #
    # allargs     - snapshot of dofirth's arguments/locals (list)
    # dta         - the data frame the model was fitted on
    # res         - the logistf fit object
    # controllist - the logistf.control object used for the fit
    # warns       - the warning/error management object
    StartProcedure(allargs[["procname"]], allargs[["omsid"]])
    # row labels for the summary table; order must match summaryvalues below
    summarylabels=list(
        gtxt("Dependent Variable"),
        gtxt("Conf. Interval Type"),
        gtxt("Conf. Interval Level (%)"),
        gtxt("Estimation Method"),
        gtxt("Output Dataset"),
        gtxt("Full Likelihood"),
        gtxt("Restricted Likelihood"),
        gtxt("Likelihood Ratio Test"),
        gtxt("Degrees of Freedom"),
        gtxt("Significance"),
        gtxt("Number of Complete Cases"),
        gtxt("Cases with Missing Data"),
        gtxt("Weight Variable"),
        gtxt("Number of Iterations"),
        gtxt("Convergence Status"),
        gtxt("Last Log Likelihood Change"),
        gtxt("Maximum Last Beta Change")
    )
    # likelihood ratio statistic: restricted vs full log likelihood
    lrt = -2*(res$loglik[1] - res$loglik[2])
    # missing-case count is relative to the (possibly weighted) input total
    if (is.null(res$weights)) {
        missingcases = allargs$wtsum - res$n
    } else {
        missingcases = allargs$wtsum - sum(res$weights, na.rm=TRUE)
    }
    summaryvalues = list(
        allargs[["dep"]],
        ifelse(allargs[["ppl"]], gtxt("Profile penalized log likelihood"),
            gtxt("Wald")),
        allargs[["conf"]],
        ifelse(allargs[["firth"]], gtxt("Firth penalized maximum likelihood"),
            gtxt("maximum likelihood")),
        ifelse(is.null(allargs[["dataset"]]), gtxt("--NA--"), allargs[["dataset"]]),
        round(res$loglik[1], 4),
        round(res$loglik[2], 4),
        round(lrt, 4),
        res$df,
        1. - pchisq(lrt, res$df),
        res$n,
        missingcases,
        ifelse(is.null(allargs[["wtvar"]]), gtxt("--NA--"), allargs[["wtvar"]]),
        res$iter,
        # converged only if both the likelihood and beta criteria were met
        ifelse(res$conv[[1]] > controllist[["lconv"]] ||
            res$conv[[3]] > controllist[["xconv"]], gtxt("FAILED"), gtxt("Converged")),
        res$conv[[1]],
        res$conv[[3]]
    )
    names(summaryvalues) = summarylabels
    summarydf = data.frame(cbind(summaryvalues))
    colnames(summarydf) = gtxt("Values")
    spsspivottable.Display(summarydf, title=gtxt("Firth Logistic Regression Summary"),
        templateName="STATSFIRTHSUMMARY",
        caption=gtxt("Results computed by R logistf package"),
        isSplit=FALSE,
        format=formatSpec.Count
    )
    # table of user-specified reference categories, if any
    # NOTE(review): "Catgories" in the title below is a typo, but fixing it
    # would break existing translation catalogs keyed on this string
    if (!is.null(allargs$refcatfactors)) {
        spsspivottable.Display(
            data.frame(cbind(allargs$refcats)),
            rowlabels=allargs$refcatfactors,
            title=gtxt("Specified Reference Catgories"),
            rowdim=gtxt("Factor"),
            hiderowdimtitle=FALSE,
            collabels=gtxt("Level"),
            templateName="STATSFIRTHREFCATS"
        )
    }
    # coefficient table: estimate, SE, CI bounds, chi-square, p-value
    ddf = data.frame(res$coef, sqrt(diag(res$var)), res$ci.lower, res$ci.upper,
        qchisq(1 - res$prob, 1), res$prob)
    names(ddf) = c(gtxt("Coefficient"), gtxt("Std. Error"),
        gtxt("Lower CI"), gtxt("Upper CI"), gtxt("Chi-Square"), "Sig.")
    # infinite statistics (e.g., separation) are shown as "." in the table
    ddf[sapply(ddf, is.infinite)] = "."
    spsspivottable.Display(ddf, title=gtxt("Coefficients"),
        outline=gtxt("Coefficients"),
        templateName="STATSFIRTHCOEF", isSplit=FALSE,
        caption=gtxtf("Dependent Variable: %s", allargs[["dep"]])
    )
    if (!is.null(allargs[["plotlist"]])) {
        nindep = length(allargs[["indep"]])
        # allargs is temporarily published to the global environment so the
        # plotting code (and any formulas it evaluates) can see it
        assign("allargs", allargs, envir=.GlobalEnv)
        # plots do not work for categorical predictors; out-of-range
        # indexes are silently ignored
        for (var in allargs[["plotlist"]]) {
            if (var <= nindep) {
                vname = allargs[["indep"]][[var]]
                if (!is.factor(dta[[vname]])) {
                    if (allargs$logistfversion < 1.20) {
                        # legacy API (logistf < 1.20)
                        # NOTE(review): allargs[["frml"]] is NULL because the
                        # snapshot in dofirth is taken before frml is built --
                        # this legacy path looks broken; confirm
                        tryCatch(
                            logistfplot(formula=allargs[["frml"]], data=dta,
                                which=as.formula(paste("~",vname, "-1")), firth=allargs[["firth"]],
                                alpha = allargs[["alpha"]], control=controllist),
                            error=function(e) {warns$warn(e$message, dostop=TRUE)}
                        )
                    }
                    else {
                        # modern API: plot the profile likelihood from the fit
                        tryCatch(
                            plot(profile(res, variable=vname,
                                alpha=allargs[["alpha"]], control=controllist)),
                            error=function(e) {warns$warn(e$message, dostop=TRUE)}
                        )
                    }
                }
            }
        }
        rm("allargs", envir=.GlobalEnv)
    }
    spsspkg.EndProcedure()
}
makedataset = function(allargs, res, casenumbers, warns) {
    # Create the output dataset of casewise results: input case number,
    # predicted probability of the 1 value, and (when the logistf version
    # provides it) the hat matrix diagonal element.
    #
    # allargs     - snapshot of dofirth's arguments (supplies the dataset name)
    # res         - the logistf fit object
    # casenumbers - original 1-based case numbers of the complete cases,
    #               used to account for rows dropped for missing data
    # warns       - the warning/error management object (unused here but kept
    #               for a uniform internal signature)
    dict = list()
    # variable spec: name, label, type (0 = numeric), format, measurement level
    dict[[1]] = c("Case", "", 0, "F8.0", "nominal")
    dict[[2]] = c("Prob", gtxt("Predicted probability"), 0, "F8.3", "scale")
    # hat.diag is absent in some logistf versions (e.g. 1.21 under R 3.1)
    if (!is.null(res$hat.diag)) {
        dict[[3]] = c("Hat", gtxt("Hat Value"), 0, "F8.3", "scale")
    }
    dict = spssdictionary.CreateSPSSDictionary(dict)
    spssdictionary.SetDictionaryToSPSS(allargs[["dataset"]], dict)
    # data frame columns are matched to dictionary variables by position
    if (!is.null(res$hat.diag)) {
        spssdata.SetDataToSPSS(allargs[["dataset"]], data.frame(Case=casenumbers,
            prob=res$predict, hat=res$hat.diag))
    } else {
        spssdata.SetDataToSPSS(allargs[["dataset"]], data.frame(Case=casenumbers,
            prob=res$predict))
    }
    spssdictionary.EndDataStep()
}
Warn = function(procname, omsid) {
    # Constructor (sort of) for warning/error message management.
    # Returns an environment exposing:
    #   warn(msg, dostop, inproc) - accumulate a message; when dostop is TRUE
    #       (or msg is NULL), display all accumulated messages and, if
    #       dostop, stop() execution.
    #   display(inproc)           - show accumulated messages as a Warnings
    #       pivot table (or plain prints if no procedure state can be
    #       started) and end any procedure state.
    # procname - procedure name used for the warnings output block
    # omsid    - OMS identifier for the warnings output block
    lcl = list(
        procname=procname,
        omsid=omsid,
        msglist = list(),  # accumulate messages
        msgnum = 0
    )
    # This line is the key to this approach
    lcl = list2env(lcl) # makes this list into an environment
    lcl$warn = function(msg=NULL, dostop=FALSE, inproc=FALSE) {
        # Accumulate messages and, if dostop or no message, display all
        # messages and end procedure state
        # If dostop, issue a stop.
        if (!is.null(msg)) { # accumulate message
            assign("msgnum", lcl$msgnum + 1, envir=lcl)
            # There seems to be no way to update an object, only replace it
            m = lcl$msglist
            m[[lcl$msgnum]] = msg
            assign("msglist", m, envir=lcl)
        }
        if (is.null(msg) || dostop) {
            lcl$display(inproc) # display messages and end procedure state
            if (dostop) {
                stop(gtxt("End of procedure"), call.=FALSE) # may result in dangling error text
            }
        }
    }
    lcl$display = function(inproc=FALSE) {
        # display any accumulated messages as a warnings table or as prints
        # and end procedure state, if any
        if (lcl$msgnum == 0) { # nothing to display
            if (inproc) {
                spss.EndProcedure()
            }
        } else {
            # BUG FIX: procok was previously assigned only in the !inproc
            # branch but read unconditionally below, so calling display()
            # inside a procedure with pending messages raised
            # "object 'procok' not found".  When already inside a procedure
            # the Warnings table can be built directly, so default to TRUE.
            procok = TRUE
            if (!inproc) {
                procok = tryCatch({
                    StartProcedure(lcl$procname, lcl$omsid)
                    TRUE
                    },
                    error = function(e) {
                        FALSE
                    }
                )
            }
            if (procok) { # build and display a Warnings table if we can
                table = spss.BasePivotTable("Warnings ","Warnings") # do not translate this
                rowdim = BasePivotTable.Append(table,Dimension.Place.row,
                    gtxt("Message Number"), hideName = FALSE,hideLabels = FALSE)
                for (i in 1:lcl$msgnum) {
                    rowcategory = spss.CellText.String(as.character(i))
                    BasePivotTable.SetCategories(table,rowdim,rowcategory)
                    BasePivotTable.SetCellValue(table,rowcategory,
                        spss.CellText.String(lcl$msglist[[i]]))
                }
                spsspkg.EndProcedure() # implies display
            } else { # can't produce a table
                for (i in 1:lcl$msgnum) {
                    print(lcl$msglist[[i]])
                }
            }
        }
    }
    return(lcl)
}
# localization initialization
setuplocalization = function(domain) {
    # Find and bind the extension's translation catalog so gtxt()/gtxtf()
    # can localize messages.
    # domain is the root name of the extension command .R file, e.g.,
    # "SPSSINC_BREUSCH_PAGAN"; the catalog is bound to
    # <root location>/<domain>/lang next to that file.
    rfile <- paste0(domain, ".R")
    fpath <- Find(file.exists, file.path(.libPaths(), rfile))
    bindtextdomain(domain, file.path(dirname(fpath), domain, "lang"))
}
# override for api to account for extra parameter in V19 and beyond
# Override for the API to account for the extra procedure-name parameter
# that spsspkg.StartProcedure gained in Statistics version 19 and beyond.
StartProcedure <- function(procname, omsid) {
    # BUG FIX: the two-character version prefix was compared as a string,
    # which is lexicographic ("9." >= "19" is TRUE); compare numerically so
    # single-digit major versions dispatch to the old signature.
    if (as.numeric(substr(spsspkg.GetSPSSVersion(),1, 2)) >= 19) {
        spsspkg.StartProcedure(procname, omsid)
    }
    else {
        spsspkg.StartProcedure(omsid)
    }
}
gtxt <- function(...) {
    # Translate a message through the STATS_FIRTHLOG catalog; falls back
    # to the original text when no translation is bound.
    gettext(..., domain = "STATS_FIRTHLOG")
}
gtxtf <- function(...) {
    # sprintf-style translation through the STATS_FIRTHLOG catalog; falls
    # back to formatting the original text when no translation is bound.
    gettextf(..., domain = "STATS_FIRTHLOG")
}
Run = function(args) {
    # Execute the STATS FIRTHLOG command: define the syntax template,
    # then either show help (HELP subcommand) or parse the user's
    # keywords and dispatch to dofirth().
    cmdname = args[[1]]
    args = args[[2]]
    oobj = spsspkg.Syntax(list(
        spsspkg.Template("DEPENDENT", subc="", ktype="existingvarlist", var="dep"),
        spsspkg.Template("INDEP", subc="", ktype="existingvarlist", var="indep",
            islist=TRUE),
        spsspkg.Template("REFCATFACTORS", subc="", ktype="existingvarlist",
            var="refcatfactors", islist=TRUE),
        spsspkg.Template("REFCATS", subc="", ktype="literal", var="refcats", islist=TRUE),
        spsspkg.Template("FIRTH", subc="OPTIONS", ktype="bool", var="firth"),
        spsspkg.Template("PPL", subc="OPTIONS", ktype="bool", var="ppl"),
        # NOTE(review): dofirth() has no "doplot" parameter; if PLOT were
        # given as a bool on OPTIONS this template would forward an unknown
        # argument -- confirm against the command's syntax definition.
        spsspkg.Template("PLOT", subc="OPTIONS", ktype="bool", var="doplot"),
        spsspkg.Template("CONF", subc="OPTIONS", ktype="float", var="conf",
            vallist=list(50,99.9999)),
        spsspkg.Template("MAXSTEP", subc="OPTIONS", ktype="float", var="maxstep"),
        spsspkg.Template("MAXHS", subc="OPTIONS", ktype="int", var="maxhs"),
        spsspkg.Template("MAXIT", subc="OPTIONS", ktype="int", var="maxit"),
        spsspkg.Template("LCONV", subc="OPTIONS", ktype="float", var="lconv"),
        spsspkg.Template("GCONV", subc="OPTIONS", ktype="float", var="gconv"),
        spsspkg.Template("XCONV", subc="OPTIONS", ktype="float", var="xconv"),
        spsspkg.Template("PLOT", subc="OUTPUT", ktype="int", var="plotlist", islist=TRUE,
            vallist=list(0)),
        # BUG FIX: "sub=" was a typo for "subc="; without the correct
        # argument name the DATASET keyword was not attached to the SAVE
        # subcommand.
        spsspkg.Template("DATASET", subc="SAVE", ktype="varname", var="dataset")
    ))
    # A HELP subcommand overrides all else
    if ("HELP" %in% attr(args,"names")) {
        #writeLines(helptext)
        helper(cmdname)
    }
    else {
        res <- spsspkg.processcmd(oobj, args, "dofirth")
    }
}
helper = function(cmdname) {
    # Locate the html help file for an extension command and open it in the
    # default browser.  Blanks in the command name correspond to "_" in the
    # installed directory name.
    fname <- gsub(" ", "_", cmdname, fixed=TRUE)
    candidates <- file.path(.libPaths(), fname, "markdown.html")
    helpfile <- Find(file.exists, candidates)
    if (is.null(helpfile)) {
        print("Help file not found")
    } else {
        browseURL(paste0("file://", helpfile))
    }
}
# If the host (newer Statistics versions) supplies its own help dispatcher,
# prefer it over the local helper() defined above.
if (exists("spsspkg.helper")) {
    assign("helper", spsspkg.helper)
}
|
6e53c3e2194d12be2131d37856dee9abd6ad6854
|
fa797dec219eb45e9fa7725b2cc3b381e8e695d2
|
/figs/plot_hist_both.R
|
96894487a30c70f7c3b17bf4df89b714f8bce10a
|
[] |
no_license
|
dd-harp/mini-mash
|
018a34279625774c67703d94471e29356bb79950
|
2c9da18c0c601da81e35314c513735d46f0d54ac
|
refs/heads/main
| 2023-06-01T11:49:22.001301
| 2023-05-19T18:54:03
| 2023-05-19T18:54:03
| 309,202,081
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,748
|
r
|
plot_hist_both.R
|
# --------------------------------------------------------------------------------
# Compare Monte Carlo summaries of the population-level (pop) and
# agent-based (abm) models: overlaid per-variable histograms and violin
# plots built from the CSV outputs in figs/.
# --------------------------------------------------------------------------------
# NOTE(review): rm(list=ls()) wipes the caller's workspace and dev.off()
# errors when no graphics device is open -- both are script-only
# conveniences; consider removing for reuse.
rm(list=ls());gc()
dev.off()
library(here)
library(data.table)
library(ggplot2)
# Monte Carlo draws and their per-variable means for each model
pop_mc <- data.table::fread(input = here::here("figs/pop_mc.csv"))
pop_mc_means <- data.table::fread(input = here::here("figs/pop_mc_means.csv"))
abm_mc <- data.table::fread(input = here::here("figs/abm_mc.csv"))
abm_mc_means <- data.table::fread(input = here::here("figs/abm_mc_means.csv"))
# tag each draw with its model before stacking (in-place data.table assignment)
pop_mc[,"type" := "pop"]
abm_mc[,"type" := "abm"]
mc_dt <- rbind(pop_mc,abm_mc)
# reshape the means to long form and relabel the "mean" column by model
abm_mc_means <- melt(abm_mc_means,id.vars = "variable",variable.name = "type")
abm_mc_means[type == "mean", type := "abm"]
pop_mc_means <- melt(pop_mc_means,id.vars = "variable",variable.name = "type")
pop_mc_means[type == "mean", type := "pop"]
mc_means <- merge(abm_mc_means,pop_mc_means,all= TRUE)
setorder(mc_means,"type")
mc_means_dt <- rbind(abm_mc_means,pop_mc_means)
# overlaid, density-normalized histograms faceted by variable
# NOTE(review): guides(fill = FALSE) is deprecated in ggplot2 >= 3.3.4;
# guides(fill = "none") is the current form
ggplot(data = mc_dt) +
    geom_histogram(aes(value,after_stat(ndensity),fill=variable,linetype=type),position = "identity", color = "black", size = 0.15,alpha=0.6) +
    # geom_vline(data = mc_means,aes(xintercept=value,color=variable,linetype=type)) +
    guides(fill = FALSE, color = FALSE) +
    facet_wrap(. ~ variable,scales = "free") +
    theme_bw() +
    theme(axis.title = element_blank())
# violin plots comparing the two models side by side, faceted by variable
ggplot(data = merge(mc_dt,mc_means_dt)) +
    geom_violin(aes(x=type,y=value,fill=variable),alpha=0.8) +
    # geom_hline(data = mc_means,aes(yintercept=value,color=variable,linetype=type)) +
    guides(fill = FALSE, color = FALSE) +
    facet_wrap(. ~ variable,scales = "free") +
    theme_bw() +
    theme(axis.title = element_blank())
|
e9398993d542f34daac43b72a09755e6c4fc77f9
|
ead929cf3aefc7867206e9710cf3ec406545bb63
|
/R/veg_data.R
|
81602304021397754acef23ff4fc275685fa9d6e
|
[] |
no_license
|
lydiaPenglish/STRIPS2veg
|
cadee6c843d09dfec156defe820937fb68777181
|
3482b932fbd50e76bbe6131b2a9b3cf2a9f3a070
|
refs/heads/master
| 2022-11-06T07:47:17.287401
| 2020-06-08T17:53:08
| 2020-06-08T17:53:08
| 132,038,278
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,222
|
r
|
veg_data.R
|
#' Useful information about plants identified
#'
#' A dataset containing life history and taxonomic information about plants identified in vegetation surveys.
#'
#'
#' @format A data frame with 268 rows and 10 columns:
#' \describe{
#' \item{full_name}{Scientific name - the genus and species}
#' \item{common_name}{Common name of a species}
#' \item{family}{Taxonomic family to which the species belongs}
#' \item{native}{"Native" or "introduced" indicating origin of species}
#' \item{life_cycle}{"Annual" "biennial" or "perennial" indicating the longevity of a species}
#' \item{group}{Life-history group: prairie/weedy forb, C3 grass, C4 grass, sedge , rush, fern, woody}
#' \item{code}{5 letter code unique to each species. First three letters of genus and first two letters of species}
#' \item{alternate name}{Secondary common name}
#' \item{seeds_per_oz}{Number of seeds per ounce, gathered from \href{https://www.prairiemoon.com/}{Prairie Moon Nursery}}
#' \item{group_simple}{Simplified version of life history group. Consolidates C3, C4 grasses and sedges into just grasses
#' and creates "other" group for rushes, ferns, and wetland forbs}
#' }
#'
"species_list"
#' Vegetation data for every quadrat
#'
#' A dataset containing the visual percent cover estimates for all species identified in every quadrat of every strip.
#'
#'
#' @format A data frame with 10416 rows and 7 columns:
#' \describe{
#' \item{year}{The year the data was collected}
#' \item{quadratID}{A unique code each quadrat. Reads like "siteID_stripID_quadratNumber"}
#' \item{siteID}{A unique 3 letter code for the site}
#' \item{speciesID}{A unique 5 letter code for each species - consists of first 3 letters of genus, followed by
#' first two letters of species. Ex: \emph{Ratibida pinnata} = ratpi}
#' \item{cover}{Visual percent cover of vegetation. Has 7 distinct categories.}
#' \item{notes}{Any notes taken during data collection}
#' \item{flowering}{Whether or not a species was flowering - This was data we thought we'd collect throughout sampling
#' for another project, but we really didn't end up consistently recording it. Therefore it can largely be ignored.}
#' }
#'
"vegetation"
|
9aab5f9bd05319e049ad454dadae0aa592285e43
|
b7bf62deb228ee72d8ff96205a230575cbdfb51c
|
/R/predicting.int.R
|
2c6822d0d6ccdaf99f51dfe10103d88d329527c8
|
[] |
no_license
|
BenitoJaillard/combinAna
|
f48c855ea4b8b2dd22eb58652fa6b87b49c56a4a
|
114df1efc319db55c72f3b9bb23095a4345dabbb
|
refs/heads/master
| 2020-04-24T03:05:11.212153
| 2019-03-26T09:57:19
| 2019-03-26T09:57:19
| 171,659,371
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 89,208
|
r
|
predicting.int.R
|
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#
# Set of internal functions ####
#
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# Plotting symbol (pch) codes used to distinguish groups in figures:
# 21 = "circle", 22 = "square", 23 = "diamond",
# 24 = "triangle point-up", 25 = "triangle point-down"
figures <- c(21, 22, 24, 23, 25)
# Palette of colour names used to distinguish groups in figures
couleurs <- c("red3", "blue2", "orange2", "turquoise3", "magenta", "green4",
    "pink", "violet", "salmon4", "skyblue2", "sienna3", "olivedrab3")
# Lower- then upper-case letters, used as compact labels
myLetters <- c(letters, LETTERS)
#' @include stats.int.R
NULL
#oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo
#
# Arithmetic mean by Motif ####
#
#oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Arithmetic mean (amean) by assembly motif (bymot)
#'
#' @description Take the numeric vector \code{fct} and return a vector of same
#' length, of which values are computed as the arithmetic mean of all vector
#' elements belonging to a same motif. The motif of each vector element is
#' specified in the vector \code{assMotifs}.
#'
#' @usage predict_amean_bymot(fct, assMotifs)
#'
#' @param fct a vector of numeric values (assembly properties).
#'
#' @param assMotifs a vector of labels of same length as \code{fct} (assembly
#' motifs).
#'
#' @return Return a vector of same length as \code{fct}, of which values are
#' computed as the arithmetic mean of all vector elements belonging to a same
#' motif.
#'
#' @details Prediction is computed using arithmetic mean \code{amean} by motif
#' \code{bymot} in a whole (WITHOUT taking into account species contribution)
#' by including all elements belonging to a same motif, even the one to
#' predict.
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
predict_amean_bymot <- function(fct, assMotifs) {
    # Predict each observation by the arithmetic mean (amean) of all
    # observations sharing its assembly motif, the observation itself
    # included.
    # fct       - numeric vector of assembly properties
    # assMotifs - motif labels, parallel to fct
    # Returns a numeric vector of the same length as fct.
    fctPrd <- numeric(length(assMotifs))
    for (mot in unique(assMotifs)) {
        sel <- (assMotifs == mot)
        fctPrd[sel] <- amean(fct[sel])
    }
    fctPrd
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Arithmetic mean (amean) by motif (bymot) over several experiments
#' (xpr)
#'
#' @description Take the numeric vector \code{fct} and return a vector of same
#' length, of which values are computed as the arithmetic mean of all vector
#' elements belonging to a same motif, over several experiments. The motif of
#' each vector element is specified in the vector \code{assMotifs}. The
#' experiment of each vector element is specified in the vector \code{xpr}.
#'
#' @usage predict_amean_bymot_xpr(fct, assMotifs, xpr)
#'
#' @param fct a vector of numeric values (assembly properties).
#'
#' @param assMotifs a vector of labels of same length as \code{fct}
#' (assembly motifs).
#'
#' @param xpr a vector of labels of same length as \code{fct}
#' (assembly experiments).
#'
#' @return Return a vector of same length as \code{fct}, of which values
#' are computed as the arithmetic mean of all vector elements belonging to
#' the same motif over several experiments.
#'
#' @details Prediction is computed using arithmetic mean \code{amean} by
#' motif \code{bymot} in a whole (WITHOUT taking into account species
#' contribution) by including all elements belonging to a same motif,
#' even the one to predict.
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
predict_amean_bymot_xpr <- function(fct, assMotifs, xpr) {
    # Apply predict_amean_bymot separately within each experiment:
    # each observation is predicted by the mean of observations that share
    # both its motif and its experiment.
    # fct       - numeric vector of assembly properties
    # assMotifs - motif labels, parallel to fct
    # xpr       - experiment labels, parallel to fct
    # Returns a numeric vector of the same length as fct.
    fctPrd <- numeric(length(assMotifs))
    for (exper in unique(xpr)) {
        sel <- (xpr == exper)
        fctPrd[sel] <- predict_amean_bymot(fct[sel], assMotifs[sel])
    }
    fctPrd
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Arithmetic mean (amean) by motif (bymot) by leave-one-out (LOO)
#'
#' @description Take the numeric vector \code{fctMot} and return a vector of
#' same length, of which values are computed as the arithmetic mean of all
#' vector elements.
#'
#' @usage amean_bymot_LOO(fctMot)
#'
#' @param fctMot a vector of numeric values of elements belonging to a same
#' motif.
#'
#' @return Return a vector of same length as \code{fctMot}, of which values
#' are computed as the arithmetic mean of all vector elements, excepted the
#' value of element to predict that have been left out.
#'
#' @details Prediction computed using arithmetic mean \code{amean} by motif
#' \code{bymot} in a whole (WITHOUT taking into account species
#' contribution) by excluding the element to predict.
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
amean_bymot_LOO <- function(fctMot) {
    # Leave-one-out arithmetic mean within one motif: each element is
    # predicted by the mean of the other elements of the same motif.
    # A single-element motif cannot be predicted and yields NA.
    # fctMot - numeric values of the elements belonging to one motif
    n <- length(fctMot)
    if (n <= 1) {
        return(NA)
    }
    out <- numeric(n)
    for (i in seq_len(n)) {
        out[i] <- amean(fctMot[-i])
    }
    out
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Arithmetic mean (amean) by motif (bymot) by leave-one-out (LOO)
#'
#' @description Take a numeric vector and return the predicted vector
#' computed as the arithmetic mean of all elements belonging to a same motif.
#'
#' @usage predict_amean_bymot_LOO(fct, assMotifs)
#'
#' @param fct a vector of numeric values (assembly properties).
#'
#' @param assMotifs a vector of labels of same length as \code{fct}
#' (assembly motifs).
#'
#' @return Return a vector of same length as \code{fctMot}, of which
#' values are computed as the arithmetic mean of all vector elements,
#' excepted the value of element to predict that have been left out.
#'
#' @details Prediction computed using arithmetic mean \code{amean} by
#' motif \code{bymot} in a whole (WITHOUT taking into account species
#' contribution) by excluding the element to predict.
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
predict_amean_bymot_LOO <- function(fct, assMotifs) {
    # Predict each observation by the leave-one-out arithmetic mean of the
    # other observations sharing its assembly motif.
    # fct       - numeric vector of assembly properties
    # assMotifs - motif labels, parallel to fct
    # Returns a numeric vector of the same length as fct (NA for
    # single-element motifs).
    fctPrd <- numeric(length(assMotifs))
    for (mot in unique(assMotifs)) {
        sel <- (assMotifs == mot)
        fctPrd[sel] <- amean_bymot_LOO(fct[sel])
    }
    fctPrd
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Arithmetic mean (amean) by motif (bymot) by leave-one-out (LOO)
#' over several experiments (xpr)
#'
#' @description Take a numeric vector and return the predicted vector
#' computed as the arithmetic mean of all elements belonging to a same motif.
#'
#' @usage predict_amean_bymot_LOO_xpr(fct, assMotifs, xpr)
#'
#' @param fct a vector of numeric values (assembly properties).
#'
#' @param assMotifs a vector of labels of same length as \code{fct}
#' (assembly motifs).
#'
#' @param xpr a vector of labels of same length as \code{fct}
#' (assembly experiments).
#'
#' @return Return a vector of same length as \code{fctMot}, of which
#' values are computed as the arithmetic mean of all vector elements,
#' over several experiments, excepted the value of element to predict
#' that have been left out.
#'
#' @details Prediction computed using arithmetic mean \code{amean} by
#' motif \code{bymot} in a whole (WITHOUT taking into account species
#' contribution) by excluding the element to predict.
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
predict_amean_bymot_LOO_xpr <- function(fct, assMotifs, xpr) {
    # Apply predict_amean_bymot_LOO separately within each experiment:
    # leave-one-out motif means computed experiment by experiment.
    # fct       - numeric vector of assembly properties
    # assMotifs - motif labels, parallel to fct
    # xpr       - experiment labels, parallel to fct
    # Returns a numeric vector of the same length as fct.
    fctPrd <- numeric(length(assMotifs))
    for (exper in unique(xpr)) {
        sel <- (xpr == exper)
        fctPrd[sel] <- predict_amean_bymot_LOO(fct[sel], assMotifs[sel])
    }
    fctPrd
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Arithmetic mean (amean) by motif (bymot) by jackknife (jack)
#' over several experiments (xpr)
#'
#' @description Take a numeric vector and return the predicted vector
#' computed as the arithmetic mean of all elements belonging to the same motif.
#'
#' @usage amean_bymot_jack(fctMot, jack)
#'
#' @param fctMot a vector of numeric values of elements belonging to a
#' same motif.
#'
#' @param jack a vector of two elements. The first one \code{jack[1]}
#' specifies the size of subset, the second one \code{jack[2]} specifies
#' the number of subsets.
#'
#' @return Return a vector of same length as \code{fctMot},
#' of which values are computed as the arithmetic mean of all vector elements.
#'
#' @details Prediction is computed using arithmetic mean \code{amean}
#' by motif \code{bymot} in a whole (WITHOUT taking into account species
#' contribution). The elements belonging to a same motif are divided
#' into \code{jack[2]} subsets of \code{jack[1]} elements. Prediction
#' is computed by excluding \code{jack[1]} elements, of which the element
#' to predict. If the total number of elements belonging to the motif
#' is lower than \code{jack[1]*jack[2]}, prediction is computed by
#' Leave-One-Out (LOO).
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
amean_bymot_jack <- function(fctMot, jack) {
    # Jackknife arithmetic mean within one motif: the shuffled elements are
    # split into jack[2] subsets of about floor(n/jack[2]) elements, and each
    # element is predicted by the mean of the elements outside its own
    # subset.  When the motif holds jack[1]*jack[2] elements or fewer, falls
    # back to leave-one-out (amean_bymot_LOO).
    #
    # fctMot - numeric values of the elements belonging to one motif
    # jack   - c(subset size, number of subsets)
    #
    # Returns a numeric vector of the same length as fctMot (or NA via
    # amean_bymot_LOO for a single-element motif).
    nbass <- length(fctMot)
    if (nbass > jack[1] * jack[2]) {
        fctPrd <- numeric(nbass)
        index <- sample.int(nbass)
        size <- floor(nbass / jack[2])
        # BUG FIX: the last chunk previously reused the loop variable `ind`
        # after the loop ended, which is undefined when jack[2] == 1 and
        # fragile in general; compute each chunk's bounds explicitly.  The
        # final chunk absorbs the remainder up to nbass.
        for (ind in seq_len(jack[2])) {
            lo <- (ind - 1) * size + 1
            hi <- if (ind == jack[2]) nbass else ind * size
            indjack <- index[lo:hi]
            fctPrd[indjack] <- amean(fctMot[-indjack])
        }
    } else {
        fctPrd <- amean_bymot_LOO(fctMot)
    }
    return(fctPrd)
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Arithmetic mean (amean) by motif (bymot) by jackknife (jack)
#'
#' @description Take a numeric vector and return the predicted vector
#' computed as the arithmetic mean of all elements belonging to the same motif.
#'
#' @usage predict_amean_bymot_jack(fct, assMotifs, jack)
#'
#' @param fct a vector of numeric values (assembly properties).
#'
#' @param assMotifs a vector of labels of same length as \code{fct}
#' (assembly motifs).
#'
#' @param jack a vector of two elements. The first one\code{jack[1]}
#' specifies the size of subset, the second one \code{jack[2]}
#' specifies the number of subsets.
#'
#' @return Return a vector of same length as \code{fct},
#' of which values are computed as the arithmetic mean of all vector elements.
#'
#' @details Prediction is computed using arithmetic mean \code{amean}
#' by motif \code{bymot} in a whole (WITHOUT taking into account
#' species contribution). The elements belonging to a same motif are
#' divided into \code{jack[2]} subsets of \code{jack[1]} elements.
#' Prediction is computed by excluding \code{jack[1]} elements,
#' of which the element to predict. If the total number of elements
#' belonging to the motif is lower than \code{jack[1]*jack[2]},
#' prediction is computed by Leave-One-Out (LOO).
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
predict_amean_bymot_jack <- function(fct, assMotifs, jack) {
    # Predict each observation by the jackknifed arithmetic mean of the
    # observations sharing its assembly motif (see amean_bymot_jack).
    # fct       - numeric vector of assembly properties
    # assMotifs - motif labels, parallel to fct
    # jack      - c(subset size, number of subsets)
    # Returns a numeric vector of the same length as fct.
    fctPrd <- numeric(length(assMotifs))
    for (mot in unique(assMotifs)) {
        sel <- (assMotifs == mot)
        fctPrd[sel] <- amean_bymot_jack(fct[sel], jack)
    }
    fctPrd
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Arithmetic mean (amean) by motif (bymot) by jackknife (jack)
#' over several experiments (xpr)
#'
#' @description Take a numeric vector and return the predicted vector
#' computed as the arithmetic mean of all elements belonging to a same motif.
#'
#' @usage predict_amean_bymot_jack_xpr(fct, assMotifs, jack, xpr)
#'
#' @param fct a vector of numeric values (assembly properties).
#'
#' @param assMotifs a vector of labels of same length as \code{fct}
#' (assembly motifs).
#'
#' @param jack a vector of two elements. The first one \code{jack[1]}
#' specifies the size of subset, the second one \code{jack[2]} specifies
#' the number of subsets.
#'
#' @param xpr a vector of labels of same length as \code{fct}
#' (assembly experiments).
#'
#' @return Return a vector of same length as \code{fct}, of which values are
#' computed as the jackknifed arithmetic mean of the vector elements
#' belonging to a same motif, within each experiment.
#'
#' @details Prediction is computed using arithmetic mean \code{amean}
#' by motif \code{bymot} in a whole (WITHOUT taking into account species
#' contribution). The elements belonging to a same motif are divided
#' into \code{jack[2]} subsets of \code{jack[1]} elements. Prediction
#' is computed by excluding \code{jack[1]} elements, of which the
#' element to predict. If the total number of elements belonging to
#' the motif is lower than \code{jack[1]*jack[2]}, prediction is
#' computed by Leave-One-Out (LOO).
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
predict_amean_bymot_jack_xpr <- function(fct, assMotifs, jack, xpr) {
    # Apply the per-motif jackknife prediction separately within
    # each experiment labelled in xpr.
    res <- numeric(length(assMotifs))
    for (experiment in unique(xpr)) {
        sel <- which(xpr == experiment)
        res[sel] <- predict_amean_bymot_jack(fct[sel], assMotifs[sel], jack)
    }
    res
}
#oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo
#
# Geometric mean by Motif ####
#
#oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Geometric mean (gmean) by motif (bymot)
#'
#' @description Take the numeric vector \code{fct} and return a vector
#' of same length, of which values are computed as the geometric mean
#' of all vector elements belonging to a same motif. The motif of each
#' vector element is specified in the vector \code{assMotifs}.
#'
#' @usage predict_gmean_bymot(fct, assMotifs)
#'
#' @param fct a vector of numeric values (assembly properties).
#'
#' @param assMotifs a vector of labels of \code{length(fct)}
#' (assembly motifs).
#'
#' @return Return a vector of \code{length(fct)}, of which values are
#' computed as the geometric mean of all vector elements belonging
#' to a same motif.
#'
#' @details Prediction is computed using geometric mean \code{gmean}
#' by motif \code{bymot} in a whole (WITHOUT taking into account
#' species contribution) by including all elements belonging
#' to a same motif, even the one to predict.
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
predict_gmean_bymot <- function(fct, assMotifs) {
    # Each assemblage is predicted by the geometric mean of all
    # assemblages (including itself) sharing its assembly motif.
    res <- numeric(length(assMotifs))
    for (motif in unique(assMotifs)) {
        sel <- which(assMotifs == motif)
        res[sel] <- gmean(fct[sel])
    }
    res
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Geometric mean (gmean) by motif (bymot) over several experiments (xpr)
#'
#' @description Take the numeric vector \code{fct} and return a vector
#' of same length, of which values are computed as the geometric mean
#' of all vector elements belonging to a same motif, over several experiments.
#' The motif of each vector element is specified in the vector
#' \code{assMotifs}. The experiment of each vector element is
#' specified in the vector \code{xpr}.
#'
#' @usage predict_gmean_bymot_xpr(fct, assMotifs, xpr)
#'
#' @param fct a vector of numeric values (assembly properties).
#'
#' @param assMotifs a vector of labels of same length as \code{fct}
#' (assembly motifs).
#'
#' @param xpr a vector of labels of same length as \code{fct}
#' (assembly experiments).
#'
#' @return Return a vector of same length as \code{fct}, of which values
#' are computed as the geometric mean of all vector elements belonging
#' to the same motif over several experiments.
#'
#' @details Prediction is computed using geometric mean \code{gmean}
#' by motif \code{bymot} in a whole (WITHOUT taking into account species
#' contribution) by including all elements belonging to a same motif,
#' even the one to predict.
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
predict_gmean_bymot_xpr <- function(fct, assMotifs, xpr) {
    # Apply the per-motif geometric-mean prediction separately within
    # each experiment labelled in xpr.
    res <- numeric(length(assMotifs))
    for (experiment in unique(xpr)) {
        sel <- which(xpr == experiment)
        res[sel] <- predict_gmean_bymot(fct[sel], assMotifs[sel])
    }
    res
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Geometric mean (gmean) by motif (bymot) by leave-one-out (LOO)
#'
#' @description Take the numeric vector \code{fctMot} and return a vector
#' of same length, of which values are computed as the geometric mean
#' of all vector elements.
#'
#' @usage gmean_bymot_LOO(fctMot)
#'
#' @param fctMot a vector of numeric values of elements belonging
#' to a same motif.
#'
#' @return Return a vector of \code{length(fctMot)}, of which values
#' are computed as the geometric mean of all vector elements, excepted
#' the value of element to predict that have been left out.
#'
#' @details Prediction computed using geometric mean \code{gmean}
#' by motif \code{bymot} in a whole (WITHOUT taking into account
#' species contribution) by excluding the element to predict.
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
gmean_bymot_LOO <- function(fctMot) {
    # Leave-one-out geometric mean within one motif: each value is
    # predicted by the geometric mean of all the OTHER values.
    # A motif with a single assemblage cannot be predicted: return NA.
    n <- length(fctMot)
    if (n <= 1) {
        return(NA)
    }
    out <- numeric(n)
    for (i in seq_len(n)) {
        out[i] <- gmean(fctMot[-i])
    }
    out
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Geometric mean (gmean) by motif (bymot) by leave-one-out (LOO)
#'
#' @description Take a numeric vector and return the predicted vector
#' computed as the geometric mean of all elements belonging to a same motif.
#'
#' @usage predict_gmean_bymot_LOO(fct, assMotifs)
#'
#' @param fct a vector of numeric values (assembly properties).
#'
#' @param assMotifs a vector of labels of \code{length(fct)} (assembly motifs).
#'
#' @return Return a vector of \code{length(fctMot)}, of which values
#' are computed as the geometric mean of all vector elements, excepted
#' the value of element to predict that have been left out.
#'
#' @details Prediction computed using geometric mean \code{gmean}
#' by motif \code{bymot} in a whole (WITHOUT taking into account
#' species contribution) by excluding the element to predict.
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
predict_gmean_bymot_LOO <- function(fct, assMotifs) {
    # Dispatch the leave-one-out geometric-mean prediction motif by motif.
    res <- numeric(length(assMotifs))
    for (motif in unique(assMotifs)) {
        sel <- which(assMotifs == motif)
        res[sel] <- gmean_bymot_LOO(fct[sel])
    }
    res
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Geometric mean (gmean) by motif (bymot) by leave-one-out (LOO)
#' over several experiments (xpr)
#'
#' @description Take a numeric vector and return the predicted vector
#' computed as the geometric mean of all elements belonging to a same motif.
#'
#' @usage predict_gmean_bymot_LOO_xpr(fct, assMotifs, xpr)
#'
#' @param fct a vector of numeric values (assembly properties).
#'
#' @param assMotifs a vector of labels of same length as \code{fct}
#' (assembly motifs).
#'
#' @param xpr a vector of labels of same length as \code{fct}
#' (assembly experiments).
#'
#' @return Return a vector of same length as \code{fct},
#' of which values are computed as the geometric mean of all
#' vector elements, over several experiments, excepted the value
#' of element to predict that have been left out.
#'
#' @details Prediction computed using geometric mean \code{gmean}
#' by motif \code{bymot} in a whole (WITHOUT taking into
#' account species contribution) by excluding the element to predict.
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
predict_gmean_bymot_LOO_xpr <- function(fct, assMotifs, xpr) {
    # Apply the per-motif leave-one-out geometric-mean prediction
    # separately within each experiment labelled in xpr.
    res <- numeric(length(assMotifs))
    for (experiment in unique(xpr)) {
        sel <- which(xpr == experiment)
        res[sel] <- predict_gmean_bymot_LOO(fct[sel], assMotifs[sel])
    }
    res
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Geometric mean (gmean) by motif (bymot) by jackknife (jack)
#'
#' @description Take a numeric vector and return the predicted vector
#' computed as the geometric mean of all elements belonging to the same motif.
#'
#' @usage gmean_bymot_jack(fctMot, jack)
#'
#' @param fctMot a vector of numeric values of elements belonging
#' to a same motif.
#'
#' @param jack a vector of two elements. The first one \code{jack[1]}
#' specifies the size of subset, the second one \code{jack[2]} specifies
#' the number of subsets.
#'
#' @return Return a vector of same length as \code{fctMot}, of which
#' values are computed as the geometric mean of all vector elements.
#'
#' @details Prediction is computed using geometric mean \code{gmean}
#' by motif \code{bymot} in a whole (WITHOUT taking into account species
#' contribution). The elements belonging to a same motif are divided
#' into \code{jack[2]} subsets of \code{jack[1]} elements. Prediction
#' is computed by excluding \code{jack[1]} elements, of which
#' the element to predict. If the total number of elements
#' belonging to the motif is lower than \code{jack[1]*jack[2]},
#' prediction is computed by Leave-One-Out (LOO).
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
gmean_bymot_jack <- function(fctMot, jack) {
    # Jackknife geometric-mean prediction within one motif.
    # The assemblages are shuffled and split into jack[2] chunks of
    # (roughly) equal size; each chunk is predicted from the geometric
    # mean of all assemblages OUTSIDE the chunk. When the motif has too
    # few assemblages (<= jack[1] * jack[2]), fall back to leave-one-out.
    nbass <- length(fctMot)
    if (nbass > jack[1] * jack[2]) {
        fctPrd <- numeric(nbass)
        index <- sample.int(nbass)
        size <- floor(nbass / jack[2])
        # First jack[2]-1 chunks have exactly 'size' elements each
        for (ind in seq_len(jack[2] - 1)) {
            indjack <- index[(ind - 1) * size + (1:size)]
            fctPrd[indjack] <- gmean(fctMot[-indjack])
        }
        # Last chunk absorbs the remainder. Its start is computed from
        # jack[2] directly (not from the loop variable 'ind', which is
        # undefined when jack[2] == 1 and the loop body never ran).
        indjack <- index[((jack[2] - 1) * size + 1):nbass]
        fctPrd[indjack] <- gmean(fctMot[-indjack])
    } else {
        fctPrd <- gmean_bymot_LOO(fctMot)
    }
    return(fctPrd)
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Geometric mean (gmean) by motif (bymot) by jackknife (jack)
#'
#' @description Take a numeric vector and return the predicted vector
#' computed as the geometric mean of all elements belonging to the same motif.
#'
#' @usage predict_gmean_bymot_jack(fct, assMotifs, jack)
#'
#' @param fct a vector of numeric values (assembly properties).
#'
#' @param assMotifs a vector of labels of same length as \code{fct}
#' (assembly motifs).
#'
#' @param jack a vector of two elements. The first one \code{jack[1]}
#' specifies the size of subset, the second one \code{jack[2]} specifies
#' the number of subsets.
#'
#' @return Return a vector of same length as \code{fct}, of which
#' values are computed as the geometric mean of all vector elements.
#'
#' @details Prediction is computed using geometric mean \code{gmean}
#' by motif \code{bymot} in a whole (WITHOUT taking into account species
#' contribution). The elements belonging to a same motif are divided
#' into \code{jack[2]} subsets of \code{jack[1]} elements. Prediction
#' is computed by excluding \code{jack[1]} elements, of which the
#' element to predict. If the total number of elements belonging
#' to the motif is lower than \code{jack[1]*jack[2]}, prediction
#' is computed by Leave-One-Out (LOO).
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
predict_gmean_bymot_jack <- function(fct, assMotifs, jack) {
    # Dispatch the jackknife geometric-mean prediction motif by motif.
    res <- numeric(length(assMotifs))
    for (motif in unique(assMotifs)) {
        sel <- which(assMotifs == motif)
        res[sel] <- gmean_bymot_jack(fct[sel], jack)
    }
    res
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Geometric mean (gmean) by motif (bymot) by jackknife (jack)
#' over several experiments (xpr)
#'
#' @description Take a numeric vector and return the predicted vector
#' computed as the geometric mean of all elements belonging to a same motif.
#'
#' @usage predict_gmean_bymot_jack_xpr(fct, assMotifs, jack, xpr)
#'
#' @param fct a vector of numeric values (assembly properties).
#'
#' @param assMotifs a vector of labels of same length as \code{fct}
#' (assembly motifs).
#'
#' @param jack a vector of two elements. The first one \code{jack[1]}
#' specifies the size of subset, the second one \code{jack[2]} specifies
#' the number of subsets.
#'
#' @param xpr a vector of labels of \code{length(fct)} (assembly experiments).
#'
#' @return Return a vector of \code{length(fct)}, of which values
#' are computed as the geometric mean of all vector elements, over
#' several experiments, excepted the value of element to predict
#' that have been left out.
#'
#' @details Prediction is computed using geometric mean \code{gmean}
#' by motif \code{bymot} in a whole (WITHOUT taking into account species
#' contribution). The elements belonging to a same motif are divided
#' into \code{jack[2]} subsets of \code{jack[1]} elements. Prediction
#' is computed by excluding \code{jack[1]} elements, of which the
#' element to predict. If the total number of elements belonging to
#' the motif is lower than \code{jack[1]*jack[2]}, prediction is
#' computed by Leave-One-Out (LOO).
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
predict_gmean_bymot_jack_xpr <- function(fct, assMotifs, jack, xpr) {
    # Apply the per-motif jackknife geometric-mean prediction separately
    # within each experiment labelled in xpr.
    fctPrd <- numeric(length(assMotifs))
    setXpr <- unique(xpr)
    for (ix in seq_along(setXpr)) {
        indXpr <- which(xpr == setXpr[ix])
        # Bug fix: the two first arguments were previously swapped
        # (assMotifs passed as fct and vice versa);
        # predict_gmean_bymot_jack(fct, assMotifs, jack) expects the
        # property vector first, as in the amean counterpart.
        fctPrd[indXpr] <- predict_gmean_bymot_jack(fct[indXpr],
                                                   assMotifs[indXpr], jack)
    }
    return(fctPrd)
}
#oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo
#
# Arithmetic mean by Element within each Motif ####
#
#oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Arithmetic mean (amean) by elements occurring within the
#' assemblage and other assemblages, all assemblages belonging to
#' a same motif (byelt)
#'
#' @description The numeric vector \code{fctMot} get together the
#' properties of assemblages belonging to a same assembly motif. The
#' properties \code{fctMot} of assemblages containing a given
#' element are separately averaged. The property of each assemblage
#' is computed as the average of mean values of assemblages containing
#' the same elements as the considered assemblage. The elemental
#' composition of each assemblage is specified in the binary matrix
#' \code{mOccurMot}: \code{0} if the element does not occur,
#' \code{1} if the element occurs.
#'
#' @usage amean_byelt(fctMot, mOccurMot)
#'
#' @param fctMot a vector of numeric values (assembly properties).
#'
#' @param mOccurMot a matrix of occurrence (occurrence of elements).
#' Its first dimension equals to \code{length(fctMot)}. Its second
#' dimension equals to the number of elements.
#'
#' @return Return a vector of \code{length(fctMot)}, of which values
#' are computed as an arithmetic mean.
#'
#' @details Prediction is computed using arithmetic mean \code{amean}
#' by element \code{byelt} occurring within the assemblage and other
#' assemblages of a same motif, by including all assemblages belonging
#' to a same motif, even the one to predict.
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
amean_byelt <- function(fctMot, mOccurMot) {
    # Within one motif: for each element, average the property of all
    # assemblages containing it, then predict each assemblage as the
    # mean (over its own elements) of these per-element averages.
    res <- numeric(length(fctMot))
    # Columns (elements) occurring in at least one assemblage of the motif
    occCols <- unique(which(mOccurMot[ , , drop = FALSE] == 1,
                            arr.ind = TRUE)[ , 2])
    for (col in occCols) {
        rows <- which(mOccurMot[ , col] == 1)
        res[rows] <- res[rows] + amean(fctMot[rows])
    }
    # Normalize by the number of elements present in each assemblage
    res / rowSums(mOccurMot)
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Arithmetic mean (amean) by elements occurring within
#' the assemblage and other assemblages, all assemblages belonging
#' to a same motif (byelt)
#'
#' @description The numeric vector \code{fct} get together the
#' properties of assemblages belonging to different assembly motifs.
#' The properties \code{fct} of assemblages belonging to a given
#' assembly motif and containing a given element are separately averaged.
#' The property of each assemblage is computed as the average of mean
#' values of assemblages containing the same elements as the considered
#' assemblage. The motif of each vector element is specified in
#' the vector \code{assMotifs}. The elemental composition of
#' each assemblage is specified in the binary matrix \code{mOccur}:
#' \code{0} if the element does not occur, \code{1} if the element occurs.
#'
#' @name predict_amean_byelt
#'
#' @usage predict_amean_byelt(fct, assMotifs, mOccur)
#'
#' @param fct a vector of numeric values (assembly properties).
#'
#' @param assMotifs a vector of labels of same length as \code{fct}
#' (assembly motifs).
#'
#' @param mOccur a matrix of occurrence (occurrence of elements).
#' Its first dimension equals to length of \code{fct}. Its second
#' dimension equals to the number of elements.
#'
#' @return Return a vector of same length as \code{fct}, of which
#' values are computed as the arithmetic mean of all vector elements
#' belonging to a same motif.
#'
#' @details Prediction is computed using arithmetic mean \code{amean}
#' by element \code{byelt} occurring within the assemblage and other
#' assemblages of a same motif, by including all assemblages belonging
#' to a same motif, even the one to predict.
#'
# @keywords internal
#' @export
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
predict_amean_byelt <- function(fct, assMotifs, mOccur) {
    # Dispatch the by-element arithmetic-mean prediction motif by motif.
    # A motif represented by a single assemblage is predicted by itself.
    res <- numeric(length(assMotifs))
    for (motif in unique(assMotifs)) {
        sel <- which(assMotifs == motif)
        res[sel] <- if (length(sel) > 1) {
            amean_byelt(fct[sel], mOccur[sel, ])
        } else {
            fct[sel]
        }
    }
    res
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Arithmetic mean (amean) by elements occurring within the
#' assemblage and other assemblages, all assemblages belonging
#' to a same motif (byelt), over several experiments (xpr)
#'
#' @description The numeric vector \code{fct} get together the properties
#' of assemblages belonging to different assembly motifs. The properties
#' \code{fct} of assemblages belonging to a given assembly motif and
#' containing a given element are separately averaged. The property
#' of each assemblage is computed as the average of mean values
#' of assemblages containing the same elements as the considered assemblage.
#' The motif of each vector element is specified in the vector
#' \code{assMotifs}. The elemental composition of each assemblage
#' is specified in the binary matrix \code{mOccur}: \code{0}
#' if the element does not occur, \code{1} if the element occurs.
#'
#' @usage predict_amean_byelt_xpr(fct, assMotifs, mOccur, xpr)
#'
#' @param fct a vector of numeric values (assembly properties).
#'
#' @param assMotifs a vector of labels of \code{length(fct)}
#' (assembly motifs).
#'
#' @param mOccur a matrix of occurrence (occurrence of elements).
#' Its first dimension equals to \code{length(fct)}. Its second
#' dimension equals to the number of elements.
#'
#' @param xpr a vector of labels of same length as \code{fct}
#' (assembly experiments).
#'
#' @return Return a vector of same length as \code{fct}, of which
#' values are computed as the arithmetic mean of all vector elements
#' belonging to a same motif.
#'
#' @details Prediction is computed using arithmetic mean \code{amean}
#' by element \code{byelt} occurring within the assemblage and other
#' assemblages of a same motif, by including all assemblages belonging
#' to a same motif, even the one to predict.
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
predict_amean_byelt_xpr <- function(fct, assMotifs, mOccur, xpr) {
    # Apply the by-element arithmetic-mean prediction separately within
    # each experiment labelled in xpr.
    fctPrd <- numeric(length(assMotifs))
    setXpr <- unique(xpr)
    for (ix in seq_along(setXpr)) {
        indXpr <- which(xpr == setXpr[ix])
        # drop = FALSE keeps mOccur a matrix even when an experiment
        # contains a single assemblage; otherwise the row collapses to a
        # vector and matrix indexing fails inside predict_amean_byelt().
        fctPrd[indXpr] <- predict_amean_byelt(fct[indXpr],
                                              assMotifs[indXpr],
                                              mOccur[indXpr, , drop = FALSE])
    }
    return(fctPrd)
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Arithmetic mean (amean) by elements occurring within
#' the assemblage and other assemblages belonging to a same motif (byelt),
#' except the assemblage to predict (leave-one-out, LOO)
#'
#' @description The numeric vector \code{fctMot} get together the
#' properties of assemblages belonging to a same assembly motif.
#' The properties \code{fctMot} of assemblages containing a given element
#' are separately averaged, except the assemblage to predict. The property
#' of each assemblage is computed as the average of mean values of
#' assemblages containing the same elements as the considered assemblage.
#' The elemental composition of each assemblage is specified in the
#' binary matrix \code{mOccurMot}: \code{0} if the element does not
#' occur, \code{1} if the element occurs.
#'
#' @usage amean_byelt_LOO(fctMot, mOccurMot)
#'
#' @param fctMot a vector of numeric values (assembly properties).
#'
#' @param mOccurMot a matrix of occurrence (occurrence of elements).
#' Its first dimension equals to \code{length(fctMot)}. Its second
#' dimension equals to the number of elements.
#'
#' @return Return a vector of \code{length(fct)}, of which values are
#' computed as an arithmetic mean of all vector elements over several
#' experiments, excepted the value of element to predict that have
#' been left out.
#'
#' @details Prediction is computed using arithmetic mean \code{amean}
#' by element \code{byelt} occurring within the assemblage and other
#' assemblages of a same motif, by including all assemblages belonging
#' to a same motif, except the assemblage to predict.
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
amean_byelt_LOO <- function(fctMot, mOccurMot) {
    # Leave-one-out, by-element prediction within one motif:
    # for each assemblage 'ind', average the property of the OTHER
    # assemblages containing each of its elements, then average these
    # per-element means. mOccurMot is binary (1 = element occurs).
    nbass <- length(fctMot)
    fctPrd <- numeric(nbass)
    # Per-element accumulator, one slot per column (element) of mOccurMot
    vfct <- numeric(dim(mOccurMot)[2])
    for (ind in seq_len(nbass)) {
        vfct[] <- NA                        # reset; elements absent from
                                            # other assemblages stay NA
        indOth <- seq_len(nbass)[-ind]      # all assemblages but 'ind'
        setElt <- which(mOccurMot[ind, ] != 0)  # elements of assemblage 'ind'
        for (elt in seq_along(setElt)) {
            # Other assemblages of the motif that also contain this element
            indElt <- which(mOccurMot[indOth, setElt[elt]] == 1)
            if (length(indElt) > 0)
                vfct[setElt[elt]] <- amean(fctMot[indOth[indElt]])
            # Elements found in no other assemblage are left as NA
            # (the alternative below would fall back to the motif mean);
            # NOTE(review): the final amean() is presumably NA-tolerant
            # (na.rm) for this to yield a finite value — confirm in amean().
            # if (length(indElt) > 0) {
            #   vfct[setElt[elt]] <- amean(fctMot[indOth[indElt]])
            # } else {
            #   vfct[setElt[elt]] <- amean(fctMot[indOth])
            # }
        }
        # Prediction = mean over the per-element means of assemblage 'ind'
        fctPrd[ind] <- amean(vfct[setElt])
    }
    return(fctPrd)
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Arithmetic mean (amean) by elements occurring within the
#' assemblage and other assemblages belonging to a same motif (byelt),
#' except the assemblage to predict (leave-one-out, LOO)
#'
#' @description The numeric vector \code{fct} get together the properties
#' of assemblages belonging to different assembly motifs. The properties
#' \code{fct} of assemblages belonging to a given assembly motif and
#' containing a given element are separately averaged. The property of
#' each assemblage is computed as the average of mean values of assemblages
#' containing the same elements as the considered assemblage, except the
#' assemblage to predict (leave-one-out, LOO). The motif of each vector
#' element is specified in the vector \code{assMotifs}. The elemental
#' composition of each assemblage is specified in the binary matrix
#' \code{mOccur}: \code{0} if the element does not occur, \code{1}
#' if the element occurs.
#'
#' @usage predict_amean_byelt_LOO(fct, assMotifs, mOccur)
#'
#' @param fct a vector of numeric values (assembly properties).
#'
#' @param assMotifs a vector of labels of \code{length(fct)} (assembly motifs).
#'
#' @param mOccur a matrix of occurrence (occurrence of elements).
#' Its first dimension equals to \code{length(fct)}. Its second
#' dimension equals to the number of elements.
#'
#' @return Return a vector of same length as \code{fctMot}, of which
#' values are computed as an arithmetic mean.
#'
#' @details Prediction is computed using arithmetic mean \code{amean}
#' by element \code{byelt} occurring within the assemblage and other
#' assemblages of a same motif, by including all assemblages belonging
#' to a same motif, except the assemblage to predict.
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
predict_amean_byelt_LOO <- function(fct, assMotifs, mOccur) {
    # Dispatch the leave-one-out by-element prediction motif by motif.
    # A motif with a single assemblage cannot be predicted: NA.
    res <- numeric(length(assMotifs))
    for (motif in unique(assMotifs)) {
        sel <- which(assMotifs == motif)
        res[sel] <- if (length(sel) > 1) {
            amean_byelt_LOO(fct[sel], mOccur[sel, ])
        } else {
            NA
        }
    }
    res
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Arithmetic mean (amean) by elements occurring within
#' the assemblage and other assemblages belonging to a same motif
#' (byelt), except the assemblage to predict (leave-one-out, LOO).
#' Over several experiments.
#'
#' @description The numeric vector \code{fct} get together the properties
#' of assemblages belonging to different assembly motifs. The properties
#' \code{fct} of assemblages belonging to a given assembly motif and
#' containing a given element are separately averaged. The property of
#' each assemblage is computed as the average of mean values of
#' assemblages containing the same elements as the considered assemblage,
#' except the assemblage to predict (leave-one-out, LOO). The motif
#' of each vector element is specified in the vector \code{assMotifs}.
#' The elemental composition of each assemblage is specified in the
#' binary matrix \code{mOccur}: \code{0} if the element does not occur,
#' \code{1} if the element occurs.
#'
#' @usage predict_amean_byelt_LOO_xpr(fct, assMotifs, mOccur, xpr)
#'
#' @param fct a vector of numeric values (assembly properties).
#'
#' @param assMotifs a vector of labels of \code{length(fct)} (assembly motifs).
#'
#' @param mOccur a matrix of occurrence (occurrence of elements).
#' Its first dimension equals to \code{length(fct)}. Its second dimension
#' equals to the number of elements.
#'
#' @param xpr a vector of labels of same length as \code{fct}
#' (assembly experiments).
#'
#' @return Return a vector of same length as \code{fctMot}, of
#' which values are computed as an arithmetic mean.
#'
#' @details Prediction is computed using arithmetic mean \code{amean}
#' by element \code{byelt} occurring within the assemblage and other
#' assemblages of a same motif, by including all assemblages belonging
#' to a same motif, except the assemblage to predict. Over several
#' experiments.
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
predict_amean_byelt_LOO_xpr <- function(fct, assMotifs, mOccur, xpr) {
    # Apply the leave-one-out by-element prediction separately within
    # each experiment labelled in xpr.
    fctPrd <- numeric(length(assMotifs))
    setXpr <- unique(xpr)
    for (ix in seq_along(setXpr)) {
        indXpr <- which(xpr == setXpr[ix])
        # drop = FALSE keeps mOccur a matrix even when an experiment
        # contains a single assemblage; otherwise the row collapses to a
        # vector and matrix indexing fails in predict_amean_byelt_LOO().
        fctPrd[indXpr] <- predict_amean_byelt_LOO(fct[indXpr],
                                                  assMotifs[indXpr],
                                                  mOccur[indXpr, , drop = FALSE])
    }
    return(fctPrd)
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Arithmetic mean (amean) by element within motif (byelt)
#' by jackknife (jack)
#'
#' @description Take a numeric vector and return the predicted vector
#' computed as the arithmetic mean of all elements belonging to the same motif.
#'
#' @usage amean_byelt_jack(fctMot, mOccurMot, jack)
#'
#' @param fctMot a vector of numeric values of elements belonging to
#' a same motif.
#'
#' @param mOccurMot a matrix of occurrence (occurrence of elements).
#' Its first dimension equals to \code{length(fctMot)}. Its second
#' dimension equals to the number of elements.
#'
#' @param jack a vector of two elements. The first one \code{jack[1]}
#' specifies the size of subset, the second one \code{jack[2]} specifies
#' the number of subsets.
#'
#' @return Return a vector of \code{length(fctMot)}, of which values are
#' computed as the arithmetic mean of all vector elements.
#'
#' @details Prediction is computed using arithmetic mean \code{amean}
#' by motif \code{bymot} in a whole (WITHOUT taking into account species
#' contribution). The elements belonging to a same motif are divided
#' into \code{jack[2]} subsets of \code{jack[1]} elements. Prediction
#' is computed by excluding \code{jack[1]} elements, of which the element
#' to predict. If the total number of elements belonging to the motif
#' is lower than \code{jack[1]*jack[2]}, prediction is computed by
#' Leave-One-Out (LOO).
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
amean_byelt_jack <- function(fctMot, mOccurMot, jack) {
    # Jackknife, by-element prediction within one motif: the assemblages
    # are shuffled and split into jack[2] chunks; each chunk is predicted
    # from per-element means computed over the assemblages OUTSIDE the
    # chunk. Falls back to leave-one-out when the motif has too few
    # assemblages (<= jack[1] * jack[2]).
    nbass <- length(fctMot)
    fctPrd <- numeric(nbass)
    if (nbass > jack[1] * jack[2]) {
        # Predict the assemblages in 'indjack' from the element-wise
        # means over all remaining assemblages (0s masked to NA so that
        # absent elements do not bias the means).
        predict_chunk <- function(indjack) {
            indOth <- seq_len(nbass)[-indjack]
            # drop = FALSE guards against single-row chunks collapsing
            # to vectors, which silently mis-shapes the t(t(.) * vfct)
            # products below.
            tmp <- mOccurMot[indOth, , drop = FALSE] * fctMot[indOth]
            tmp[tmp == 0] <- NA
            vfct <- apply(tmp, MARGIN = 2, FUN = amean)
            tmp <- t(t(mOccurMot[indjack, , drop = FALSE]) * vfct)
            tmp[tmp == 0] <- NA
            apply(tmp, MARGIN = 1, FUN = amean)
        }
        index <- sample.int(nbass)
        size <- floor(nbass / jack[2])
        # First jack[2]-1 chunks have exactly 'size' elements each
        for (ind in seq_len(jack[2] - 1)) {
            indjack <- index[(ind - 1) * size + (1:size)]
            fctPrd[indjack] <- predict_chunk(indjack)
        }
        # Last chunk absorbs the remainder. Its start is computed from
        # jack[2] directly (not from the loop variable 'ind', which is
        # undefined when jack[2] == 1 and the loop body never ran).
        indjack <- index[((jack[2] - 1) * size + 1):nbass]
        fctPrd[indjack] <- predict_chunk(indjack)
    } else {
        fctPrd[ ] <- amean_byelt_LOO(fctMot, mOccurMot)
    }
    return(fctPrd)
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Arithmetic mean (amean) by element within motif (byelt)
#' by jackknife (jack)
#'
#' @description Take a numeric vector and return the predicted vector
#' computed as the arithmetic mean of all elements belonging to the same motif.
#'
#' @usage predict_amean_byelt_jack(fct, assMotifs, mOccur, jack)
#'
#' @param fct a vector of numeric values (assembly properties).
#'
#' @param assMotifs a vector of labels of \code{length(fct)} (assembly motifs).
#'
#' @param mOccur a matrix of occurrence (occurrence of elements).
#' Its first dimension equals to \code{length(fct)}. Its second
#' dimension equals to the number of elements.
#'
#' @param jack a vector of two elements. The first one \code{jack[1]}
#' specifies the size of subset, the second one \code{jack[2]} specifies
#' the number of subsets.
#'
#' @return Return a vector of \code{length(fct)}, of which values are
#' computed as the arithmetic mean of all vector elements.
#'
#' @details Prediction is computed using arithmetic mean \code{amean}
#' by motif \code{bymot} in a whole (WITHOUT taking into account species
#' contribution). The elements belonging to a same motif are divided
#' into \code{jack[2]} subsets of \code{jack[1]} elements. Prediction
#' is computed by excluding \code{jack[1]} elements, of which the
#' element to predict. If the total number of elements belonging to
#' the motif is lower than \code{jack[1]*jack[2]}, prediction is
#' computed by Leave-One-Out (LOO).
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
predict_amean_byelt_jack <- function(fct, assMotifs, mOccur, jack) {
    # Dispatch the jackknife by-element prediction motif by motif.
    # A motif with a single assemblage cannot be predicted: NA.
    res <- numeric(length(assMotifs))
    for (motif in unique(assMotifs)) {
        sel <- which(assMotifs == motif)
        res[sel] <- if (length(sel) > 1) {
            amean_byelt_jack(fct[sel], mOccur[sel, ], jack)
        } else {
            NA
        }
    }
    res
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Arithmetic mean (amean) by motif (bymot) by jackknife (jack)
#' over several experiments (xpr)
#'
#' @description Take a numeric vector and return the predicted vector
#' computed as the arithmetic mean of all elements belonging to a same motif.
#'
#' @usage predict_amean_byelt_jack_xpr(fct, assMotifs, mOccur, jack, xpr)
#'
#' @param fct a vector of numeric values (assembly properties).
#'
#' @param assMotifs a vector of labels of \code{length(fct)} (assembly motifs).
#'
#' @param mOccur a matrix of occurrence (occurrence of elements).
#' Its first dimension equals to \code{length(fct)}. Its second dimension
#' equals to the number of elements.
#'
#' @param jack a vector of two elements. The first one \code{jack[1]}
#' specifies the size of subset, the second one \code{jack[2]} specifies
#' the number of subsets.
#'
#' @param xpr a vector of labels of \code{length(fct)} (assembly experiments).
#'
#' @return Return the arithmetic mean of a vector, as standard \code{mean}
#' function.
#'
#' @details Prediction is computed using arithmetic mean \code{amean} by
#' motif \code{bymot} in a whole (WITHOUT taking into account species
#' contribution). The elements belonging to a same motif are divided
#' into \code{jack[2]} subsets of \code{jack[1]} elements. Prediction
#' is computed by excluding \code{jack[1]} elements, of which the
#' element to predict. If the total number of elements belonging to
#' the motif is lower than \code{jack[1]*jack[2]}, prediction is
#' computed by Leave-One-Out (LOO).
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
predict_amean_byelt_jack_xpr <- function(fct, assMotifs, mOccur, jack, xpr) {
  # Apply the by-element jackknife predictor separately to each experiment,
  # writing each experiment's predictions back in their original positions.
  fctPrd <- numeric(length(assMotifs))
  for (experiment in unique(xpr)) {
    sel <- which(xpr == experiment)
    fctPrd[sel] <- predict_amean_byelt_jack(fct[sel], assMotifs[sel],
                                            mOccur[sel, ], jack)
  }
  return(fctPrd)
}
#oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo
#
# Geometric mean by Element within each Motif ####
#
#oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Geometric mean (gmean) by elements occurring within the assemblage
#' and other assemblages, all assemblages belonging to a same motif (byelt)
#'
#' @description The numeric vector \code{fctMot} get together the properties
#' of assemblages belonging to a same assembly motif. The properties
#' \code{fctMot} of assemblages containing a given element are separately
#' averaged. The property of each assemblage is computed as the average
#' of mean values of assemblages containing the same elements as the
#' considered assemblage. The elemental composition of each assemblage
#' is specified in the binary matrix \code{mOccurMot}: \code{0}
#' if the element does not occur, \code{1} if the element occurs.
#'
#' @usage gmean_byelt(fctMot, mOccurMot)
#'
#' @param fctMot a vector of numeric values (assembly properties).
#'
#' @param mOccurMot a matrix of occurrence (occurrence of elements).
#' Its first dimension equals to \code{length(fctMot)}. Its second
#' dimension equals to the number of elements.
#'
#' @return Return a vector of \code{length(fctMot)}, of which values
#' are computed as an geometric mean.
#'
#' @details Prediction is computed using geometric mean \code{gmean}
#' by element \code{byelt} occurring within the assemblage and other
#' assemblages of a same motif, by including all assemblages belonging
#' to a same motif, even the one to predict.
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
gmean_byelt <- function(fctMot, mOccurMot) {
  # Predict each assemblage of a motif as the geometric mean, over the
  # elements it contains, of the per-element geometric means computed on
  # every assemblage of the motif (including the one being predicted).
  # Start from the multiplicative identity and accumulate one factor
  # per occurring element.
  fctPrd <- rep(1, length(fctMot))
  occCols <- unique(which(mOccurMot[ , , drop = FALSE] == 1,
                          arr.ind = TRUE)[ , 2])
  for (col in occCols) {
    withElt <- which(mOccurMot[ , col] == 1)
    if (length(withElt) > 0) {
      fctPrd[withElt] <- fctPrd[withElt] * gmean(fctMot[withElt])
    }
  }
  # Geometric mean: take the n-th root, n = number of elements per assemblage.
  richness <- apply(mOccurMot, MARGIN = 1, FUN = sum)
  return(fctPrd ^ (1 / richness))
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Geometric mean (gmean) by elements occurring within the assemblage
#' and other assemblages, all assemblages belonging to a same motif (byelt)
#'
#' @description The numeric vector \code{fct} get together the properties
#' of assemblages belonging to different assembly motifs. The properties
#' \code{fct} of assemblages belonging to a given assembly motif and
#' containing a given element are separately averaged. The property of
#' each assemblage is computed as the average of mean values of assemblages
#' containing the same elements as the considered assemblage. The
#' motif of each vector element is specified in the vector \code{assMotifs}.
#' The elemental composition of each assemblage is specified in the
#' binary matrix \code{mOccur}: \code{0} if the element does not occur,
#' \code{1} if the element occurs.
#'
#' @usage predict_gmean_byelt(fct, assMotifs, mOccur)
#'
#' @param fct a vector of numeric values (assembly properties).
#'
#' @param assMotifs a vector of labels of \code{length(fct)} (assembly motifs).
#'
#' @param mOccur a matrix of occurrence (occurrence of elements).
#' Its first dimension equals to \code{length(fct)}. Its second dimension
#' equals to the number of elements.
#'
#' @return Return a vector of \code{length(fct)}, of which values are
#' computed as the geometric mean of all vector elements belonging to
#' a same motif.
#'
#' @details Prediction is computed using geometric mean \code{gmean} by
#' element \code{byelt} occurring within the assemblage and other
#' assemblages of a same motif, by including all assemblages belonging
#' to a same motif, even the one to predict.
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
predict_gmean_byelt <- function(fct, assMotifs, mOccur) {
  # Dispatch each motif to gmean_byelt(); a motif represented by a single
  # assemblage has no neighbours to average, so its observed value is
  # returned unchanged.
  fctPrd <- numeric(length(assMotifs))
  for (motif in unique(assMotifs)) {
    sel <- which(assMotifs == motif)
    fctPrd[sel] <- if (length(sel) > 1) {
      gmean_byelt(fct[sel], mOccur[sel, ])
    } else {
      fct[sel]
    }
  }
  return(fctPrd)
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Geometric mean (gmean) by elements occurring within the assemblage
#' and other assemblages, all assemblages belonging to a same motif
#' (byelt), over several experiments (xpr)
#'
#' @description The numeric vector \code{fct} get together the properties
#' of assemblages belonging to different assembly motifs. The properties
#' \code{fct} of assemblages belonging to a given assembly motif and
#' containing a given element are separately averaged. The property of
#' each assemblage is computed as the average of mean values of assemblages
#' containing the same elements as the considered assemblage.
#' The motif of each vector element is specified in the vector
#' \code{assMotifs}. The elemental composition of each assemblage
#' is specified in the binary matrix \code{mOccur}: \code{0} if
#' the element does not occur, \code{1} if the element occurs.
#'
#' @usage predict_gmean_byelt_xpr(fct, assMotifs, mOccur, xpr)
#'
#' @param fct a vector of numeric values (assembly properties).
#'
#' @param assMotifs a vector of labels of \code{length(fct)} (assembly motifs).
#'
#' @param mOccur a matrix of occurrence (occurrence of elements).
#' Its first dimension equals to \code{length(fct)}. Its second
#' dimension equals to the number of elements.
#'
#' @param xpr a vector of labels of \code{length(fct)} (assembly experiments).
#'
#' @return Return a vector of \code{length(fct)}, of which values
#' are computed as the geometric mean of all vector elements belonging
#' to a same motif.
#'
#' @details Prediction is computed using geometric mean \code{gmean}
#' by element \code{byelt} occurring within the assemblage and other
#' assemblages of a same motif, by including all assemblages belonging
#' to a same motif, even the one to predict.
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
predict_gmean_byelt_xpr <- function(fct, assMotifs, mOccur, xpr) {
  # Apply the within-motif, by-element geometric-mean predictor separately
  # to each experiment, then reassemble predictions in the original order.
  fctPrd <- numeric(length(assMotifs))
  for (experiment in unique(xpr)) {
    sel <- which(xpr == experiment)
    fctPrd[sel] <- predict_gmean_byelt(fct[sel], assMotifs[sel],
                                       mOccur[sel, ])
  }
  return(fctPrd)
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Geometric mean (gmean) by elements occurring within the assemblage
#' and other assemblages belonging to a same motif (byelt),
#' except the assemblage to predict (leave-one-out, LOO)
#'
#' @description The numeric vector \code{fctMot} get together the
#' properties of assemblages belonging to a same assembly motif.
#' The properties \code{fctMot} of assemblages containing a given element
#' are separately averaged, except the assemblage to predict. The property
#' of each assemblage is computed as the average of mean values of
#' assemblages containing the same elements as the considered assemblage.
#' The elemental composition of each assemblage is specified in the
#' binary matrix \code{mOccurMot}: \code{0} if the element does not
#' occur, \code{1} if the element occurs.
#'
#' @usage gmean_byelt_LOO(fctMot, mOccurMot)
#'
#' @param fctMot a vector of numeric values (assembly properties).
#'
#' @param mOccurMot a matrix of occurrence (occurrence of elements).
#' Its first dimension equals to \code{length(fctMot)}. Its second
#' dimension equals to the number of elements.
#'
#' @return Return a vector of \code{length(fct)}, of which values are
#' computed as a geometric mean of all vector elements over several
#' experiments, excepted the value of element to predict that have
#' been left out.
#'
#' @details Prediction is computed using geometric mean \code{gmean}
#' by element \code{byelt} occurring within the assemblage and other
#' assemblages of a same motif, by including all assemblages belonging
#' to a same motif, except the assemblage to predict.
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
gmean_byelt_LOO <- function(fctMot, mOccurMot) {
  # Leave-one-out prediction within one motif: for each assemblage, the
  # per-element geometric means are computed from all OTHER assemblages
  # of the motif, then combined (geometric mean) over the elements the
  # left-out assemblage contains.
  nbass <- length(fctMot)
  fctPrd <- numeric(nbass)
  eltMeans <- numeric(dim(mOccurMot)[2])
  for (ass in seq_len(nbass)) {
    eltMeans[] <- NA            # reset: elements never seen stay NA
    others <- seq_len(nbass)[-ass]
    eltsIn <- which(mOccurMot[ass, ] != 0)
    for (col in eltsIn) {
      withElt <- which(mOccurMot[others, col] == 1)
      if (length(withElt) > 0) {
        eltMeans[col] <- gmean(fctMot[others[withElt]])
      }
    }
    fctPrd[ass] <- gmean(eltMeans[eltsIn])
  }
  return(fctPrd)
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Geometric mean (gmean) by elements occurring within the assemblage
#' and other assemblages belonging to a same motif (byelt), except the
#' assemblage to predict (leave-one-out, LOO)
#'
#' @description The numeric vector \code{fct} get together the properties
#' of assemblages belonging to different assembly motifs. The properties
#' \code{fct} of assemblages belonging to a given assembly motif and
#' containing a given element are separately averaged. The property of
#' each assemblage is computed as the average of mean values of
#' assemblages containing the same elements as the considered assemblage,
#' except the assemblage to predict (leave-one-out, LOO). The motif
#' of each vector element is specified in the vector \code{assMotifs}.
#' The elemental composition of each assemblage is specified in the
#' binary matrix \code{mOccur}: \code{0} if the element does not occur,
#' \code{1} if the element occurs.
#'
#' @usage predict_gmean_byelt_LOO(fct, assMotifs, mOccur)
#'
#' @param fct a vector of numeric values (assembly properties).
#'
#' @param assMotifs a vector of labels of \code{length(fct)} (assembly motifs).
#'
#' @param mOccur a matrix of occurrence (occurrence of elements). Its first
#' dimension equals to \code{length(fct)}. Its second dimension equals
#' to the number of elements.
#'
#' @return Return a vector of \code{length(fctMot)}, of which values are
#' computed as a geometric mean.
#'
#' @details Prediction is computed using geometric mean \code{gmean} by
#' element \code{byelt} occurring within the assemblage and other
#' assemblages of a same motif, by including all assemblages belonging
#' to a same motif, except the assemblage to predict.
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
predict_gmean_byelt_LOO <- function(fct, assMotifs, mOccur) {
  # Leave-one-out prediction, motif by motif: singleton motifs cannot be
  # cross-validated and are predicted as NA.
  fctPrd <- numeric(length(assMotifs))
  for (motif in unique(assMotifs)) {
    sel <- which(assMotifs == motif)
    fctPrd[sel] <- if (length(sel) > 1) {
      gmean_byelt_LOO(fct[sel], mOccur[sel, ])
    } else {
      NA
    }
  }
  return(fctPrd)
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Geometric mean (gmean) by elements occurring within the assemblage
#' and other assemblages belonging to a same motif (byelt),
#' except the assemblage to predict (leave-one-out, LOO).
#' Over several experiments.
#'
#' @description The numeric vector \code{fct} get together the properties
#' of assemblages belonging to different assembly motifs. The properties
#' \code{fct} of assemblages belonging to a given assembly motif and
#' containing a given element are separately averaged. The property of
#' each assemblage is computed as the average of mean values of assemblages
#' containing the same elements as the considered assemblage,
#' except the assemblage to predict (leave-one-out, LOO). The motif
#' of each vector element is specified in the vector \code{assMotifs}.
#' The elemental composition of each assemblage is specified in the
#' binary matrix \code{mOccur}: \code{0} if the element does not
#' occur, \code{1} if the element occurs.
#'
#' @usage predict_gmean_byelt_LOO_xpr(fct, assMotifs, mOccur, xpr)
#'
#' @param fct a vector of numeric values (assembly properties).
#'
#' @param assMotifs a vector of labels of \code{length(fct)} (assembly motifs).
#'
#' @param mOccur a matrix of occurrence (occurrence of elements).
#' Its first dimension equals to \code{length(fct)}. Its second dimension
#' equals to the number of elements.
#'
#' @param xpr a vector of labels of \code{length(fct)} (assembly experiments).
#'
#' @return Return a vector of \code{length(fctMot)}, of which
#' values are computed as a geometric mean.
#'
#' @details Prediction is computed using geometric mean \code{gmean}
#' by element \code{byelt} occurring within the assemblage and other
#' assemblages of a same motif, by including all assemblages belonging
#' to a same motif, except the assemblage to predict. Over several
#' experiments.
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
predict_gmean_byelt_LOO_xpr <- function(fct, assMotifs, mOccur, xpr) {
  # Apply the leave-one-out, by-element geometric-mean predictor separately
  # to each experiment.
  fctPrd <- numeric(length(assMotifs))
  for (experiment in unique(xpr)) {
    sel <- which(xpr == experiment)
    fctPrd[sel] <- predict_gmean_byelt_LOO(fct[sel], assMotifs[sel],
                                           mOccur[sel, ])
  }
  return(fctPrd)
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Geometric mean (gmean) by motif (bymot) by jackknife (jack)
#'
#' @description Take a numeric vector and return the predicted vector
#' computed as the geometric mean of all elements belonging to the same motif.
#'
#' @usage gmean_byelt_jack(fctMot, mOccurMot, jack)
#'
#' @param fctMot a vector of numeric values of elements belonging
#' to a same motif.
#'
#' @param mOccurMot a matrix of occurrence (occurrence of elements).
#' Its first dimension equals to \code{length(fctMot)}. Its second
#' dimension equals to the number of elements.
#'
#' @param jack a vector of two elements. The first one \code{jack[1]}
#' specifies the size of subset, the second one \code{jack[2]} specifies
#' the number of subsets.
#'
#' @return Return a vector of same length as \code{fctMot}, of which values
#' are computed as the geometric mean of all vector elements.
#'
#' @details Prediction is computed using geometric mean \code{gmean}
#' by motif \code{bymot} in a whole (WITHOUT taking into account species
#' contribution). The elements belonging to a same motif are divided
#' into \code{jack[2]} subsets of \code{jack[1]} elements. Prediction
#' is computed by excluding \code{jack[1]} elements, of which the
#' element to predict. If the total number of elements belonging
#' to the motif is lower than \code{jack[1]*jack[2]}, prediction
#' is computed by Leave-One-Out (LOO).
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
gmean_byelt_jack <- function(fctMot, mOccurMot, jack) {
  # Jackknife prediction within one motif, by element, using geometric means.
  #
  # fctMot:    numeric vector of properties of assemblages of one motif.
  # mOccurMot: binary occurrence matrix (assemblages x elements),
  #            first dimension = length(fctMot).
  # jack:      c(size of each subset, number of subsets).
  #
  # The assemblages are split at random (sample.int, so results depend on
  # the RNG state) into jack[2] subsets; each subset is predicted from the
  # remaining assemblages. When the motif holds too few assemblages
  # (nbass <= jack[1] * jack[2]), falls back to leave-one-out.
  nbass <- length(fctMot)
  fctPrd <- numeric(nbass)
  if (nbass > jack[1] * jack[2]) {
    index <- sample.int(nbass)
    size <- floor(nbass / jack[2])
    # Predict the assemblages 'indjack' from all the other assemblages:
    # per-element geometric means are computed on the training rows, then
    # combined over the elements of each left-out assemblage. Zeros are
    # turned into NA so absent elements do not contribute (assumes gmean
    # ignores NA values -- TODO confirm against its definition).
    # drop = FALSE keeps single-row subsets as matrices, so the
    # t(t(.)) / apply logic stays valid when a subset has one assemblage.
    predict_subset <- function(indjack) {
      indOth <- seq_len(nbass)[-indjack]
      tmp <- mOccurMot[indOth, , drop = FALSE] * fctMot[indOth]
      tmp[tmp == 0] <- NA
      vfct <- apply(tmp, MARGIN = 2, FUN = gmean)
      tmp <- t(t(mOccurMot[indjack, , drop = FALSE]) * vfct)
      tmp[tmp == 0] <- NA
      apply(tmp, MARGIN = 1, FUN = gmean)
    }
    for (ind in seq_len(jack[2] - 1)) {
      indjack <- index[(ind - 1) * size + (1:size)]
      fctPrd[indjack] <- predict_subset(indjack)
    }
    # The last subset takes all remaining assemblages (it may be larger than
    # 'size'). Its start is computed from jack[2] rather than from the leaked
    # loop variable 'ind', which does not exist when jack[2] == 1 (the
    # original code errored in that case).
    indjack <- index[((jack[2] - 1) * size + 1):nbass]
    fctPrd[indjack] <- predict_subset(indjack)
  } else {
    fctPrd[] <- gmean_byelt_LOO(fctMot, mOccurMot)
  }
  return(fctPrd)
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Geometric mean (gmean) by motif (bymot) by jackknife (jack)
#'
#' @description Take a numeric vector and return the predicted vector
#' computed as the geometric mean of all elements belonging to the same motif.
#'
#' @usage predict_gmean_byelt_jack(fct, assMotifs, mOccur, jack)
#'
#' @param fct a vector of numeric values (assembly properties).
#'
#' @param assMotifs a vector of labels of \code{length(fct)} (assembly motifs).
#'
#' @param mOccur a matrix of occurrence (occurrence of elements).
#' Its first dimension equals to \code{length(fct)}. Its second
#' dimension equals to the number of elements.
#'
#' @param jack a vector of two elements. The first one\code{jack[1]}
#' specifies the size of subset, the second one \code{jack[2]} specifies
#' the number of subsets.
#'
#' @return Return a vector of \code{length(fct)}, of which values are
#' computed as the geometric mean of all vector elements, excepted the
#' subset left out.
#'
#' @details Prediction is computed using geometric mean \code{gmean} by
#' motif \code{bymot} in a whole (WITHOUT taking into account species
#' contribution). The elements belonging to a same motif are divided
#' into \code{jack[2]} subsets of \code{jack[1]} elements. Prediction
#' is computed by excluding \code{jack[1]} elements, of which the
#' element to predict. If the total number of elements belonging to
#' the motif is lower than \code{jack[1]*jack[2]}, prediction is
#' computed by Leave-One-Out (LOO).
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
predict_gmean_byelt_jack <- function(fct, assMotifs, mOccur, jack) {
  # Jackknife prediction, motif by motif: singleton motifs cannot be
  # cross-validated and are predicted as NA.
  fctPrd <- numeric(length(assMotifs))
  for (motif in unique(assMotifs)) {
    sel <- which(assMotifs == motif)
    fctPrd[sel] <- if (length(sel) > 1) {
      gmean_byelt_jack(fct[sel], mOccur[sel, ], jack)
    } else {
      NA
    }
  }
  return(fctPrd)
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Geometric mean (gmean) by motif (bymot) by jackknife (jack)
#' over several experiments (xpr)
#'
#' @description Take a numeric vector and return the predicted vector
#' computed as the geometric mean of all elements belonging to a same motif.
#'
#' @usage predict_gmean_byelt_jack_xpr(fct, assMotifs, mOccur, jack, xpr)
#'
#' @param fct a vector of numeric values (assembly properties).
#'
#' @param assMotifs a vector of labels of \code{length(fct)} (assembly motifs).
#'
#' @param mOccur a matrix of occurrence (occurrence of elements).
#' Its first dimension equals to \code{length(fct)}. Its second dimension
#' equals to the number of elements.
#'
#' @param jack a vector of two elements. The first one \code{jack[1]}
#' specifies the size of subset, the second one \code{jack[2]} specifies
#' the number of subsets.
#'
#' @param xpr a vector of labels of \code{length(fct)} (assembly experiments).
#'
#' @return Return a vector of \code{length(fct)}, of which values are
#' computed as the geometric mean of all vector elements,
#' over several experiments.
#'
#' @details Prediction is computed using geometric mean \code{gmean}
#' by motif \code{bymot} in a whole (WITHOUT taking into account species
#' contribution). The elements belonging to a same motif are divided
#' into \code{jack[2]} subsets of \code{jack[1]} elements. Prediction
#' is computed by excluding \code{jack[1]} elements, of which
#' the element to predict. If the total number of elements belonging
#' to the motif is lower than \code{jack[1]*jack[2]},
#' prediction is computed by Leave-One-Out (LOO).
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
predict_gmean_byelt_jack_xpr <- function(fct, assMotifs, mOccur, jack, xpr) {
  # Apply the by-element geometric jackknife predictor separately to each
  # experiment.
  fctPrd <- numeric(length(assMotifs))
  for (experiment in unique(xpr)) {
    sel <- which(xpr == experiment)
    fctPrd[sel] <- predict_gmean_byelt_jack(fct[sel], assMotifs[sel],
                                            mOccur[sel, ], jack)
  }
  return(fctPrd)
}
#oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo
#
# Supplementary assemblages to predict ####
#
#oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#
# Prediction of supplementary assemblages computed
# amean = by using arithmetic mean
# bymot = by motif in a whole (WITHOUT taking into account
# species contribution)
# by including all assemblies, even the one to predict
#'
#' @title Prediction of supplementary assemblages
#'
#' @description Take a numeric f.
#'
#' @usage predict_amean_bymot_supp(appFct, appMotifs, supMotifs)
#'
#' @param appFct a vector of numeric values (assembly properties).
#'
#' @param appMotifs a vector of labels of \code{length(appFct)}
#' (assembly motifs).
#'
#' @param supMotifs a vector of labels of assembly motifs of which values must be predicted.
#'
#' @return Return a vector of \code{length(supMotifs)}. The values are
#' computed using arithmetic mean of elements belonging to \code{appMotifs}
#' and sharing a same motif.
#'
#' @details Prediction ...
#'
#' @keywords internal
#
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
predict_amean_bymot_supp <- function(appFct, appMotifs, supMotifs) {
  # Predict supplementary assemblages as the arithmetic mean of the training
  # (apprentice) assemblages sharing the same motif. Motifs absent from the
  # training set remain NA.
  supFct <- rep(NA_real_, length(supMotifs))
  for (motif in unique(supMotifs)) {
    indSup <- which(supMotifs == motif)
    indApp <- which(appMotifs == motif)
    if (length(indSup) > 0 && length(indApp) > 0) {
      supFct[indSup] <- amean(appFct[indApp])
    }
  }
  return(supFct)
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#
# Prediction of supplementary assemblages computed
# gmean = by using geometric mean
# bymot = by motif in a whole
# (WITHOUT taking into account species contribution)
# by including all the assemblies, even the one to predict
#
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#
# Prediction of supplementary assemblages computed
# amean = by using geometric mean
# bymot = by motif in a whole (WITHOUT taking into account
# species contribution)
# by including all assemblies, even the one to predict
#'
#' @title Prediction of supplementary assemblages
#'
#' @description Take a numeric f.
#'
#' @usage predict_gmean_bymot_supp(appFct, appMotifs, supMotifs)
#'
#' @param appFct a vector of numeric values (assembly properties).
#'
#' @param appMotifs a vector of labels of \code{length(appFct)}
#' (assembly motifs).
#'
#' @param supMotifs a vector of labels of assembly motifs of which values
#' must be predicted.
#'
#' @return Return a vector of \code{length(supMotifs)}. The values are
#' computed using arithmetic mean of elements belonging to \code{appMotifs}
#' and sharing a same motif.
#'
#' @details Prediction ...
#'
#' @keywords internal
#
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
predict_gmean_bymot_supp <- function(appFct, appMotifs, supMotifs) {
  # Predict supplementary assemblages as the geometric mean of the training
  # (apprentice) assemblages sharing the same motif. Motifs absent from the
  # training set remain NA.
  supFct <- rep(NA_real_, length(supMotifs))
  for (motif in unique(supMotifs)) {
    indSup <- which(supMotifs == motif)
    indApp <- which(appMotifs == motif)
    if (length(indSup) > 0 && length(indApp) > 0) {
      supFct[indSup] <- gmean(appFct[indApp])
    }
  }
  return(supFct)
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#
# Prediction of supplementary assemblages computed
# amean = by using arithmetic mean
# byelt = by motif WITH taking into account species contribution
# by including all the assemblies, even the one to predict
# for any Function (for instance Fobs)
#
#
#' @title Prediction of supplementary assemblages computed
#'
#' @description Take a numeric f.
#'
#' @usage predict_amean_byelt_supp(appFct, appMotifs, appOccur,
#' supMotifs, supOccur )
#'
#' @param appFct cccc
#'
#' @param appMotifs cccc
#'
#' @param appOccur cccc
#'
#' @param supMotifs cccc
#'
#' @param supOccur cccc
#'
#' @details dd
#'
#' @return cccc
#'
#' @keywords internal
#
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
predict_amean_byelt_supp <- function(appFct, appMotifs, appOccur,
                                     supMotifs, supOccur ) {
  # Predict the property of supplementary assemblages as the ARITHMETIC mean,
  # over the elements each one contains, of per-element mean properties
  # learnt from the training (apprentice) assemblages of the same motif.
  #
  # appFct:    numeric vector of properties of training assemblages.
  # appMotifs: motif label of each training assemblage (length(appFct)).
  # appOccur:  binary occurrence matrix, training assemblages x elements.
  # supMotifs: motif label of each supplementary assemblage to predict.
  # supOccur:  binary occurrence matrix, supplementary assemblages x elements.
  # Returns a numeric vector of length(supMotifs); a prediction involving a
  # (motif, element) pair never observed in the training set comes out NA
  # because the corresponding mfct entry stays NA.
  setAppMot <- unique(appMotifs)
  setSupMot <- unique(supMotifs)
  # Union of motifs so that mfct has one row per motif seen on either side.
  setMot <- sort(union(setAppMot, setSupMot))
  nbMot <- length(setMot)
  # mfct[motif, element]: mean property of training assemblages of that motif
  # containing that element; NA where the combination was never observed.
  # NOTE(review): mfct is indexed below by the 'motif'/'element' values
  # themselves -- this relies on motif labels being character (matched against
  # dimnames); purely numeric labels would be used as positions. Confirm the
  # label type produced upstream.
  mfct <- matrix(NA, nrow = nbMot, ncol = dim(appOccur)[2],
                 dimnames = list(setMot, colnames(appOccur)))
  # Learning pass: fill mfct from the training set.
  for (mot in seq_along(setAppMot)) {
    motif <- setAppMot[mot]
    indApp <- which(appMotifs == motif)
    # Columns (elements) occurring in at least one assemblage of this motif.
    setElt <- unique(which((appOccur[indApp, , drop = FALSE] == 1),
                           arr.ind = TRUE)[ , 2])
    for (elt in seq_along(setElt)) {
      element <- setElt[elt]
      indElt <- which(appOccur[indApp, element] == 1)
      if (length(indElt) > 0)
        mfct[motif, element] <- amean(appFct[indApp[indElt]])
    }
  }
  # Prediction pass: for each supplementary assemblage, sum the learnt
  # per-element means of its elements, then divide by its element count.
  supFct <- numeric(length(supMotifs))
  sizeSup <- apply(supOccur, MARGIN = 1, FUN = sum)   # elements per assemblage
  for (mot in seq_along(setSupMot)) {
    motif <- setSupMot[mot]
    indSupMot <- which(supMotifs == motif)
    if (length(indSupMot) > 0) {
      setSupElt <- unique(which((supOccur[indSupMot, , drop = FALSE] == 1),
                                arr.ind = TRUE)[ , 2])
      for (elt in seq_along(setSupElt)) {
        element <- setSupElt[elt]
        indSupElt <- which(supOccur[indSupMot, element] == 1)
        if (length(indSupElt) > 0) {
          index <- indSupMot[indSupElt]
          # Accumulate; an NA mfct entry propagates NA to the prediction.
          supFct[index] <- supFct[index] + mfct[motif, element]
        }
      }
      supFct[indSupMot] <- supFct[indSupMot] / sizeSup[indSupMot]
    }
  }
  return(supFct)
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#
# Prediction of supplementary assemblages computed
# gmean = by using geometric mean
# byelt = by motif WITH taking into account species contribution
# by including all the assemblies, even the one to predict
# for any Function (for instance Fobs)
#
#' @title Prediction of supplementary assemblages computed
#'
#' @description Take a numeric f.
#'
#' @usage predict_gmean_byelt_supp(appFct, appMotifs, appOccur,
#' supMotifs, supOccur )
#'
#' @param appFct cccc
#'
#' @param appMotifs cccc
#'
#' @param appOccur cccc
#'
#' @param supMotifs cccc
#'
#' @param supOccur cccc
#'
#' @details dd
#'
#' @return cccc
#'
#' @keywords internal
#
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
predict_gmean_byelt_supp <- function(appFct, appMotifs, appOccur,
                                     supMotifs, supOccur ) {
  # Predict the property of supplementary assemblages as the GEOMETRIC mean,
  # over the elements each one contains, of per-element geometric-mean
  # properties learnt from the training (apprentice) assemblages of the
  # same motif.
  #
  # appFct:    numeric vector of properties of training assemblages.
  # appMotifs: motif label of each training assemblage (length(appFct)).
  # appOccur:  binary occurrence matrix, training assemblages x elements.
  # supMotifs: motif label of each supplementary assemblage to predict.
  # supOccur:  binary occurrence matrix, supplementary assemblages x elements.
  # Returns a numeric vector of length(supMotifs); a prediction involving a
  # (motif, element) pair never observed in the training set comes out NA
  # because the corresponding mfct entry stays NA.
  setAppMot <- unique(appMotifs)
  setSupMot <- unique(supMotifs)
  # Union of motifs so that mfct has one row per motif seen on either side.
  setMot <- sort(union(setAppMot, setSupMot))
  nbMot <- length(setMot)
  # mfct[motif, element]: geometric-mean property of training assemblages of
  # that motif containing that element; NA where never observed.
  # NOTE(review): mfct is indexed below by the 'motif'/'element' values
  # themselves -- this relies on motif labels being character (matched against
  # dimnames); purely numeric labels would be used as positions. Confirm the
  # label type produced upstream.
  mfct <- matrix(NA, nrow = nbMot, ncol = dim(appOccur)[2],
                 dimnames = list(setMot, colnames(appOccur)))
  # Learning pass: fill mfct from the training set.
  for (mot in seq_along(setAppMot)) {
    motif <- setAppMot[mot]
    indApp <- which(appMotifs == motif)
    # Columns (elements) occurring in at least one assemblage of this motif.
    setElt <- unique(which((appOccur[indApp, , drop = FALSE] == 1),
                           arr.ind = TRUE)[ , 2])
    for (elt in seq_along(setElt)) {
      element <- setElt[elt]
      indElt <- which(appOccur[indApp, element] == 1)
      if (length(indElt) > 0)
        mfct[motif, element] <- gmean(appFct[indApp[indElt]])
    }
  }
  # Prediction pass: multiply the learnt per-element means of each
  # supplementary assemblage (accumulator starts at 1, the multiplicative
  # identity), then take the n-th root with n = its element count.
  supFct <- numeric(length(supMotifs))
  supFct[] <- 1
  sizeSup <- apply(supOccur, MARGIN = 1, FUN = sum)   # elements per assemblage
  for (mot in seq_along(setSupMot)) {
    motif <- setSupMot[mot]
    indSupMot <- which(supMotifs == motif)
    if (length(indSupMot) > 0) {
      setSupElt <- unique(which((supOccur[indSupMot, , drop = FALSE] == 1),
                                arr.ind = TRUE)[ , 2])
      for (elt in seq_along(setSupElt)) {
        element <- setSupElt[elt]
        indSupElt <- which(supOccur[indSupMot, element] == 1)
        if (length(indSupElt) > 0) {
          index <- indSupMot[indSupElt]
          # Accumulate; an NA mfct entry propagates NA to the prediction.
          supFct[index] <- supFct[index] * mfct[motif, element]
        }
      }
      supFct[indSupMot] <- supFct[indSupMot] ^ (1/sizeSup[indSupMot])
    }
  }
  return(supFct)
}
#oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo
#
# Functions for switch on different options ####
#
#oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Dispatch calibration prediction over mean, model and experiment
#'  options
#'
#' @description Build an option key from \code{opt.mean}, \code{opt.mod} and
#'  the number of distinct experiment labels in \code{xpr}, then delegate to
#'  the matching \code{predict_*} calibration function.
#'
#' @usage predict_cal(fct, assMotifs, mOccur, xpr,
#'                     opt.mean = "amean",
#'                     opt.mod = "bymot" )
#'
#' @param fct a vector of numeric values (assembly properties).
#'
#' @param assMotifs a vector of labels of \code{length(fct)} (assembly motifs).
#'
#' @param mOccur a matrix of occurrence (occurrence of elements).
#'  Its first dimension equals to \code{length(fct)}. Its second dimension
#'  equals to the number of elements.
#'
#' @param xpr a vector of labels of \code{length(fct)} (assembly experiments).
#'
#' @param opt.mean equal to \code{"amean"} (by default) or \code{"gmean"}.
#'
#' @param opt.mod equal to \code{"bymot"} (by default) or \code{"byelt"}.
#'
#' @return Return the vector predicted by the selected \code{predict_*}
#'  function.
#'
#' @details Any value of \code{opt.mean} other than \code{"gmean"} falls back
#'  to \code{"amean"}; any value of \code{opt.mod} other than \code{"byelt"}
#'  falls back to \code{"bymot"}. The \code{"xpr"} suffix is appended to the
#'  dispatch key only when \code{xpr} contains more than one distinct
#'  experiment label.
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
predict_cal <- function(fct, assMotifs, mOccur, xpr,
                        opt.mean = "amean",
                        opt.mod = "bymot") {

  # Normalize each option to its two supported values, defaulting on
  # anything unrecognized (same fall-back behaviour as explicit checks).
  mean.key <- if (opt.mean == "gmean") "gmean" else "amean"
  mod.key  <- if (opt.mod == "byelt") "byelt" else "bymot"
  xpr.key  <- if (length(unique(xpr)) != 1) "xpr" else ""

  # Keys deliberately keep a trailing "." when xpr.key is empty
  # (e.g. "amean.bymot.") to match the dispatch table below.
  switch(paste(mean.key, mod.key, xpr.key, sep = "."),
         amean.bymot.    = predict_amean_bymot(fct, assMotifs),
         amean.bymot.xpr = predict_amean_bymot_xpr(fct, assMotifs, xpr),
         gmean.bymot.    = predict_gmean_bymot(fct, assMotifs),
         gmean.bymot.xpr = predict_gmean_bymot_xpr(fct, assMotifs, xpr),
         amean.byelt.    = predict_amean_byelt(fct, assMotifs, mOccur),
         amean.byelt.xpr = predict_amean_byelt_xpr(fct, assMotifs, mOccur, xpr),
         gmean.byelt.    = predict_gmean_byelt(fct, assMotifs, mOccur),
         gmean.byelt.xpr = predict_gmean_byelt_xpr(fct, assMotifs, mOccur, xpr))
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Dispatch cross-validated prediction of assembly properties
#'
#' @description Build an option key from \code{opt.mean}, \code{opt.mod},
#'  \code{opt.jack} and the number of distinct experiment labels in
#'  \code{xpr}, then delegate to the matching \code{predict_*}
#'  cross-validation function (Leave-One-Out or jackknife).
#'
#' @usage predict_prd(fct, assMotifs, mOccur, xpr,
#'                     opt.mean = "amean",
#'                     opt.mod = "bymot",
#'                     opt.jack = FALSE, jack = c(2, 5) )
#'
#' @param fct a vector of numeric values (assembly properties).
#'
#' @param assMotifs a vector of labels of \code{length(fct)} (assembly motifs).
#'
#' @param mOccur a matrix of occurrence (occurrence of elements). Its first
#'  dimension equals to \code{length(fct)}. Its second dimension equals to
#'  the number of elements.
#'
#' @param xpr a vector of labels of \code{length(fct)} (assembly experiments).
#'
#' @param opt.mean equal to \code{"amean"} (by default) or to \code{"gmean"}.
#'
#' @param opt.mod equal to \code{"bymot"} (by default) or to \code{"byelt"}.
#'
#' @param opt.jack a logical to block or switch to jackknife cross-validation.
#'
#' @param jack \code{jack = c(2, 5)} by default.
#'
#' @return Return the vector predicted by the selected \code{predict_*}
#'  function.
#'
#' @details When \code{opt.jack} is \code{FALSE}, prediction uses
#'  Leave-One-Out (LOO); when \code{TRUE}, the elements of a motif are
#'  divided into \code{jack[2]} subsets of \code{jack[1]} elements and each
#'  prediction excludes the subset containing the element to predict.
#'  Unrecognized \code{opt.mean}/\code{opt.mod} values fall back to
#'  \code{"amean"}/\code{"bymot"}.
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
predict_prd <- function(fct, assMotifs, mOccur, xpr,
                        opt.mean = "amean",
                        opt.mod = "bymot",
                        opt.jack = FALSE, jack = c(2, 5)) {

  # Assemble the four-part dispatch key; empty parts keep their "."
  # separator (e.g. "amean.bymot.." for amean, bymot, no jack, no xpr).
  key <- paste(if (opt.mean == "gmean") "gmean" else "amean",
               if (opt.mod == "byelt") "byelt" else "bymot",
               if (opt.jack == TRUE) "jack" else "",
               if (length(unique(xpr)) != 1) "xpr" else "",
               sep = ".")

  switch(key,
         amean.bymot..         = predict_amean_bymot_LOO(fct, assMotifs),
         amean.bymot..xpr      = predict_amean_bymot_LOO_xpr(fct, assMotifs, xpr),
         amean.bymot.jack.     = predict_amean_bymot_jack(fct, assMotifs, jack),
         amean.bymot.jack.xpr  = predict_amean_bymot_jack_xpr(fct, assMotifs, jack, xpr),
         gmean.bymot..         = predict_gmean_bymot_LOO(fct, assMotifs),
         gmean.bymot..xpr      = predict_gmean_bymot_LOO_xpr(fct, assMotifs, xpr),
         gmean.bymot.jack.     = predict_gmean_bymot_jack(fct, assMotifs, jack),
         gmean.bymot.jack.xpr  = predict_gmean_bymot_jack_xpr(fct, assMotifs, jack, xpr),
         amean.byelt..         = predict_amean_byelt_LOO(fct, assMotifs, mOccur),
         amean.byelt..xpr      = predict_amean_byelt_LOO_xpr(fct, assMotifs, mOccur, xpr),
         amean.byelt.jack.     = predict_amean_byelt_jack(fct, assMotifs, mOccur, jack),
         amean.byelt.jack.xpr  = predict_amean_byelt_jack_xpr(fct, assMotifs, mOccur, jack, xpr),
         gmean.byelt..         = predict_gmean_byelt_LOO(fct, assMotifs, mOccur),
         gmean.byelt..xpr      = predict_gmean_byelt_LOO_xpr(fct, assMotifs, mOccur, xpr),
         gmean.byelt.jack.     = predict_gmean_byelt_jack(fct, assMotifs, mOccur, jack),
         gmean.byelt.jack.xpr  = predict_gmean_byelt_jack_xpr(fct, assMotifs, mOccur, jack, xpr))
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#
# Prediction computed by excluding (LOO) the assembly to predict
#
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#'
#' @title Dispatch prediction of a supplementary (test) assembly set
#'
#' @description Predict the properties of supplementary assemblies from a
#'  calibration (training) set, delegating to the \code{predict_*_supp}
#'  function selected by \code{opt.mean} and \code{opt.mod}.
#'
#' @usage predict_supp(appFct, appMotifs, appOccur,
#'                      supMotifs, supOccur,
#'                      opt.mean = "amean",
#'                      opt.mod = "bymot" )
#'
#' @param appFct a vector of numeric values (calibration assembly properties).
#'
#' @param appMotifs a vector of labels of \code{length(appFct)}
#'  (calibration assembly motifs).
#'
#' @param appOccur a matrix of occurrence (occurrence of elements) for the
#'  calibration set. Its first dimension equals to \code{length(appFct)};
#'  its second dimension equals to the number of elements.
#'
#' @param supMotifs a vector of labels (supplementary assembly motifs).
#'
#' @param supOccur a matrix of occurrence (occurrence of elements) for the
#'  supplementary set.
#'
#' @param opt.mean equal to \code{"amean"} (by default) or \code{"gmean"}.
#'
#' @param opt.mod equal to \code{"bymot"} (by default) or \code{"byelt"}.
#'
#' @return Return the vector predicted by the selected
#'  \code{predict_*_supp} function, or \code{NULL} for an unrecognized
#'  option pair.
#'
#' @keywords internal
#'
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
predict_supp <- function(appFct, appMotifs, appOccur,
                         supMotifs, supOccur,
                         opt.mean = "amean",
                         opt.mod = "bymot" ) {

  # Two-level dispatch on the model option, then the mean option.
  # Unrecognized combinations fall through and return NULL, matching
  # a switch() with no default.
  if (opt.mod == "bymot") {

    if (opt.mean == "amean") {
      predict_amean_bymot_supp(appFct, appMotifs, supMotifs)
    } else if (opt.mean == "gmean") {
      predict_gmean_bymot_supp(appFct, appMotifs, supMotifs)
    }

  } else if (opt.mod == "byelt") {

    if (opt.mean == "amean") {
      predict_amean_byelt_supp(appFct, appMotifs, appOccur,
                               supMotifs, supOccur)
    } else if (opt.mean == "gmean") {
      predict_gmean_byelt_supp(appFct, appMotifs, appOccur,
                               supMotifs, supOccur)
    }

  }
}
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#
# END of FILE
#
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
|
a74cdf8ee2b2ecae3348b7dbd37b6ac51b050888
|
83f2b4095087ed04b6354b7d1638399fb88e64ea
|
/cachematrix.R
|
3fe2b8de06b305f5c070994ef887485dc534f1db
|
[] |
no_license
|
nudnik/ProgrammingAssignment2
|
fefb5a7b17fe9336ecc6ffc5a9b0070fda00e2ef
|
34c329ddce07dccc49137578b8c46190cb7b6f26
|
refs/heads/master
| 2020-04-05T18:30:21.383517
| 2014-12-21T23:34:14
| 2014-12-21T23:34:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,657
|
r
|
cachematrix.R
|
## Pair of functions that cache the inverse of a square, invertible matrix
## so the (potentially expensive) inversion is computed at most once.
## makeCacheMatrix wraps a matrix in a closure exposing getters/setters for
## both the matrix and its cached inverse; its companion cacheSolve consumes
## that wrapper and fills the cache on first use.

## Build a caching wrapper around matrix `x`. Returns a list of four
## accessor closures sharing the wrapper's environment.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; NULL until stored via setinv()

  list(
    # Replace the stored matrix and invalidate any cached inverse.
    set = function(y) {
      x <<- y
      inv <<- NULL
    },
    # Return the stored matrix.
    get = function() x,
    # Store a computed inverse in the cache.
    setinv = function(inverse) inv <<- inverse,
    # Return the cached inverse (NULL if not yet computed).
    getinv = function() inv
  )
}
## cacheSolve returns the inverse of the matrix held by a makeCacheMatrix
## wrapper. On a cache hit the stored inverse is returned immediately
## (after printing a message); on a miss the inverse is computed with
## solve(), written back into the wrapper's cache, and returned.
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }

  inv <- solve(x$get(), ...)  # extra arguments are forwarded to solve()
  x$setinv(inv)
  inv  # Return a matrix that is the inverse of 'x'
}
|
5457895bb8d68515d5f9cb60d26f72b28bc361c4
|
74de4a91086a9387e9c03cf11c73f7c62c798a8b
|
/man/timss.rho.pv.Rd
|
961f89aac700c533051dfa3aa8a51764b7962236
|
[] |
no_license
|
eldafani/intsvy
|
51593ae6fb256e9287a7d89a263f833cd45ff99a
|
8cfade54a71b2d1d6222c048dd2997b11e6528d9
|
refs/heads/master
| 2023-07-19T23:04:54.181692
| 2023-07-11T16:39:46
| 2023-07-11T16:39:46
| 19,273,232
| 17
| 14
| null | 2019-09-18T05:43:42
| 2014-04-29T10:51:02
|
R
|
UTF-8
|
R
| false
| false
| 1,350
|
rd
|
timss.rho.pv.Rd
|
\name{timss.rho.pv}
\alias{timss.rho.pv}
\title{Two-way weighted correlation with plausible values
}
\description{
timss.rho.pv calculates the correlation and standard error either between two achievement variables, each based on 5 plausible values, or between one achievement variable and an observed variable (i.e., one with observed scores rather than plausible values).
}
\usage{
timss.rho.pv(variable, pvlabel, by,
data, export = FALSE, name = "output", folder = getwd())
}
\arguments{
\item{variable}{
A data label for the observed variable
}
\item{pvlabel}{
One or two labels describing the achievement variables.
}
\item{by}{
The label for the grouping variable, usually the countries (i.e., by="IDCNTRYL"), but could be any other categorical variable.
}
\item{data}{
An R object, normally a data frame, containing the data from TIMSS.
}
\item{export}{
A logical value. If TRUE, the output is exported to a file in comma-separated value format (.csv) that can be opened from LibreOffice or Excel.
}
\item{name}{
The name of the exported file.
}
\item{folder}{
The folder where the exported file is located.
}
}
\value{
timss.rho.pv returns a matrix with correlations and standard errors.
}
\seealso{
pirls.rho.pv, pirls.rho, timss.rho
}
\examples{
\dontrun{
timss.rho.pv(variable="BSDGEDUP", pvlabel="BSMMAT", by="IDCNTRYL", data=timss)
}
}
|
37b522c2d8b5fbb3a8f75606972d757d7a1b2eae
|
578f6537a1b45f9ed95a9a79897ea52acac5feec
|
/R/spatialEnhance.R
|
3244b2b122b7c7d0b612742abc9d740ac1d8d18f
|
[
"MIT"
] |
permissive
|
JunqiangWang/BayesSpace
|
0e77a6b293444bd16155fc0391b5819a61728d28
|
cd2c155f457b3bf05986b2961fa8f4d2937769fe
|
refs/heads/master
| 2023-08-30T08:05:39.870689
| 2021-11-04T00:00:01
| 2021-11-04T00:00:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,607
|
r
|
spatialEnhance.R
|
#' Enhance spot resolution
#'
#' Enhanced clustering of a spatial expression dataset to subspot resolution.
#'
#' @param sce A SingleCellExperiment object containing the spatial data.
#' @param q The number of clusters.
#' @param platform Spatial transcriptomic platform. Specify 'Visium' for hex
#' lattice geometry or 'ST' for square lattice geometry. Specifying this
#' parameter is optional when analyzing SingleCellExperiments processed using
#' \code{\link{readVisium}}, \code{\link{spatialPreprocess}}, or
#' \code{\link{spatialCluster}}, as this information is included in their
#' metadata.
#' @param use.dimred Name of a reduced dimensionality result in
#' \code{reducedDims(sce)}. If provided, cluster on these features directly.
#' @param d Number of top principal components to use when clustering.
#' @param init Initial cluster assignments for spots.
#' @param init.method If \code{init} is not provided, cluster the top \code{d}
#' PCs with this method to obtain initial cluster assignments.
#' @param model Error model. ('normal' or 't')
#' @param nrep The number of MCMC iterations.
#' @param gamma Smoothing parameter. (Values in range of 1-3 seem to work well.)
#' @param mu0 Prior mean hyperparameter for mu. If not provided, mu0 is set to
#' the mean of PCs over all spots.
#' @param lambda0 Prior precision hyperparam for mu. If not provided, lambda0
#' is set to a diagonal matrix \eqn{0.01 I}.
#' @param alpha Hyperparameter for Wishart distributed precision lambda.
#' @param beta Hyperparameter for Wishart distributed precision lambda.
#' @param save.chain If true, save the MCMC chain to an HDF5 file.
#' @param chain.fname File path for saved chain. Tempfile used if not provided.
#' @param burn.in Number of iterations to exclude as burn-in period. The MCMC
#' iterations are currently thinned to every 100; accordingly \code{burn.in}
#' is rounded down to the nearest multiple of 100.
#' @param jitter_scale Controls the amount of jittering. Small amounts of
#' jittering are more likely to be accepted but result in exploring the space
#' more slowly. We suggest tuning \code{jitter_scale} so that Ychange is on
#' average around 25\%-40\%. Ychange can be accessed via \code{mcmcChain()}.
#' @param jitter_prior Scale factor for the prior variance, parameterized as the
#' proportion (default = 0.3) of the mean variance of the PCs.
#' We suggest making \code{jitter_prior} smaller if the jittered values are
#' not expected to vary much from the overall mean of the spot.
#' @param verbose Log progress to stderr.
#'
#' @return Returns a new SingleCellExperiment object. By default, the
#' \code{assays} of this object are empty, and the enhanced resolution PCs
#' are stored as a reduced dimensionality result accessible with
#' \code{reducedDim(sce, 'PCA')}.
#'
#' @details
#' The enhanced \code{SingleCellExperiment} has most of the properties of the
#' input SCE - \code{rowData}, \code{colData}, \code{reducedDims} - but does
#' not include expression data in \code{counts} or \code{logcounts}. To impute
#' enhanced expression vectors, please use [enhanceFeatures()] after
#' running \code{spatialEnhance}.
#'
#' The \code{colData} of the enhanced \code{SingleCellExperiment} includes the
#' following columns to permit referencing the subspots in spatial context and
#' linking back to the original spots:
#' \itemize{
#' \item \code{spot.idx}: Index of the spot this subspot belongs to (with
#' respect to the input SCE).
#' \item \code{subspot.idx}: Index of the subspot within its parent spot.
#' \item \code{spot.row}: Array row of the subspot's parent spot.
#' \item \code{spot.col}: Array col of the subspot's parent spot.
#' \item \code{row}: Array row of the subspot. This is the parent spot's row
#' plus an offset based on the subspot's position within the spot.
#' \item \code{col}: Array col of the subspot. This is the parent spot's col
#' plus an offset based on the subspot's position within the spot.
#' \item \code{imagerow}: Pixel row of the subspot. This is the parent spot's
#' row plus an offset based on the subspot's position within the spot.
#' \item \code{imagecol}: Pixel col of the subspot. This is the parent spot's
#' col plus an offset based on the subspot's position within the spot.
#' }
#'
#' @examples
#' set.seed(149)
#' sce <- exampleSCE()
#' sce <- spatialCluster(sce, 7, nrep=100, burn.in=10)
#' enhanced <- spatialEnhance(sce, 7, nrep=100, burn.in=10)
#'
#' @seealso \code{\link{spatialCluster}} for clustering at the spot level
#' before enhancing, \code{\link{clusterPlot}} for visualizing the cluster
#' assignments, \code{\link{enhanceFeatures}} for imputing enhanced
#' expression, and \code{\link{mcmcChain}} for examining the full MCMC chain
#' associated with the enhanced clustering.
#' .
#'
#' @name spatialEnhance
NULL
#' Wrapper around C++ \code{iterate_deconv()} function
#'
#' Splits each spot into subspots (6 for hex Visium spots, 9 for square ST
#' spots), offsets the subspot positions around each spot center, builds the
#' subspot neighborhood graph, and runs the MCMC deconvolution implemented
#' in C++ (\code{iterate_deconv}).
#'
#' @return List of enhancement parameter values at each iteration
#'
#' @keywords internal
#' @importFrom stats cov
deconvolve <- function(Y, positions, xdist, ydist, q, init, nrep = 1000,
    model = "normal", platform = c("Visium", "ST"), verbose = TRUE,
    jitter_scale = 5, jitter_prior = 0.01, mu0 = colMeans(Y), gamma = 2,
    lambda0 = diag(0.01, nrow = ncol(Y)), alpha = 1, beta = 0.01) {

    d <- ncol(Y)    # number of features (PCs) per spot
    n0 <- nrow(Y)   # number of original spots
    Y <- as.matrix(Y)
    # Prior scale for jittering: jitter_prior as a proportion of the mean
    # variance of the columns of Y.
    c <- jitter_prior * 1 / (2 * mean(diag(cov(Y))))
    positions <- as.matrix(positions)
    colnames(positions) <- c("x", "y")

    platform <- match.arg(platform)
    subspots <- ifelse(platform == "Visium", 6, 9)

    # Replicate spot-level data once per subspot. Rows are ordered
    # subspot-major: all spots' subspot 1, then all spots' subspot 2, ...
    init1 <- rep(init, subspots)
    Y2 <- Y[rep(seq_len(n0), subspots), ]  # rbind 6 or 9 times
    positions2 <- positions[rep(seq_len(n0), subspots), ]  # rbind 6 or 9 times

    # Subspot offsets relative to the parent spot center, scaled by the
    # inter-spot distances along x and y.
    shift <- .make_subspot_offsets(subspots)
    shift <- t(t(shift) * c(xdist, ydist))

    # Neighbor search radius: 5% larger than the largest (Manhattan)
    # subspot offset; halved for the square ST lattice.
    dist <- max(rowSums(abs(shift))) * 1.05
    if (platform == "ST") {
        dist <- dist/2
    }

    # "Var1"/"Var2" are the column names produced by expand.grid inside
    # .make_subspot_offsets().
    shift_long <- shift[rep(seq_len(subspots), each=n0), ]
    positions2[, "x"] <- positions2[, "x"] + shift_long[, "Var1"]
    positions2[, "y"] <- positions2[, "y"] + shift_long[, "Var2"]

    n <- nrow(Y2)
    if (verbose)
        message("Calculating neighbors...")
    df_j <- find_neighbors(positions2, dist, "manhattan")

    if (verbose)
        message("Fitting model...")
    tdist <- (model == "t")  # TRUE selects the Student-t error model
    out <- iterate_deconv(Y=Y2, df_j=df_j, tdist=tdist, nrep=nrep, n=n, n0=n0,
        d=d, gamma=gamma, q=q, init=init1, subspots=subspots, verbose=verbose,
        jitter_scale=jitter_scale, c=c, mu0=mu0, lambda0=lambda0, alpha=alpha,
        beta=beta)
    # Attach the subspot positions so callers can build colData from them.
    out$positions <- positions2
    out
}
#' Define offsets for each subspot layout.
#'
#' Hex spots are divided into 6 triangular subspots, square spots are divided
#' into 9 squares. Offsets are expressed as fractions of the inter-spot
#' distance, relative to the spot center. Column names ("Var1", "Var2") come
#' from \code{expand.grid}.
#'
#' @param n_subspots_per Number of subspots per spot (6 or 9)
#' @return Matrix of x and y offsets, one row per subspot
#'
#' @keywords internal
.make_subspot_offsets <- function(n_subspots_per) {
    if (n_subspots_per == 6) {
        # Four diagonal subspots plus two on the horizontal axis.
        corner <- expand.grid(c(1/3, -1/3), c(1/3, -1/3))
        edge <- expand.grid(c(2/3, -2/3), 0)
        rbind(corner, edge)
    } else if (n_subspots_per == 9) {
        # Full 3x3 grid of square subspots.
        rbind(expand.grid(c(1/3, -1/3, 0), c(1/3, -1/3, 0)))
    } else {
        stop("Only 6 and 9 subspots currently supported.")
    }
}
#' Add subspot labels and offset row/col locations before making enhanced SCE.
#'
#' Subspots are stored as (1.1, 2.1, 3.1, ..., 1.2, 2.2, 3.2, ...)
#'
#' @param positions Matrix of subspot positions (imagecol, imagerow columns;
#'   from deconv$positions)
#' @param sce Original sce (to obtain number of spots and original row/col)
#' @param n_subspots_per Number of subspots per spot
#'
#' @return Data frame with added subspot names, parent spot indices, and offset
#'   row/column coordinates
#'
#' @keywords internal
#' @importFrom assertthat assert_that
.make_subspot_coldata <- function(positions, sce, n_subspots_per) {
    cdata <- as.data.frame(positions)
    colnames(cdata) <- c("imagecol", "imagerow")

    n_spots <- ncol(sce)
    n_subspots <- nrow(cdata)
    # One row per (spot, subspot) pair is required.
    assert_that(nrow(cdata) == n_spots * n_subspots_per)

    ## Index of parent spot is (subspot % n_spots)
    idxs <- seq_len(n_subspots)
    spot_idxs <- ((idxs - 1) %% n_spots) + 1
    subspot_idxs <- rep(seq_len(n_subspots_per), each=n_spots)
    cdata$spot.idx <- spot_idxs
    cdata$subspot.idx <- subspot_idxs
    rownames(cdata) <- paste0("subspot_", spot_idxs, ".", subspot_idxs)

    # Array coordinates: parent spot's row/col plus the subspot's offset.
    # Offsets are repeated with each=n_spots so they align with the
    # subspot-major row ordering established above.
    offsets <- .make_subspot_offsets(n_subspots_per)
    cdata$spot.row <- rep(sce$row, n_subspots_per)
    cdata$spot.col <- rep(sce$col, n_subspots_per)
    cdata$col <- cdata$spot.col + rep(offsets[, 1], each=n_spots)
    cdata$row <- cdata$spot.row + rep(offsets[, 2], each=n_spots)

    cols <- c("spot.idx", "subspot.idx", "spot.row", "spot.col", "row", "col", "imagerow", "imagecol")
    cdata[, cols]
}
#' @export
#' @rdname spatialEnhance
#' @importFrom SingleCellExperiment SingleCellExperiment reducedDim<-
#' @importFrom SummarizedExperiment rowData
#' @importFrom assertthat assert_that
spatialEnhance <- function(sce, q, platform = c("Visium", "ST"),
    use.dimred = "PCA", d = 15,
    init = NULL, init.method = c("spatialCluster", "mclust", "kmeans"),
    model = c("t", "normal"), nrep = 200000, gamma = NULL,
    mu0 = NULL, lambda0 = NULL, alpha = 1, beta = 0.01,
    save.chain = FALSE, chain.fname = NULL, burn.in=10000,
    jitter_scale = 5, jitter_prior = 0.3, verbose = FALSE) {

    assert_that(nrep >= 100) # require at least one iteration after thinning
    assert_that(burn.in >= 0)
    if (burn.in >= nrep)
        stop("Please specify a burn-in period shorter than the total number of iterations.")

    ## Thinning interval; only every 100 iterations are kept to reduce memory
    ## This is temporarily hard-coded into the C++ code
    thin <- 100

    ## If user didn't specify a platform, attempt to parse from SCE metadata
    ## otherwise check against valid options
    if (length(platform) > 1) {
        platform <- .bsData(sce, "platform", match.arg(platform))
    } else {
        platform <- match.arg(platform)
    }

    ## Visium uses pixel coordinates (inter-spot distances computed later
    ## by .prepare_inputs); ST uses array coordinates with unit spacing.
    if (platform == "Visium") {
        position.cols <- c("imagecol", "imagerow")
        xdist <- ydist <- NULL # Compute with .prepare_inputs
    } else if (platform == "ST") {
        position.cols <- c("col", "row")
        xdist <- ydist <- 1
    }

    inputs <- .prepare_inputs(sce, use.dimred=use.dimred, d=d,
        positions=NULL, position.cols=position.cols,
        xdist=xdist, ydist=ydist)

    ## Initialize cluster assignments (use spatialCluster by default)
    if (is.null(init)) {
        init.method <- match.arg(init.method)
        if (init.method == "spatialCluster") {
            msg <- paste0("Must run spatialCluster on sce before enhancement ",
                "if using spatialCluster to initialize.")
            assert_that("spatial.cluster" %in% colnames(colData(sce)), msg=msg)
            init <- sce$spatial.cluster
        } else {
            init <- .init_cluster(inputs$PCs, q, init, init.method)
        }
    }

    ## Set model parameters; defaults depend on the data and the platform
    model <- match.arg(model)
    if (is.null(mu0))
        mu0 <- colMeans(inputs$PCs)
    if (is.null(lambda0))
        lambda0 <- diag(0.01, ncol(inputs$PCs))
    if (is.null(gamma)) {
        if (platform == "Visium") {
            gamma <- 3
        } else if (platform == "ST") {
            gamma <- 2
        }
    }

    ## Run the MCMC deconvolution at subspot resolution
    deconv <- deconvolve(inputs$PCs, inputs$positions, nrep=nrep, gamma=gamma,
        xdist=inputs$xdist, ydist=inputs$ydist, q=q, init=init, model=model,
        platform=platform, verbose=verbose, jitter_scale=jitter_scale,
        jitter_prior=jitter_prior, mu0=mu0, lambda0=lambda0, alpha=alpha,
        beta=beta)

    ## Create enhanced SCE (assays left empty; impute with enhanceFeatures)
    n_subspots_per <- ifelse(platform == "Visium", 6, 9)
    cdata <- .make_subspot_coldata(deconv$positions, sce, n_subspots_per)
    enhanced <- SingleCellExperiment(assays=list(),
        rowData=rowData(sce), colData=cdata)

    ## Scale burn.in period to thinned intervals, and
    ## add one to skip initialization values stored before first iteration
    burn.in <- (burn.in %/% thin) + 1

    ## Average PCs, excluding burn-in
    deconv_PCs <- Reduce(`+`, deconv$Y[-seq_len(burn.in)]) / (length(deconv$Y) - burn.in)
    colnames(deconv_PCs) <- paste0("PC", seq_len(ncol(deconv_PCs)))
    reducedDim(enhanced, "PCA") <- deconv_PCs

    ## Choose modal cluster label, excluding burn-in
    message("Calculating labels using iterations ", (burn.in - 1) * thin,
        " through ", nrep, ".")
    zs <- deconv$z[seq(burn.in, (nrep %/% thin) + 1), ]
    if (burn.in == (nrep %/% thin) + 1)
        # Only one retained iteration: zs dropped to a vector, restore shape
        labels <- matrix(zs, nrow=1)
    else
        labels <- apply(zs, 2, Mode)
    enhanced$spatial.cluster <- unname(labels)

    ## Optionally persist the cleaned MCMC chain to an HDF5 file
    if (save.chain) {
        deconv <- .clean_chain(deconv, method="enhance")
        params <- c("z", "mu", "lambda", "weights", "Y", "Ychange")
        metadata(enhanced)$chain.h5 <- .write_chain(deconv, chain.fname, params)
    }

    ## Add metadata to new SingleCellExperiment object
    metadata(enhanced)$BayesSpace.data <- list()
    metadata(enhanced)$BayesSpace.data$platform <- platform
    metadata(enhanced)$BayesSpace.data$is.enhanced <- TRUE

    enhanced
}
|
c7065a8e2a68c1015def616aeabf59dbf4f028f1
|
5046a9af6b43475dc3788b8f1073d96e764add30
|
/man/hybrid.cpt.Rd
|
706be1a27f45d55fd53e804262bb827775f63b41
|
[] |
no_license
|
ChuangWan/breakfast
|
c63d45acc79c8de7addec825b77726f80633c526
|
e42966b333dc56da6ee8d05970070cb7a8d1f157
|
refs/heads/master
| 2020-04-04T23:55:52.924587
| 2017-09-28T14:49:36
| 2017-09-28T14:49:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,896
|
rd
|
hybrid.cpt.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hybrid_cpt.R
\name{hybrid.cpt}
\alias{hybrid.cpt}
\title{Multiple change-point detection in the mean of a vector using a hybrid between the TGUH and Adaptive WBS methods.}
\usage{
hybrid.cpt(x, M = 1000, sigma = stats::mad(diff(x)/sqrt(2)), th.const = 1,
p = 0.01, minseglen = 1, bal = 1/20, num.zero = 10^(-5))
}
\arguments{
\item{x}{A vector containing the data in which you wish to find change-points.}
\item{M}{The same as the corresponding parameter in \code{\link{wbs.K.cpt}}.}
\item{sigma}{The same as the corresponding parameter in \code{\link{tguh.cpt}}.}
\item{th.const}{The same as the corresponding parameter in \code{\link{tguh.cpt}}.}
\item{p}{The same as the corresponding parameter in \code{\link{tguh.cpt}}.}
\item{minseglen}{The same as the corresponding parameter in \code{\link{tguh.cpt}}.}
\item{bal}{The same as the corresponding parameter in \code{\link{tguh.cpt}}.}
\item{num.zero}{The same as the corresponding parameter in \code{\link{tguh.cpt}}.}
}
\value{
A list with the following components:
\item{est}{The estimated piecewise-constant mean of \code{x}.}
\item{no.of.cpt}{The estimated number of change-points in the piecewise-constant mean of \code{x}.}
\item{cpt}{The estimated locations of change-points in the piecewise-constant mean of \code{x} (these
are the final indices \emph{before} the location of each change-point).}
}
\description{
This function estimates the number and locations of change-points in the
piecewise-constant mean of the noisy input vector, combining the Tail-Greedy Unbalanced Haar and Adaptive Wild Binary Segmentation
methods (see Details for the relevant literature references).
The constant means between each pair
of neighbouring change-points are also estimated. The method works best when the noise in the
input vector is independent and identically distributed Gaussian.
}
\details{
This is a hybrid method, which first estimates the number of change-points using
\code{\link{tguh.cpt}} and then estimates their locations using \code{\link{wbs.K.cpt}}.
The change-point detection algorithms used in \code{tguh.cpt} are: the
Tail-Greedy Unbalanced Haar method as described in "Tail-greedy bottom-up data
decompositions and fast multiple change-point detection", P. Fryzlewicz (2017),
preprint, and Adaptive Wild Binary Segmentation as described in "Data-adaptive Wild Binary Segmentation",
P. Fryzlewicz (2017), in preparation as of September 28th, 2017.
}
\examples{
teeth <- rep(rep(0:1, each=5), 20)
teeth.noisy <- teeth + rnorm(200)/5
teeth.cleaned <- hybrid.cpt(teeth.noisy)
ts.plot(teeth.cleaned$est)
}
\seealso{
\code{\link{segment.mean}}, \code{\link{wbs.bic.cpt}},
\code{\link{wbs.thresh.cpt}}, \code{\link{wbs.cpt}}, \code{\link{tguh.cpt}}, \code{\link{wbs.K.cpt}}
}
\author{
Piotr Fryzlewicz, \email{p.fryzlewicz@lse.ac.uk}
}
|
72bd74635a68fd056d90a1fc9f3ba149e919a163
|
407fbef642b54d0d3c0933b8d2a9ecc870fdf856
|
/R/TextClassification/Driver.R
|
dc8bd4d2439e4534416321b81c9e531c4a86e22b
|
[] |
no_license
|
xzb/ml_practice
|
f527a9d2d2ec1828d16e7b2568f6ec8f8dc60b0a
|
661582022588bc4fc4a425bd6020eadbbf7ce8f9
|
refs/heads/master
| 2021-01-21T12:58:57.254797
| 2016-04-26T06:37:42
| 2016-04-26T06:37:42
| 52,067,636
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,162
|
r
|
Driver.R
|
#install.packages("RTextTools")
library("RTextTools")

# Folders with the training documents, one folder per 20-newsgroups category.
TrainingSets <- list(
"data/20news-bydate/20news-bydate-train/alt.atheism",
"data/20news-bydate/20news-bydate-train/comp.graphics",
"data/20news-bydate/20news-bydate-train/misc.forsale",
"data/20news-bydate/20news-bydate-train/rec.autos",
"data/20news-bydate/20news-bydate-train/sci.crypt"
)
# Label CSVs paired index-for-index with TrainingSets.
TrainingLabels <- list(
"data/20news-bydate/20news-bydate-train/alt_atheism_label.csv",
"data/20news-bydate/20news-bydate-train/comp_graphics_label.csv",
"data/20news-bydate/20news-bydate-train/misc_forsale_label.csv",
"data/20news-bydate/20news-bydate-train/rec_autos_label.csv",
"data/20news-bydate/20news-bydate-train/sci_crypt_label.csv"
)
# Folders with the held-out test documents (same five categories).
TestingSets <- list(
"data/20news-bydate/20news-bydate-test/alt.atheism",
"data/20news-bydate/20news-bydate-test/comp.graphics",
"data/20news-bydate/20news-bydate-test/misc.forsale",
"data/20news-bydate/20news-bydate-test/rec.autos",
"data/20news-bydate/20news-bydate-test/sci.crypt"
)
# Label CSVs paired index-for-index with TestingSets.
TestingLabels <- list(
"data/20news-bydate/20news-bydate-test/alt_atheism_label.csv",
"data/20news-bydate/20news-bydate-test/comp_graphics_label.csv",
"data/20news-bydate/20news-bydate-test/misc_forsale_label.csv",
"data/20news-bydate/20news-bydate-test/rec_autos_label.csv",
"data/20news-bydate/20news-bydate-test/sci_crypt_label.csv"
)
# ====== load training data =======
# Read each documents folder together with its matching label file and
# stack the results into one data frame. Collecting the frames in a list
# and rbind-ing once replaces the original grow-by-rbind loop (quadratic
# copying) and the fragile rm()/exists() bootstrap of the accumulator,
# which warned (rm of a missing object) or silently reused a stale
# `dataFrame` left over from a previous run.
#
# @param data_dirs   list of folder paths, one per category
# @param label_paths list of label CSV paths, paired index-for-index
# @return one data frame with the rows of every category, in input order
read_labeled_sets <- function(data_dirs, label_paths) {
  frames <- Map(
    function(data_dir, label_path) {
      read_data(data_dir, type = "folder", index = label_path, warn = FALSE)
    },
    data_dirs, label_paths
  )
  do.call(rbind, frames)
}

trainFrame <- read_labeled_sets(TrainingSets, TrainingLabels)
trainingSize <- length(trainFrame$Text.Data)

# ====== load testing data ======
# Test rows are appended after the training rows; the container below
# distinguishes them purely by index range.
dataFrame <- rbind(trainFrame, read_labeled_sets(TestingSets, TestingLabels))
wholeSize <- length(dataFrame$Text.Data)

# Document-term matrix over the full corpus (train + test together so both
# share one vocabulary).
doc_matrix <- create_matrix(dataFrame$Text.Data, language = "english",
                            removeNumbers = TRUE, stemWords = TRUE,
                            removeSparseTerms = .998)
container <- create_container(doc_matrix, dataFrame$Labels,
                              trainSize = seq_len(trainingSize),
                              testSize = (trainingSize + 1):wholeSize,
                              virgin = FALSE)
# ====== create model ======
# Train one model per algorithm on the container's training slice.
SVM <- train_model(container,"SVM")
GLMNET <- train_model(container,"GLMNET")
MAXENT <- train_model(container,"MAXENT")
BOOSTING <- train_model(container,"BOOSTING")
# BAGGING and RF are disabled -- presumably too expensive on this
# corpus (TODO confirm why they were commented out).
#BAGGING <- train_model(container,"BAGGING")
#RF <- train_model(container,"RF")
NNET <- train_model(container,"NNET")
TREE <- train_model(container,"TREE")
# ====== test model ======
# Label the held-out test slice with each trained model.
SVM_CLASSIFY <- classify_model(container, SVM)
GLMNET_CLASSIFY <- classify_model(container, GLMNET)
MAXENT_CLASSIFY <- classify_model(container, MAXENT)
BOOSTING_CLASSIFY <- classify_model(container, BOOSTING)
#BAGGING_CLASSIFY <- classify_model(container, BAGGING)
#RF_CLASSIFY <- classify_model(container, RF)
NNET_CLASSIFY <- classify_model(container, NNET)
TREE_CLASSIFY <- classify_model(container, TREE)
# ====== summary ======
analytics <- create_analytics(container, cbind(SVM_CLASSIFY, GLMNET_CLASSIFY, MAXENT_CLASSIFY, BOOSTING_CLASSIFY, NNET_CLASSIFY, TREE_CLASSIFY))
summary(analytics)
write.csv(analytics@algorithm_summary, "AlgorithmSummary.csv")
# ====== calculate accuracy ======
# Predicted label vectors per algorithm, in the same order as the
# column names written to Accuracy.csv below.
predictResult <- list(
analytics@document_summary$SVM_LABEL,
analytics@document_summary$LOGITBOOST_LABEL,
analytics@document_summary$GLMNET_LABEL,
analytics@document_summary$TREE_LABEL,
analytics@document_summary$NNETWORK_LABEL,
analytics@document_summary$MAXENTROPY_LABEL
)
# True labels of the held-out test rows.
testingLabel <- dataFrame$Labels[(trainingSize + 1) : wholeSize]
# Fraction of test documents each algorithm labelled correctly.
# vapply() replaces the original grow-with-cbind loop, and mean(x == y)
# replaces the redundant sum(comp == TRUE) / length(comp).
accuracy <- vapply(predictResult,
                   function(predicted) mean(predicted == testingLabel),
                   numeric(1))
# Keep the original one-row-matrix shape so Accuracy.csv is unchanged.
accuracy <- matrix(accuracy, nrow = 1, dimnames = list(
  NULL, c("SVM", "LOGITBOOST", "GLMNET", "TREE", "NNETWORK", "MAXENTROPY")))
write.csv(accuracy, "Accuracy.csv")
|
046fd81be17ac0e1c03870129b5ce2abafdf7e88
|
1bf22fc2ca2bd11cba52a3b3f8d196228016d0d6
|
/setup.R
|
c33273713544ed9d9eb37269b3a9a8b7a8e2e4bb
|
[
"MIT"
] |
permissive
|
dudek-com/gvtp
|
eddead78816f219febaafd77bee527c2381db4f3
|
a547af4d17ef637bd32eebbdc255d2d0b09810c8
|
refs/heads/main
| 2023-03-14T07:08:19.358677
| 2021-03-02T23:01:27
| 2021-03-02T23:01:27
| 304,129,414
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,795
|
r
|
setup.R
|
# load libraries ----
# Bootstrap librarian. BUG FIX: install.packages(librarian) passed an
# unquoted symbol, which fails with "object 'librarian' not found" in
# exactly the case this branch exists for (package not yet installed).
if (!require(librarian)){
  install.packages("librarian")
  library(librarian)
}
# load libraries, install if missing
shelf(
  # tables
  DT,
  # plots
  ggplot2, trelliscopejs, gapminder, # kassambara/easyGgplot2
  # modeling
  broom, caret, corrplot, corrr, dismo, mgcv, randomForest, sdm,
  # data wrangling
  dplyr, purrr, readr, readxl, tibble, tidyr,
  # utilities
  fs, glue, here, htmltools, skimr, stringr, units)
# sdm::installAll() # run once
# Re-point select() at dplyr so later masking (e.g. by raster/sdm) does
# not break unqualified select() calls; use <- for assignment.
select <- dplyr::select
# paths & variables ----
# Google Drive root for project data; case_when() yields NA for any
# other user -- add a branch per machine/user that runs this script.
dir_gdrive <- case_when(
  Sys.info()[['user']] == "bbest" ~ "/Volumes/GoogleDrive/My Drive/projects/dudek")
# Compiled field-data workbook (read below, one sheet per table).
d_xls <- path(dir_gdrive, "data/gvtp_hab_char_data_compiled_Final.xls")
# Auto-abbreviated column lookup (seed; see commented write_csv below).
cols_csv <- path(dir_gdrive, "data/gvtp_hab_char_columns.csv")
# Curated column lookup actually used for renaming (category/new/old).
cols_new_csv <- path(dir_gdrive, "data/gvtp_columns.csv")
# Analysis-ready response+predictor table written by this script.
d_csv <- here("data/pred_resp.csv")
# Cache directory for fitted models (run_maxent / run_randomForest).
dir_models <- here("data/models")
# excel_sheets(d_xls)
# read sheets of data ----
# One data frame per worksheet of the compiled field workbook.
plot <- read_excel(d_xls, "Plot", na = c("na", "n d")) # one row per 5x5-m plot
spp <- read_excel(d_xls, "Species")                    # species cover per plot
soil <- read_excel(d_xls, "Soil Layers")               # soil texture by depth layer
veg <- read_excel(d_xls, "Veg")
herbht <- read_excel(d_xls, "Herb Height")
gvtpht <- read_excel(d_xls, "GVTP Height Heads")
rdm <- read_excel(d_xls, "RDM")                        # residual dry matter samples
# Soil Layers
# plot 5x5-meter plot unique ID (1-40)
# spatial_data "Gaviota tarplant 2019 areal extent (n=20): As mapped during focused surveys in 2019;
# Gaviota tarplant CNDDB occurrences (n=10): Gaviota tarplant 2019 areal extent excluded;
# Gaviota tarplant suitable but unoccupied habitat (n=10): Suitable habitat defined for these purposes as all native and non-native grassland communities, and all soil textures occupied by Gaviota tarplant on site.
# "
# soil_layer Depth of the layer in inches starting from the soil surface.
# soil_texture Soil texture of layer using USDA NRCS's "A flow diagram for teaching texture by feel analysis"
# notes_soil notes regarding soil texture analysis
# transformations ----
# Build the analysis-ready plot table:
#   1. derive/clean the GVTP response variables,
#   2. recode soil & landscape predictors (factors, aspect -> N/E
#      components, ordinal slope codes),
#   3. join per-plot species cover and residual dry matter summaries.
plot <- plot %>%
  # Important vars:
  # X Soil texture: soil_texture
  # X Soil disturbance (5x5): soil_dist_cover
  # X Soil disturbance cir (20m diameter): soil_dist_cover_cir
  # X Bare ground: bare_grnd_cover
  # X Average weight of Residual dry matter: dry_wt_lbs_acre
  # X Herbaceous plant height: herbheight_avg_cm
  # X Total plant species cover: sum(Species.percent_cover_abs) = pct_plant_cover
  # X Non-native plant species cover: TODO: sum(native), sum(naturalized): pct_native_cover, pct_nonnative_cover
  # X GVTP abundance: gvtp_count
  # X GVTP cover: gvtp_cover
  # X GVTP plant heights (average): gvtp_height_cm_avg
  # X Number of GVTP inflorescences per plant (average): gvtp_heads
  # drop rows without a plot id
  filter(!is.na(plot)) %>%
  select(
    # not for modeling
    -spatial_data, -photo_sq_plot, -photo_circular_plot, -soil_dist_cause, -notes_plot, -`sample ID`, -description,
    # skipping
    -lime) %>%
  mutate(
    # RESPONSE: gvtp_count, gvtp_presence, gvtp_performance, gvtp_cover, gvtp_height_cm_avg, gvtp_heads, gvtp_reproductive_potential
    # binned count "100-499" replaced by a single representative value
    # (299) before numeric coercion -- TODO confirm choice of value
    gvtp_count = recode(gvtp_count, "100-499"="299") %>% as.numeric(),
    gvtp_presence = as.integer(gvtp_count > 0),
    gvtp_cover = as.numeric(gvtp_cover),
    gvtp_height_cm_avg = as.numeric(gvtp_height_cm_avg),
    gvtp_heads = as.numeric(heads_no_avg),
    gvtp_percent_vegetative = as.numeric(gvtp_percent_vegetative),
    gvtp_percent_flowering = as.numeric(gvtp_percent_flowering),
    gvtp_percent_fruiting = as.numeric(gvtp_percent_fruiting),
  ) %>%
  # absent measurements treated as zero (plots with no GVTP / below
  # detection for the trace metals)
  replace_na(list(
    gvtp_performance = 0,
    gvtp_height_cm_avg = 0,
    gvtp_heads = 0,
    molybdenum = 0,
    aluminum = 0,
    chromium = 0,
    mercury = 0,
    selenium = 0,
    silver = 0)) %>%
  mutate(
    # plants x heads-per-plant = total inflorescences per plot
    gvtp_reproductive_potential = gvtp_count * gvtp_heads,
    # SOIL: categorical soil descriptors as factors
    `estimated soil texture` = as.factor(`estimated soil texture`),
    `relative infiltration rate` = as.factor(`relative infiltration rate`),
    `organic matter` = as.factor(`organic matter`),
    # LANDSCAPE: aspect_north, aspect_east, aspect_cir_north, aspect_cir_east, slope_degrees, slope_degrees_cir
    slope_degrees = as.integer(slope_degrees),
    slope_degrees_cir = as.integer(slope_degrees_cir),
    # aspect: compass octant -> degrees -> radians -> circular N/E
    # components (avoids the 0/360 discontinuity).
    # NOTE(review): this recode runs counter-clockwise (NW=45, E=270),
    # so aspect_east = sin() is -1 for east-facing plots -- confirm the
    # intended sign convention.
    aspect_deg = recode(
      aspect,
      N=0, NW=45, W=90, SW=135, S=180, SE=225, E=270, NE=315) %>%
      as_units("degrees"),
    aspect = as.factor(aspect),
    aspect_rad = set_units(aspect_deg, "radians"),
    aspect_north = cos(aspect_rad) %>% as.numeric(),
    aspect_east = sin(aspect_rad) %>% as.numeric(),
    aspect_deg = as.numeric(aspect_deg),
    aspect_rad = as.numeric(aspect_rad),
    # same treatment for the 20-m circular plot aspect
    aspect_cir_deg = recode(
      aspect_cir,
      N=0, NW=45, W=90, SW=135, S=180, SE=225, E=270, NE=315) %>%
      as_units("degrees"),
    aspect_cir = as.factor(aspect_cir),
    aspect_cir_rad = set_units(aspect_cir_deg, "radians"),
    aspect_cir_north = cos(aspect_cir_rad) %>% as.numeric(),
    aspect_cir_east = sin(aspect_cir_rad) %>% as.numeric(),
    aspect_cir_deg = as.numeric(aspect_cir_deg),
    aspect_cir_rad = as.numeric(aspect_cir_rad),
    # LANDSCAPE: ordinal codes -- concave/top = 1, flat/mid = 0,
    # convex/toe = -1
    slope_shape = recode(
      slope_shape,
      concave=1, flat=0, convex=-1),
    slope_shape_cir = recode(
      slope_shape_cir,
      concave=1, flat=0, convex=-1),
    slope_degrees = as.integer(slope_degrees),
    slope_position = recode(
      slope_position,
      "top of slope"=1, "mid-slope"=0, "toe of slope"=-1),
    # LANDSCAPE: soil_dist_cover_cir
    # Q: [soil_dist_cover_cir] as abs percentage (%)
    soil_dist_cover_cir = as.integer(soil_dist_cover_cir),
    # BIOTIC: bare_grnd_cover, soil_dist_cover
    # Q: [bare_grnd_cover] as abs percentage (%)
    # Q: [soil_dist_cover] as abs percentage (%)
    bare_grnd_cover = as.integer(bare_grnd_cover),
    soil_dist_cover = as.integer(soil_dist_cover)) %>%
  # BIOTIC: total absolute plant cover per plot from the Species sheet
  left_join(
    spp %>%
      group_by(plot) %>%
      summarize(
        pct_plant_cover = sum(percent_cover_abs, na.rm = T)),
    by = "plot") %>%
  # BIOTIC: native vs naturalized cover and relative nativity
  # Q: does [pct_native] work vs original ask for [Native] & [Naturalized] "as relative cover like we did during the meeting"
  # TODO: sum(native), sum(naturalized)
  left_join(
    spp %>%
      group_by(plot, `Native/Naturalized`) %>%
      summarize(
        pct_cover = sum(percent_cover_abs, na.rm = T)) %>%
      pivot_wider(names_from = `Native/Naturalized`, values_from = pct_cover) %>%
      replace_na(list(NATIVE = 0, NATURALIZED = 0)) %>%
      mutate(
        pct_nativity = NATIVE / (NATIVE + NATURALIZED)),
    by = "plot") %>%
  rename(
    pct_native_cover = NATIVE,
    pct_nonnative_cover = NATURALIZED) %>%
  # BIOTIC: mean residual dry matter per plot, converted g -> lbs/acre
  left_join(
    rdm %>%
      group_by(Plot) %>%
      summarize(
        dry_wt_g = mean(`Weight Sample (g)`, na.rm=T)) %>%
      mutate(
        dry_wt_lbs_acre = dry_wt_g * 96.03341929),
    by = c("plot" = "Plot")) %>%
  # drop source column now renamed to gvtp_heads above
  select(-heads_no_avg)
# Sanity check: after the transformations every column must be numeric
# or factor -- no character columns may remain.
stopifnot(ncol(select_if(plot, is.character)) == 0)
# Drop zero-variance (constant) columns, e.g. mercury, selenium,
# silver; factors are coerced to their integer codes before computing
# variance. (TRUE spelled out instead of the reassignable T.)
plot <- select_if(plot, function(x) var(as.numeric(x), na.rm = TRUE) != 0)
# setup main data frame (d) ----
# Responses are every gvtp_* column; predictors are everything else
# except the plot id.
responses <- plot %>% select(starts_with("gvtp")) %>% names()
predictors <- setdiff(names(plot), c("plot", responses))
# Column order: plot id, then responses, then predictors.
d <- select(plot, plot, all_of(responses), all_of(predictors))
# abbreviate names
# First pass: auto-abbreviate column names; this d_cols is kept only as
# the seed for the commented write_csv and is immediately replaced by
# the curated lookup read below.
d_cols_abbr <- abbreviate(names(d), minlength = 4)
d_cols <- tibble(
  column_original = names(d_cols_abbr),
  column_abbreviated = d_cols_abbr)
#write_csv(d_cols, cols_csv)
# Curated lookup: category (cat), short name (new), original name (old).
d_cols <- read_csv(cols_new_csv, col_types = cols()) %>%
  select(cat = category, new = column_new, old = column_original) %>%
  arrange(cat, new)
# Every column of d must have a mapping in the lookup.
stopifnot(all(names(d) %in% d_cols$old))
# Response columns ("_response" category) present in d, by short name.
cols_resp <- d_cols %>%
  filter(
    cat == "_response",
    old %in% names(d)) %>%
  pull(new)
# Predictor columns: any category not starting with "_", present in d.
cols_pred <- d_cols %>%
  filter(
    str_detect(cat, "^[^_]"),
    old %in% names(d)) %>%
  pull(new)
# Rename d's columns to their short names (<- instead of = assignment).
names(d) <- d_cols$new[match(names(d), d_cols$old)]
# get columns ordered by category, variable
d_all <- select(d, any_of(d_cols$new))
d <- d_all
write_csv(d_all, d_csv)
# Predictor categories, excluding the bookkeeping "_" categories.
cols_cat <- unique(d_cols$cat) %>% setdiff(c("_other", "_response")) %>% sort()
# Responses to model: g_perf excluded.
cols_resp <- cols_resp %>% setdiff("g_perf") %>% sort()
# cols_pred
# functions ----
get_resp_pred_num_long <- function(d_all, col_resp, cols_pred){
  # Long-format table of numeric predictors alongside one response
  # column: columns <col_resp>, var (predictor name), value.
  # col_resp = "g_pres"
  # NOTE(review): all_of() inside pivot_longer()/arrange() is a
  # tidyselect helper used outside a select context -- this warns or
  # errors in recent tidyselect/dplyr; confirm against the pinned
  # package versions.
  d_all %>%
    select(all_of(col_resp), any_of(cols_pred)) %>%
    select_if(is.numeric) %>%
    pivot_longer(-all_of(col_resp), "var") %>%
    arrange(var, all_of(col_resp), value)
}
get_resp_pred_smry_stats <- function(d_resp_pred_num_long, col_resp){
  # Summary stats (min/mean/max/sd) of each predictor's values, split
  # by the response column, from the long table built by
  # get_resp_pred_num_long().
  # NOTE(review): all_of(col_resp) inside group_by() is a tidyselect
  # helper outside a select context -- confirm it groups by the
  # intended column under the pinned dplyr version.
  d_resp_pred_num_long %>%
    group_by(var, all_of(col_resp)) %>%
    summarize(
      min = min(value, na.rm=T),
      avg = mean(value, na.rm=T),
      max = max(value, na.rm=T),
      sd = sd(value, na.rm=T),
      .groups="drop")
}
get_var_ttest <- function(d_resp_pred_num_long, col_pred){
  # For each predictor variable (var), Welch one-way test of `value`
  # across levels of the response column, sorted by descending p-value.
  # BUG FIX: the original passed `data = .`, which inside summarize()
  # refers to the *whole* (grouped) data frame piped in, so every var
  # received the identical p-value computed over all rows; cur_data()
  # restricts the test to the current var's rows.
  # col_pred = "g_cnt"; d_resp_pred_num_long = d_cnt_lng
  frmla <- as.formula(glue("value ~ {col_pred}"))
  d_resp_pred_num_long %>%
    group_by(var) %>%
    summarize(
      t_test = oneway.test(frmla, data = cur_data(), var.equal = FALSE)$p.value) %>%
    arrange(desc(t_test))
}
get_pred1_mdls <- function(cat, resp){
  # Significant (p < 0.05, precomputed in data/pred_lm_p05.csv)
  # single-predictor linear models for one predictor category and one
  # response, keeping for each predictor whichever of the linear (lm1)
  # or quadratic (lm2) fits has the lower AIC.
  mdls <- read_csv(here("data/pred_lm_p05.csv"), col_types = cols()) %>%
    left_join(
      d_cols %>%
        filter(!str_starts(cat, "_")) %>%
        select(pred = new, cat), by = "pred")
  mdls %>%
    filter(
      # !! unquotes the function arguments so they are compared
      # against the data columns of the same names, e.g.:
      # cat == "landscape",
      # resp == "g_pres") %>%
      cat == !!cat,
      resp == !!resp) %>%
    # get lowest AIC of both linear models, without (lm1) and with x^2 (lm2) [n = 8 to 6]
    arrange(pred, AIC) %>%
    group_by(pred) %>%
    summarize(
      mdl = first(mdl),
      AIC = first(AIC))
}
get_cor <- function(d, preds, cor_threshold = 0.7){
  # Pairs of predictors whose Spearman correlation magnitude exceeds
  # the threshold, returned long: one row per predictor per correlated
  # pair, with cor_id identifying the pair.
  # preds = preds_lnd$pred; cor_threshold = 0.7
  d %>%
    select(all_of(preds)) %>%
    select(!where(is.factor)) %>% # drop: asp_cir;
    # TODO: figure out factors
    correlate(method = "spearman") %>%
    shave %>%
    gather(-term, key = "term2", value = "cor") %>%
    filter(abs(cor) > cor_threshold) %>%
    rowid_to_column(var = "cor_id") %>%
    pivot_longer(
      cols = starts_with("term"),
      names_to="term", values_to="pred") %>%
    select(-term)
}
filt_mdls_cor <- function(d_mdls, d_cor){
  # Among auto-correlated predictors, keep only the one whose
  # single-predictor model has the lowest AIC; iterate until no
  # correlated pairs remain.
  #   d_mdls: one row per predictor (pred, mdl, AIC)
  #   d_cor:  long correlation pairs from get_cor() (cor_id, pred, cor)
  # Returns d_mdls filtered to the retained predictors.
  mdls_cor <- d_mdls %>%
    left_join(d_cor, by="pred")
  # Predictors in no correlated pair are kept outright.
  preds_notcor <- mdls_cor %>%
    filter(is.na(cor_id)) %>%
    pull(pred)
  preds_cor <- mdls_cor %>%
    filter(!is.na(cor_id)) %>%
    arrange(AIC) %>%
    pull(pred) %>%
    unique()
  i <- 2
  # BUG FIX: condition was length(preds_cor > 0), which only worked by
  # accident (length of the comparison's logical vector); test the
  # vector length explicitly. Also i <- 2 instead of i = 2.
  while (length(preds_cor) > 0){
    message(glue("filt_mdls_cor() pass {i} (n={length(preds_cor)}): {paste(preds_cor, collapse = ' + ')}"))
    d_cor_w <- get_cor(d, preds_cor)
    # Within each correlated cluster keep the lowest-AIC predictor.
    preds_cor <- d_mdls %>%
      filter(pred %in% d_cor_w$pred) %>%
      left_join(d_cor_w, by="pred") %>%
      arrange(AIC) %>%
      group_by(cor_id) %>%
      summarize(
        pred = first(pred),
        mdl = first(mdl),
        AIC = first(AIC)) %>%
      pull(pred) %>%
      unique()
    if (length(preds_cor) == 1){
      preds_notcor <- c(preds_notcor, preds_cor)
      break()
    }
    # Re-check correlations among the survivors; uncorrelated ones are
    # finalized, correlated ones go around again.
    d_cor_w <- get_cor(d, preds_cor)
    preds_notcor <- c(
      preds_notcor,
      d_cor_w %>%
        filter(is.na(cor_id)) %>%
        pull(pred))
    preds_cor <- d_cor_w %>%
      filter(!is.na(cor_id)) %>%
      pull(pred) %>%
      unique()
    if (length(preds_cor) == 1){
      preds_notcor <- c(preds_notcor, preds_cor)
      break()
    }
    i <- i + 1
  }
  d_mdls %>%
    filter(pred %in% preds_notcor)
}
# Long-format numeric predictors vs presence/absence, plus per-group
# summary stats, for downstream exploration of g_pres.
d_pres_num_lng <- get_resp_pred_num_long(d_all, "g_pres", cols_pred)
d_pres_num_avg <- get_resp_pred_smry_stats(d_pres_num_lng, "g_pres")
is_binary <- function(x){ length(unique(x)) == 2 }
get_preds_sign_notcor <- function(cat, resp){
  # Significant single-predictor models (resp ~ pred, with or without a
  # squared term) for one category, reduced to a mutually uncorrelated
  # set of predictors.
  mdls <- get_pred1_mdls(resp = resp, cat = cat)
  # Nothing to de-correlate with zero or one predictor.
  if (nrow(mdls) <= 1)
    return(mdls)
  # Among auto-correlated predictor pairs, keep the lower-AIC model.
  mdls_keep <- filt_mdls_cor(mdls, get_cor(d, mdls$pred))
  # Sanity check: surviving predictors must be pairwise uncorrelated.
  stopifnot(nrow(get_cor(d, mdls_keep$pred)) == 0)
  mdls_keep
}
run_maxent <- function(cat, resp, preds, use_cache = T){
  # Fit and evaluate a MaxEnt model (dismo) for one predictor category
  # and one binary response; the data split, factor-level lookup,
  # fitted model and evaluation object are cached under
  # data/models/<cat>.<resp>.mx/ and reused when use_cache is TRUE.
  # silent args: d, dir_models
  # cat = "landscape"
  # resp = "g_pres"
  # preds = d_preds$pred
  library(dismo)
  # cache paths ----
  dir_mx <- glue("{dir_models}/{cat}.{resp}.mx")
  factors_csv <- glue("{dir_mx}/data_factors.csv")
  train_csv <- glue("{dir_mx}/data_train.csv")
  test_csv <- glue("{dir_mx}/data_test.csv")
  fit_rds <- glue("{dir_mx}/maxent_fit.rds")
  evaluate_rds <- glue("{dir_mx}/maxent_evaluate.rds")
  dir_create(dir_mx)
  # only trust the cache when every artifact exists
  has_cache <- all(file_exists(c(
    factors_csv, train_csv, test_csv, fit_rds, evaluate_rds)))
  if (has_cache & use_cache){
    d_train <- read_csv(train_csv, col_types = cols())
    d_test <- read_csv(test_csv, col_types = cols())
    d_preds_fac <- read_csv(factors_csv, col_types = cols())
    preds_fac <- unique(d_preds_fac$pred)
    mx <- read_rds(fit_rds)
    e <- read_rds(evaluate_rds)
    return(list(
      d_train = d_train,
      d_test = d_test,
      preds_fac = preds_fac,
      d_preds_fac = d_preds_fac,
      maxent = mx,
      evaluate = e))
  }
  # prep data ----
  d_preds <- d %>%
    select(all_of(preds))
  # flag predictors that are of class factor
  preds_fac <- d_preds %>%
    select_if(is.factor) %>%
    names()
  # factor level <-> integer code lookup, persisted so cached runs can
  # map maxent's integer inputs back to level labels
  d_preds_fac <- tibble(
    pred = preds_fac) %>%
    mutate(
      levels = map(pred, function(x){
        tibble(
          level = levels(d[[x]])) %>%
          rowid_to_column("integer")})) %>%
    unnest(levels)
  preds_fac <- unique(d_preds_fac$pred)
  # convert factor predictors to integer for maxent to run
  d_preds <- d_preds %>%
    mutate(
      across(where(is.factor), ~ as.integer(.x)))
  # combine response and predictors
  d_rp <- d %>%
    select(all_of(resp)) %>%
    bind_cols(
      d_preds)
  # split data into training (to fit maxent) and test (to evaluate), 80% and 20% respectively
  i_k <- kfold(d_rp, 5)
  d_train <- d_rp[i_k != 1, ]
  d_test <- d_rp[i_k == 1, ]
  d_train %>% write_csv(train_csv)
  d_test %>% write_csv(test_csv)
  d_preds_fac %>% write_csv(factors_csv)
  # log the class balance of each partition
  message("d_train response counts:")
  print(table(d_train[[resp]]))
  message("d_test response counts:")
  print(table(d_test[[resp]]))
  # run maxent ----
  # setup input args for maxent: presence/absence vector + predictors
  v_resp <- d_train %>% pull(resp)
  d_preds <- d_train %>% select(-all_of(resp))
  mx <- maxent(
    p = v_resp,
    x = d_preds,
    factors = preds_fac,
    path = dir_mx)
  write_rds(mx, fit_rds)
  # evaluate ----
  # dismo::evaluate() takes presence and absence test rows separately
  d_test_present <- d_test %>%
    filter(across(all_of(resp), ~ .x == 1)) %>%
    select(-all_of(resp))
  d_test_absent <- d_test %>%
    filter(across(all_of(resp), ~ .x == 0)) %>%
    select(-all_of(resp))
  e <- evaluate(
    p = d_test_present,
    a = d_test_absent,
    mx)
  write_rds(e, evaluate_rds)
  list(
    d_train = d_train,
    d_test = d_test,
    preds_fac = preds_fac,
    d_preds_fac = d_preds_fac,
    maxent = mx,
    evaluate = e)
}
run_randomForest <- function(cat, resp, preds, use_cache = TRUE){
  # Fit and evaluate a random forest (via caret, 10-fold CV) for one
  # predictor category and one response; the data split, fitted model
  # and evaluation are cached under data/models/<cat>.<resp>.rf/ and
  # reused when use_cache is TRUE.
  #   cat:   predictor category label (used in the cache path)
  #   resp:  response column name in d (binary responses are modeled
  #          as a 2-class factor; others as regression)
  #   preds: predictor column names in d
  # silent args: d, dir_models
  dir_mx <- glue("{dir_models}/{cat}.{resp}.rf")
  train_csv <- glue("{dir_mx}/data_train.csv")
  test_csv <- glue("{dir_mx}/data_test.csv")
  fit_rds <- glue("{dir_mx}/randomForest_fit.rds")
  evaluate_rds <- glue("{dir_mx}/randomForest_evaluate.rds")
  dir_create(dir_mx)
  # only trust the cache when every artifact exists
  has_cache <- all(file_exists(c(
    train_csv, test_csv, fit_rds, evaluate_rds)))
  if (has_cache && use_cache){
    return(list(
      d_train = read.csv(train_csv, stringsAsFactors = TRUE) %>% tibble(),
      d_test = read.csv(test_csv, stringsAsFactors = TRUE) %>% tibble(),
      randomForest = read_rds(fit_rds),
      evaluate = read_rds(evaluate_rds)))
  }
  # prep data ----
  d_rp <- d %>%
    select(all_of(resp), all_of(preds))
  if (!is_binary(d[[resp]])){
    # continuous response: plain 10-fold cross-validation
    ## http://www.sthda.com/english/articles/38-regression-model-validation/157-cross-validation-essentials-in-r/
    rf_trControl <- trainControl(
      method = "cv", number = 10, verboseIter = FALSE)
  } else {
    # binary response: recode 0/1 to a 2-class factor so caret reports
    # ROC/Sens/Spec via twoClassSummary.
    # BUG FIX: the original indexed d_rp$g_pres here, which silently
    # breaks for any binary response other than g_pres; index the resp
    # column generically instead.
    d_rp[[resp]] <- factor(c('absent','present')[d_rp[[resp]] + 1])
    rf_trControl <- trainControl(
      method = "cv", number = 10, verboseIter = FALSE,
      summaryFunction = twoClassSummary, classProbs = TRUE)
  }
  # split data into training (to fit randomForest) and test (to evaluate), 80% and 20% respectively
  i_k <- kfold(d_rp, 5)
  d_train <- d_rp[i_k != 1, ]
  d_test <- d_rp[i_k == 1, ]
  d_train %>% write_csv(train_csv)
  d_test %>% write_csv(test_csv)
  # run randomForest ----
  rf_formula <- as.formula(glue("{resp} ~ ."))
  # caret::train with 10-fold cross-validation (replaces an earlier
  # direct randomForest() call kept in version control history)
  rf <- train(
    rf_formula, d_train,
    method = "rf",
    trControl = rf_trControl,
    preProcess = c("center", "scale", "YeoJohnson", "nzv"))
  write_rds(rf, fit_rds)
  # evaluate on the held-out fold.
  # Binary response metrics: Accuracy, Kappa.
  # Continuous response metrics: RMSE, Rsquared, MAE
  #   (MAE <= RMSE; RMSE ~ MAE => many small errors,
  #    RMSE >> MAE => few large errors).
  e <- postResample(pred = predict(rf, d_test), obs = d_test[[resp]])
  write_rds(e, evaluate_rds)
  list(
    d_train = d_train,
    d_test = d_test,
    randomForest = rf,
    evaluate = e)
}
get_mdl_y <- function(mdl){
  # Observed vs predicted values for a fitted model result (as returned
  # by run_randomForest()/run_maxent()) across both train and test
  # partitions; returns columns partition/observed/predicted.
  # BUG FIX: the original used scalar ifelse() to pick the model, which
  # returns a one-element *list* (ifelse yields yes[1] of a list), so
  # predict() received a list rather than the model; and the trailing
  # [[1]] kept only the first prediction. Use if/else to extract the
  # model itself and keep the full prediction vector.
  m <- if ("randomForest" %in% names(mdl)) mdl$randomForest else mdl$maxent
  d_p <- bind_rows(
    tibble(
      mdl$d_test,
      partition = "test"),
    tibble(
      mdl$d_train,
      partition = "train")) %>%
    rename(observed = 1)
  d_p %>%
    mutate(
      predicted = predict(m, d_p)) %>%
    select(partition, observed, predicted)
}
get_nrmse <- function(cat, resp, mdl="rf", method = "sd"){
  # Normalized RMSE of a cached model's predictions on its cached test
  # split, so models with different response scales can be compared.
  #   mdl:    "rf" (caret randomForest) or "mx" (dismo maxent)
  #   method: normalization passed to INDperform::nrmse() (e.g. "sd")
  # cat = "biotic"; resp = "g_cnt"; mdl="rf"
  # normalize root mean square error for comparing performance of models with different response terms
  # * [How to normalize the RMSE](https://www.marinedatascience.co/blog/2019/01/07/normalizing-the-rmse/)
  # * [nrmse: Normalized Root Mean Square Error in saskiaotto/INDperform: Evaluation of Indicator Performances for Assessing Ecosystem States](https://rdrr.io/github/saskiaotto/INDperform/man/nrmse.html)
  # * [saskiaotto/INDperform: INDperform is an R package for evaluating ecological state indicators and assessing the ecological status based on a suite of indicators.](https://github.com/saskiaotto/INDperform/)
  # NOTE(review): shelf() here loads (and on first call installs) a
  # GitHub package as a side effect -- consider hoisting to setup.
  shelf(saskiaotto/INDperform)
  fxn <- ifelse(mdl == "rf", "randomForest", "maxent")
  dir_mdl <- glue("{dir_models}/{cat}.{resp}.{mdl}")
  m <- read_rds(glue("{dir_mdl}/{fxn}_fit.rds"))
  d_test <- read_csv(glue("{dir_mdl}/data_test.csv"), col_types = cols())
  nrmse(
    pred = predict(m, d_test),
    obs = d_test %>% pull(resp),
    method = method)
}
get_terms <- function(cat, resp, mdl="rf"){
  # Formula terms (predictor names joined by " + ") of a cached fitted
  # model, for reporting which predictors each model used.
  # cat = "biotic"; resp = "g_cnt"; mdl="rf"
  fxn <- ifelse(mdl == "rf", "randomForest", "maxent")
  dir_mdl <- glue("{dir_models}/{cat}.{resp}.{mdl}")
  m <- read_rds(glue("{dir_mdl}/{fxn}_fit.rds"))
  paste(attr(terms(m), "term.labels"), collapse = " + ")
}
get_accuracy <- function(cat, resp, mdl="rf"){
  # Classification accuracy of a cached model on its cached test split.
  # For rf, caret's postResample output already stores Accuracy; for
  # maxent, threshold predicted probabilities at the max(spec + sens)
  # cutoff and score against the first (response) column of the test
  # data.
  fxn <- ifelse(mdl == "rf", "randomForest", "maxent")
  dir_mdl <- glue("{dir_models}/{cat}.{resp}.{mdl}")
  e <- read_rds(glue("{dir_mdl}/{fxn}_evaluate.rds"))
  if(mdl == "rf")
    a <- e[["Accuracy"]]
  if (mdl == "mx"){
    thr <- threshold(e, 'spec_sens')
    m <- read_rds(glue("{dir_models}/{cat}.{resp}.{mdl}/{fxn}_fit.rds"))
    d_test <- read_csv(glue("{dir_models}/{cat}.{resp}.{mdl}/data_test.csv"), col_types = cols())
    y_prob <- suppressWarnings(predict(m, d_test))
    y_int <- as.integer(y_prob > thr)
    y_yes <- d_test[,1] == y_int
    a <- sum(y_yes)/nrow(d_test)
  }
  # NOTE(review): if mdl is neither "rf" nor "mx" this errors with
  # "object 'a' not found" -- confirm the intended input domain.
  a
}
get_stat <- function(cat, resp, mdl="rf", fxn="evaluate", stat = "RMSE"){
  # Pull one named statistic out of a cached fit or evaluate object.
  #   mdl:  "rf" or "mx"; fxn: "fit" or "evaluate"; stat: element name.
  # Maxent Accuracy is a special case (the dismo evaluate object has no
  # Accuracy element): threshold predicted probabilities at the
  # max(spec + sens) cutoff and score against the first (response)
  # column of the cached test data.
  # cat="landscape"; resp="g_pres"; mdl="rf"; fxn="evaluate"; stat = "Accuracy"
  fit <- ifelse(mdl == "rf", "randomForest", "maxent")
  o_rds <- glue("{dir_models}/{cat}.{resp}.{mdl}/{fit}_{fxn}.rds")
  o <- read_rds(o_rds)
  if (mdl == "mx" & stat == "Accuracy"){
    thr <- threshold(o, 'spec_sens')
    m <- read_rds(glue("{dir_models}/{cat}.{resp}.{mdl}/{fit}_fit.rds"))
    d_test <- read_csv(glue("{dir_models}/{cat}.{resp}.{mdl}/data_test.csv"), col_types = cols()) 
    y_prob <- suppressWarnings(predict(m, d_test))
    y_int <- as.integer(y_prob > thr)
    y_yes <- d_test[,1] == y_int
    Accuracy <- sum(y_yes)/nrow(d_test)
    return(Accuracy)
  }
  o[[stat]]
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.