content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
testlist <- list(x = structure(c(2.87034528687764e-306, 2.69356484133005e-164, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = 7:6)) result <- do.call(bravo:::colSumSq_matrix,testlist) str(result)
/bravo/inst/testfiles/colSumSq_matrix/libFuzzer_colSumSq_matrix/colSumSq_matrix_valgrind_files/1609959437-test.R
no_license
akhikolla/updated-only-Issues
R
false
false
278
r
testlist <- list(x = structure(c(2.87034528687764e-306, 2.69356484133005e-164, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = 7:6)) result <- do.call(bravo:::colSumSq_matrix,testlist) str(result)
# macd strategies on all series maxRows <- 1100 # used to initialize a matrix to store closing prices getOrders <- function(store, newRowList, currentPos, params) { allzero <- rep(0,length(newRowList)) pos <- allzero if (is.null(store)) store <- initStore(newRowList,params$series) else store <- updateStore(store, newRowList, params$series) cl7<- newRowList[[7]]$Close #input all close price data of series 7 clv7<-as.vector(cl7)#convert the data in to vector format(clv7 means close price of series 7) mean7ToDay<-mean(clv7[1:store$iter],na.rm = TRUE)#calculate the mean of all cuurent known data #of series 7 everyday (no over looking) if (store$iter > params$lookback) { startIndex <- store$iter - params$lookback for (i in 1:length(params$series)) { cl <- newRowList[[params$series[i]]]$Close clv<-as.vector(cl) meanToDay<-mean(clv[1:store$iter],na.rm = TRUE) ratioTo7 <-mean7ToDay/meanToDay ratioTo7<-floor(ratioTo7) xdata <- matrix(store$cl[startIndex:store$iter,i]) MACD <- last(MACD(xdata,12,26,9,maType="EMA"))[c("macd","signal")] if (MACD["macd"] > MACD["signal"]){ #if (MACD["macd"]>0){ pos[params$series[i]] <- -ratioTo7 #short } else if (MACD["macd"] < MACD["signal"]) { #else if (MACD["macd"]<0){ pos[params$series[i]] <- ratioTo7 #long } } } marketOrders <- pos return(list(store=store,marketOrders=marketOrders, limitOrders1=allzero, limitPrices1=allzero, limitOrders2=allzero, limitPrices2=allzero)) } initClStore <- function(newRowList,series) { clStore <- matrix(0,nrow=maxRows,ncol=length(series)) clStore <- updateClStore(clStore, newRowList, series, iter=1) return(clStore) } updateClStore <- function(clStore, newRowList, series, iter) { for (i in 1:length(series)) clStore[iter,i] <- as.numeric(newRowList[[series[i]]]$Close) return(clStore) } initStore <- function(newRowList,series) { return(list(iter=1,cl=initClStore(newRowList,series))) } updateStore <- function(store, newRowList, series) { store$iter <- store$iter + 1 store$cl <- 
updateClStore(store$cl,newRowList,series,store$iter) return(store) }
/strategies/macd.R
no_license
happy369300/R-project-1
R
false
false
2,317
r
# macd strategies on all series maxRows <- 1100 # used to initialize a matrix to store closing prices getOrders <- function(store, newRowList, currentPos, params) { allzero <- rep(0,length(newRowList)) pos <- allzero if (is.null(store)) store <- initStore(newRowList,params$series) else store <- updateStore(store, newRowList, params$series) cl7<- newRowList[[7]]$Close #input all close price data of series 7 clv7<-as.vector(cl7)#convert the data in to vector format(clv7 means close price of series 7) mean7ToDay<-mean(clv7[1:store$iter],na.rm = TRUE)#calculate the mean of all cuurent known data #of series 7 everyday (no over looking) if (store$iter > params$lookback) { startIndex <- store$iter - params$lookback for (i in 1:length(params$series)) { cl <- newRowList[[params$series[i]]]$Close clv<-as.vector(cl) meanToDay<-mean(clv[1:store$iter],na.rm = TRUE) ratioTo7 <-mean7ToDay/meanToDay ratioTo7<-floor(ratioTo7) xdata <- matrix(store$cl[startIndex:store$iter,i]) MACD <- last(MACD(xdata,12,26,9,maType="EMA"))[c("macd","signal")] if (MACD["macd"] > MACD["signal"]){ #if (MACD["macd"]>0){ pos[params$series[i]] <- -ratioTo7 #short } else if (MACD["macd"] < MACD["signal"]) { #else if (MACD["macd"]<0){ pos[params$series[i]] <- ratioTo7 #long } } } marketOrders <- pos return(list(store=store,marketOrders=marketOrders, limitOrders1=allzero, limitPrices1=allzero, limitOrders2=allzero, limitPrices2=allzero)) } initClStore <- function(newRowList,series) { clStore <- matrix(0,nrow=maxRows,ncol=length(series)) clStore <- updateClStore(clStore, newRowList, series, iter=1) return(clStore) } updateClStore <- function(clStore, newRowList, series, iter) { for (i in 1:length(series)) clStore[iter,i] <- as.numeric(newRowList[[series[i]]]$Close) return(clStore) } initStore <- function(newRowList,series) { return(list(iter=1,cl=initClStore(newRowList,series))) } updateStore <- function(store, newRowList, series) { store$iter <- store$iter + 1 store$cl <- 
updateClStore(store$cl,newRowList,series,store$iter) return(store) }
start=189 end=260 longplot=FALSE tower.raw.syv<-read.csv('Syv_TowerData_2015.csv',skip=1, header=TRUE) tower.names.syv<-colnames(read.csv('Syv_TowerData_2015.csv')) names(tower.raw.syv)<-tower.names.syv tower.match.syv<-tower.raw.syv[tower.raw.syv$DTIME>=start & tower.raw.syv$DTIME<end,] #Ustar filter #tower.match.syv[tower.match.syv$UST<0.2,6:45]<-(-9999) #tower.match.syv[tower.match.syv$WD>330 | tower.match.syv$WD<90, 6:45]<-(-9999) tower.match.syv[tower.match.syv==-9999]<-NA #WCR Tower tower.raw.wcr<-read.csv('WCR_TowerData_2015.csv',skip=1, header=TRUE) tower.names.wcr<-colnames(read.csv('WCR_TowerData_2015.csv')) names(tower.raw.wcr)<-tower.names.wcr tower.match.wcr<-tower.raw.wcr[tower.raw.wcr$DTIME>=start & tower.raw.wcr$DTIME<end,] #tower.match.wcr<-tower.match.wcr[1:(nrow(tower.match.wcr)-1),] tower.match.wcr[tower.match.wcr==-9999]<-NA source('Sapflow_plotscale.R') #rm(list=setdiff(ls(), c("tower.match.syv", "syv.flow","flux.syv", 'syv.tree', # "tower.match.wcr", "wcr.flow", 'flux.wcr','wcr.tree',"start","end"))) timecol<-rep('black', 3408) timecol[tower.match.syv$HRMIN>600 & tower.match.syv$HRMIN<=1300]<-'orange' timecol[tower.match.syv$HRMIN>1300 & tower.match.syv$HRMIN<=2200]<-'blue' #These take a good bit of time #par(mfrow=c(2,2)) if (longplot==TRUE){ for (i in 1:20){ plot(tower.match.syv$VPD[timecol!='black'], flux.syv[timecol!='black',i], main=paste('syv',syv.tree$CC[i], syv.tree$SPP[i], i), col=timecol[timecol!='black'], pch=".", xlab='VPD', ylab='Flux', cex=6) print(paste(round(i*100/34), '%', sep='')) } for (i in 1:14){ plot((tower.match.wcr$VPD[timecol!='black']), (flux.wcr[timecol!='black',i]), main=paste('wcr',wcr.tree$CC[i], wcr.tree$SPP[i], i), col=timecol[timecol!='black'], pch=".", xlab='VPD', ylab='Flux', cex=6) print(paste(round((i+20)*100/34), '%', sep='')) } } #For PAR; will normally comment out par(mfrow=c(2,2)) if (longplot==TRUE){ for (i in 1:20){ plot(tower.match.syv$PAR[timecol!='black'], flux.syv[timecol!='black',i], 
main=paste('syv',syv.tree$CC[i], syv.tree$SPP[i], i), col=timecol[timecol!='black'], pch=".", cex=6) print(paste(round(i*100/34), '%', sep='')) } for (i in 1:14){ plot((tower.match.wcr$PAR[timecol!='black']), (flux.wcr[timecol!='black',i]), main=paste('wcr',wcr.tree$CC[i], wcr.tree$SPP[i], i), col=timecol[timecol!='black'], pch=".", cex=6) print(paste(round((i+20)*100/34), '%', sep='')) } } if (prepMEEC=TRUE){ par(mfrow=c(2,2), mar=c(4.1,4.5,2,2)) timecol<-rep('black', 3408) timecol[tower.match.syv$HRMIN>600 & tower.match.syv$HRMIN<=1300]<-'light blue' timecol[tower.match.syv$HRMIN>1300 & tower.match.syv$HRMIN<=2200]<-'blue' for (i in 12){ plot(tower.match.syv$VPD[timecol!='black'], flux.syv[timecol!='black',i], main="Vapor Pressure Deficit", col=timecol[timecol!='black'], pch="." ,cex=6, ylab="VPD", xaxt='n', xlab='', cex.axis=1.5,cex.main=1.5,cex.lab=1.5, font.axis=2,font.main=2,font.lab=2) print(paste(round(i*100/34), '%', sep='')) } for (i in 12){ plot(tower.match.syv$PAR[timecol!='black'], flux.syv[timecol!='black',i], main="Light", col=timecol[timecol!='black'], pch=".", cex=6, ylab="PAR", xlab='', xaxt='n', cex.axis=1.5,cex.main=1.5,cex.lab=1.5, font.axis=2,font.main=2,font.lab=2) print(paste(round(i*100/34), '%', sep='')) } for (i in 12){ plot(tower.match.syv$TA[timecol!='black'], flux.syv[timecol!='black',i], main="Air Temperature", col=timecol[timecol!='black'], pch=".", cex=6, ylab="TA",xlab="Flux", cex.axis=1.5,cex.main=1.5, cex.lab=1.5, font.axis=2,font.main=2,font.lab=2, xaxt='n') print(paste(round(i*100/34), '%', sep='')) } for (i in 12){ plot(syv.tempavg[timecol!='black'], flux.syv[timecol!='black',i], main="Surface Temperature", col=timecol[timecol!='black'], pch=".", cex=6, ylab="TS",xlab="Flux", cex.axis=1.5,cex.main=1.5,cex.lab=1.5, font.axis=2,font.main=2,font.lab=2, xaxt='n') print(paste(round(i*100/34), '%', sep='')) } } #Hysterisis par(mfrow=c(2,2)) ylim.syv<-c(2e-05,4.2e-05,1.5e-05,3.5e-05) ylim.wcr<-c(5e-05,3e-05,3.8e-05,6e-06) for (j in 
dry[1:9]){ #dry[1:9] day<-j ylim.count<-1 for (i in c(2,6,7,9)){ plot(tower.match.wcr$VPD[tower.match.wcr$DOY==day], flux.wcr[tower.match.wcr$DOY==day,i], main=paste('wcr',wcr.tree$CC[i], wcr.tree$SPP[i], i, 'DOY',j), col=timecol[tower.match.wcr$DOY==day], pch="*", xlim=c(0,3), ylim=c(0,ylim.wcr[ylim.count]), cex=2) #print(paste(round(i*100/34), '%', sep='')) ylim.count<-ylim.count+1 } ylim.count<-1 for (i in c(2,5,9,12)){ # c(2,5,9,12) plot(tower.match.syv$VPD[tower.match.syv$DOY==day], flux.syv[tower.match.syv$DOY==day,i], main=paste('syv',syv.tree$CC[i], syv.tree$SPP[i], i, 'DOY',j), col=timecol[tower.match.syv$DOY==day], pch="*", xlim=c(0,3),ylim=c(0,ylim.syv[ylim.count]), cex=2, xlab='VPD', ylab='Flux') #print(paste(round(i*100/34), '%', sep='')) ylim.count<-ylim.count+1 } } #Now with light as covariate par(mfrow=c(2,2)) for (j in dry[1:9]){ day<-j for (i in c(2,6,7,9)){ plot(tower.match.wcr$PAR[tower.match.wcr$DOY==day], flux.wcr[tower.match.wcr$DOY==day,i], main=paste('wcr',wcr.tree$CC[i], wcr.tree$SPP[i], i, 'DOY',j), col=timecol[tower.match.wcr$DOY==day], pch="*", cex=2) #print(paste(round(i*100/34), '%', sep='')) ylim.count<-ylim.count+1 } for (i in c(2,5,9,12)){ plot(tower.match.syv$PAR[tower.match.syv$DOY==day], flux.syv[tower.match.syv$DOY==day,i], main=paste('syv',syv.tree$CC[i], syv.tree$SPP[i], i, 'DOY',j), col=timecol[tower.match.syv$DOY==day], pch="*", cex=2) #print(paste(round(i*100/34), '%', sep='')) } } #Fuck around with some plots and stuff #nigttime LE, paired t.test(tower.match.syv$LE[tower.match.syv$HRMIN<600 | tower.match.syv$HRMIN>2100], tower.match.wcr$LE[tower.match.syv$HRMIN<600 | tower.match.syv$HRMIN>2100],paired=TRUE) #nighttime VPD, paired t.test(tower.match.syv$VPD[tower.match.syv$HRMIN<600 | tower.match.syv$HRMIN>2100], tower.match.wcr$VPD[tower.match.syv$HRMIN<600 | tower.match.syv$HRMIN>2100],paired=TRUE) #daytime LE, paired t.test(tower.match.syv$LE[tower.match.syv$HRMIN>600 & tower.match.syv$HRMIN<2100], 
tower.match.wcr$LE[tower.match.syv$HRMIN>600 & tower.match.syv$HRMIN<2100],paired=TRUE) #daytime VPD, paired t.test(tower.match.syv$VPD[tower.match.syv$HRMIN>600 & tower.match.syv$HRMIN<2100], tower.match.wcr$VPD[tower.match.syv$HRMIN>600 & tower.match.syv$HRMIN<2100],paired=TRUE) # #LE diel profile # plot(aggregate(tower.match.wcr$LE, by=list(tower.match.wcr$HRMIN), FUN=mean, na.rm=TRUE), ylim=c(-20,250), col='dark gray',main='LE', pch='*', cex=2, xlab='HH:MM') # points(aggregate(tower.match.syv$LE, by=list(tower.match.syv$HRMIN), FUN=mean, na.rm=TRUE), ylim=c(-20,250), col='black', pch='*', cex=2) # legend(0,225,legend=c('SYV', 'WCR'), col=c('black', 'dark gray'), pch='*') par(mfrow=c(2,2), mar=c(4,4.5,2,1.5)) #VPD diel profile plot(aggregate(tower.match.wcr$VPD, by=list(tower.match.wcr$HRMIN), FUN=mean, na.rm=TRUE), ylim=c(0,1.3), main='Vapor Pressure Deficit', col='dark gray', pch='*', cex=2, xlab='HH:MM', ylab='VPD', cex.lab=1.5,cex.axis=1.5,cex.main=1.5,font.main=2,font.axis=2,font.lab=2) points(aggregate(tower.match.syv$VPD, by=list(tower.match.syv$HRMIN), FUN=mean, na.rm=TRUE), ylim=c(-0,1.3), col='black', pch='*', cex=2) legend(0,1.18,legend=c('SYV', 'WCR'), col=c('black', 'dark gray'), pch='*') #PAR diel profile plot(aggregate(tower.match.wcr$PAR, by=list(tower.match.wcr$HRMIN), FUN=mean, na.rm=TRUE), ylim=c(0,1700), col='dark gray',main='Light', pch='*', cex=2, xlab='HH:MM', ylab='PAR', cex.lab=1.5,cex.axis=1.5,cex.main=1.5,font.main=2,font.axis=2,font.lab=2) points(aggregate(tower.match.syv$PAR, by=list(tower.match.syv$HRMIN), FUN=mean, na.rm=TRUE), ylim=c(0,1700), col='black', pch='*', cex=2) legend(0,1600,legend=c('SYV', 'WCR'), col=c('black', 'dark gray'), pch='*') #TA diel profile plot(aggregate(tower.match.wcr$TA, by=list(tower.match.wcr$HRMIN), FUN=mean, na.rm=TRUE), ylim=c(14,24), main='Air Temperature', col='dark gray', pch='*', cex=2, xlab='HH:MM', ylab="TA", cex.lab=1.5,cex.axis=1.5,cex.main=1.5,font.main=2,font.axis=2,font.lab=2) 
points(aggregate(tower.match.syv$TA, by=list(tower.match.syv$HRMIN), FUN=mean, na.rm=TRUE), ylim=c(14,24), col='black', pch='*', cex=2) legend(0,23.4,legend=c('SYV', 'WCR'), col=c('black', 'dark gray'), pch='*') #TS diel profile (must have run apogee simple process) plot(wcr.rp.temp$x, ylim=c(14,24), main='Surface Temperature', col='dark gray', pch='*', cex=2, xlab='HH:MM', xaxt='n', ylab="TS", cex.lab=1.5,cex.axis=1.5,cex.main=1.5,font.main=2,font.axis=2,font.lab=2) points(syv.rp.temp$x, ylim=c(14,24), col='black', pch='*', cex=2) legend(0,23.4,legend=c('SYV', 'WCR'), col=c('black', 'dark gray'), pch='*') axis(side=1, at=c(0,10,20,30,40), labels=c(0,500,1000,1500,2000), cex.axis=1.5,font=2)
/OlderCode/Tower_vs_sap.R
no_license
bblakely/SapFlux
R
false
false
9,018
r
start=189 end=260 longplot=FALSE tower.raw.syv<-read.csv('Syv_TowerData_2015.csv',skip=1, header=TRUE) tower.names.syv<-colnames(read.csv('Syv_TowerData_2015.csv')) names(tower.raw.syv)<-tower.names.syv tower.match.syv<-tower.raw.syv[tower.raw.syv$DTIME>=start & tower.raw.syv$DTIME<end,] #Ustar filter #tower.match.syv[tower.match.syv$UST<0.2,6:45]<-(-9999) #tower.match.syv[tower.match.syv$WD>330 | tower.match.syv$WD<90, 6:45]<-(-9999) tower.match.syv[tower.match.syv==-9999]<-NA #WCR Tower tower.raw.wcr<-read.csv('WCR_TowerData_2015.csv',skip=1, header=TRUE) tower.names.wcr<-colnames(read.csv('WCR_TowerData_2015.csv')) names(tower.raw.wcr)<-tower.names.wcr tower.match.wcr<-tower.raw.wcr[tower.raw.wcr$DTIME>=start & tower.raw.wcr$DTIME<end,] #tower.match.wcr<-tower.match.wcr[1:(nrow(tower.match.wcr)-1),] tower.match.wcr[tower.match.wcr==-9999]<-NA source('Sapflow_plotscale.R') #rm(list=setdiff(ls(), c("tower.match.syv", "syv.flow","flux.syv", 'syv.tree', # "tower.match.wcr", "wcr.flow", 'flux.wcr','wcr.tree',"start","end"))) timecol<-rep('black', 3408) timecol[tower.match.syv$HRMIN>600 & tower.match.syv$HRMIN<=1300]<-'orange' timecol[tower.match.syv$HRMIN>1300 & tower.match.syv$HRMIN<=2200]<-'blue' #These take a good bit of time #par(mfrow=c(2,2)) if (longplot==TRUE){ for (i in 1:20){ plot(tower.match.syv$VPD[timecol!='black'], flux.syv[timecol!='black',i], main=paste('syv',syv.tree$CC[i], syv.tree$SPP[i], i), col=timecol[timecol!='black'], pch=".", xlab='VPD', ylab='Flux', cex=6) print(paste(round(i*100/34), '%', sep='')) } for (i in 1:14){ plot((tower.match.wcr$VPD[timecol!='black']), (flux.wcr[timecol!='black',i]), main=paste('wcr',wcr.tree$CC[i], wcr.tree$SPP[i], i), col=timecol[timecol!='black'], pch=".", xlab='VPD', ylab='Flux', cex=6) print(paste(round((i+20)*100/34), '%', sep='')) } } #For PAR; will normally comment out par(mfrow=c(2,2)) if (longplot==TRUE){ for (i in 1:20){ plot(tower.match.syv$PAR[timecol!='black'], flux.syv[timecol!='black',i], 
main=paste('syv',syv.tree$CC[i], syv.tree$SPP[i], i), col=timecol[timecol!='black'], pch=".", cex=6) print(paste(round(i*100/34), '%', sep='')) } for (i in 1:14){ plot((tower.match.wcr$PAR[timecol!='black']), (flux.wcr[timecol!='black',i]), main=paste('wcr',wcr.tree$CC[i], wcr.tree$SPP[i], i), col=timecol[timecol!='black'], pch=".", cex=6) print(paste(round((i+20)*100/34), '%', sep='')) } } if (prepMEEC=TRUE){ par(mfrow=c(2,2), mar=c(4.1,4.5,2,2)) timecol<-rep('black', 3408) timecol[tower.match.syv$HRMIN>600 & tower.match.syv$HRMIN<=1300]<-'light blue' timecol[tower.match.syv$HRMIN>1300 & tower.match.syv$HRMIN<=2200]<-'blue' for (i in 12){ plot(tower.match.syv$VPD[timecol!='black'], flux.syv[timecol!='black',i], main="Vapor Pressure Deficit", col=timecol[timecol!='black'], pch="." ,cex=6, ylab="VPD", xaxt='n', xlab='', cex.axis=1.5,cex.main=1.5,cex.lab=1.5, font.axis=2,font.main=2,font.lab=2) print(paste(round(i*100/34), '%', sep='')) } for (i in 12){ plot(tower.match.syv$PAR[timecol!='black'], flux.syv[timecol!='black',i], main="Light", col=timecol[timecol!='black'], pch=".", cex=6, ylab="PAR", xlab='', xaxt='n', cex.axis=1.5,cex.main=1.5,cex.lab=1.5, font.axis=2,font.main=2,font.lab=2) print(paste(round(i*100/34), '%', sep='')) } for (i in 12){ plot(tower.match.syv$TA[timecol!='black'], flux.syv[timecol!='black',i], main="Air Temperature", col=timecol[timecol!='black'], pch=".", cex=6, ylab="TA",xlab="Flux", cex.axis=1.5,cex.main=1.5, cex.lab=1.5, font.axis=2,font.main=2,font.lab=2, xaxt='n') print(paste(round(i*100/34), '%', sep='')) } for (i in 12){ plot(syv.tempavg[timecol!='black'], flux.syv[timecol!='black',i], main="Surface Temperature", col=timecol[timecol!='black'], pch=".", cex=6, ylab="TS",xlab="Flux", cex.axis=1.5,cex.main=1.5,cex.lab=1.5, font.axis=2,font.main=2,font.lab=2, xaxt='n') print(paste(round(i*100/34), '%', sep='')) } } #Hysterisis par(mfrow=c(2,2)) ylim.syv<-c(2e-05,4.2e-05,1.5e-05,3.5e-05) ylim.wcr<-c(5e-05,3e-05,3.8e-05,6e-06) for (j in 
dry[1:9]){ #dry[1:9] day<-j ylim.count<-1 for (i in c(2,6,7,9)){ plot(tower.match.wcr$VPD[tower.match.wcr$DOY==day], flux.wcr[tower.match.wcr$DOY==day,i], main=paste('wcr',wcr.tree$CC[i], wcr.tree$SPP[i], i, 'DOY',j), col=timecol[tower.match.wcr$DOY==day], pch="*", xlim=c(0,3), ylim=c(0,ylim.wcr[ylim.count]), cex=2) #print(paste(round(i*100/34), '%', sep='')) ylim.count<-ylim.count+1 } ylim.count<-1 for (i in c(2,5,9,12)){ # c(2,5,9,12) plot(tower.match.syv$VPD[tower.match.syv$DOY==day], flux.syv[tower.match.syv$DOY==day,i], main=paste('syv',syv.tree$CC[i], syv.tree$SPP[i], i, 'DOY',j), col=timecol[tower.match.syv$DOY==day], pch="*", xlim=c(0,3),ylim=c(0,ylim.syv[ylim.count]), cex=2, xlab='VPD', ylab='Flux') #print(paste(round(i*100/34), '%', sep='')) ylim.count<-ylim.count+1 } } #Now with light as covariate par(mfrow=c(2,2)) for (j in dry[1:9]){ day<-j for (i in c(2,6,7,9)){ plot(tower.match.wcr$PAR[tower.match.wcr$DOY==day], flux.wcr[tower.match.wcr$DOY==day,i], main=paste('wcr',wcr.tree$CC[i], wcr.tree$SPP[i], i, 'DOY',j), col=timecol[tower.match.wcr$DOY==day], pch="*", cex=2) #print(paste(round(i*100/34), '%', sep='')) ylim.count<-ylim.count+1 } for (i in c(2,5,9,12)){ plot(tower.match.syv$PAR[tower.match.syv$DOY==day], flux.syv[tower.match.syv$DOY==day,i], main=paste('syv',syv.tree$CC[i], syv.tree$SPP[i], i, 'DOY',j), col=timecol[tower.match.syv$DOY==day], pch="*", cex=2) #print(paste(round(i*100/34), '%', sep='')) } } #Fuck around with some plots and stuff #nigttime LE, paired t.test(tower.match.syv$LE[tower.match.syv$HRMIN<600 | tower.match.syv$HRMIN>2100], tower.match.wcr$LE[tower.match.syv$HRMIN<600 | tower.match.syv$HRMIN>2100],paired=TRUE) #nighttime VPD, paired t.test(tower.match.syv$VPD[tower.match.syv$HRMIN<600 | tower.match.syv$HRMIN>2100], tower.match.wcr$VPD[tower.match.syv$HRMIN<600 | tower.match.syv$HRMIN>2100],paired=TRUE) #daytime LE, paired t.test(tower.match.syv$LE[tower.match.syv$HRMIN>600 & tower.match.syv$HRMIN<2100], 
tower.match.wcr$LE[tower.match.syv$HRMIN>600 & tower.match.syv$HRMIN<2100],paired=TRUE) #daytime VPD, paired t.test(tower.match.syv$VPD[tower.match.syv$HRMIN>600 & tower.match.syv$HRMIN<2100], tower.match.wcr$VPD[tower.match.syv$HRMIN>600 & tower.match.syv$HRMIN<2100],paired=TRUE) # #LE diel profile # plot(aggregate(tower.match.wcr$LE, by=list(tower.match.wcr$HRMIN), FUN=mean, na.rm=TRUE), ylim=c(-20,250), col='dark gray',main='LE', pch='*', cex=2, xlab='HH:MM') # points(aggregate(tower.match.syv$LE, by=list(tower.match.syv$HRMIN), FUN=mean, na.rm=TRUE), ylim=c(-20,250), col='black', pch='*', cex=2) # legend(0,225,legend=c('SYV', 'WCR'), col=c('black', 'dark gray'), pch='*') par(mfrow=c(2,2), mar=c(4,4.5,2,1.5)) #VPD diel profile plot(aggregate(tower.match.wcr$VPD, by=list(tower.match.wcr$HRMIN), FUN=mean, na.rm=TRUE), ylim=c(0,1.3), main='Vapor Pressure Deficit', col='dark gray', pch='*', cex=2, xlab='HH:MM', ylab='VPD', cex.lab=1.5,cex.axis=1.5,cex.main=1.5,font.main=2,font.axis=2,font.lab=2) points(aggregate(tower.match.syv$VPD, by=list(tower.match.syv$HRMIN), FUN=mean, na.rm=TRUE), ylim=c(-0,1.3), col='black', pch='*', cex=2) legend(0,1.18,legend=c('SYV', 'WCR'), col=c('black', 'dark gray'), pch='*') #PAR diel profile plot(aggregate(tower.match.wcr$PAR, by=list(tower.match.wcr$HRMIN), FUN=mean, na.rm=TRUE), ylim=c(0,1700), col='dark gray',main='Light', pch='*', cex=2, xlab='HH:MM', ylab='PAR', cex.lab=1.5,cex.axis=1.5,cex.main=1.5,font.main=2,font.axis=2,font.lab=2) points(aggregate(tower.match.syv$PAR, by=list(tower.match.syv$HRMIN), FUN=mean, na.rm=TRUE), ylim=c(0,1700), col='black', pch='*', cex=2) legend(0,1600,legend=c('SYV', 'WCR'), col=c('black', 'dark gray'), pch='*') #TA diel profile plot(aggregate(tower.match.wcr$TA, by=list(tower.match.wcr$HRMIN), FUN=mean, na.rm=TRUE), ylim=c(14,24), main='Air Temperature', col='dark gray', pch='*', cex=2, xlab='HH:MM', ylab="TA", cex.lab=1.5,cex.axis=1.5,cex.main=1.5,font.main=2,font.axis=2,font.lab=2) 
points(aggregate(tower.match.syv$TA, by=list(tower.match.syv$HRMIN), FUN=mean, na.rm=TRUE), ylim=c(14,24), col='black', pch='*', cex=2) legend(0,23.4,legend=c('SYV', 'WCR'), col=c('black', 'dark gray'), pch='*') #TS diel profile (must have run apogee simple process) plot(wcr.rp.temp$x, ylim=c(14,24), main='Surface Temperature', col='dark gray', pch='*', cex=2, xlab='HH:MM', xaxt='n', ylab="TS", cex.lab=1.5,cex.axis=1.5,cex.main=1.5,font.main=2,font.axis=2,font.lab=2) points(syv.rp.temp$x, ylim=c(14,24), col='black', pch='*', cex=2) legend(0,23.4,legend=c('SYV', 'WCR'), col=c('black', 'dark gray'), pch='*') axis(side=1, at=c(0,10,20,30,40), labels=c(0,500,1000,1500,2000), cex.axis=1.5,font=2)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sample_data_info.R \docType{data} \name{candidate_ffls_mirna_all_part2} \alias{candidate_ffls_mirna_all_part2} \title{candidate_ffls_mirna_all_part2} \format{ A DataFrame (13240974 x 9) with the following columns: \describe{ \item{mirna}{miRNA in candidate FFL [Accession Number]} \item{tf}{TF in candidate FFL [Ensembl ID]} \item{targetgene}{target gene in candidate FFL [Ensembl ID]} \item{TARGETSCAN}{indicator of whether miRNA-target gene or miRNA-TF interaction is in TargetScan v7.2 database} \item{MIRTARBASE}{indicator of whether miRNA-target gene or miRNA-TF interaction is in miRTarBase v7.0 database} \item{MIRDB}{indicator of whether miRNA-target gene or miRNA-TF interaction is in miRDB v6.0 database} \item{MIRANDA}{indicator of whether miRNA-target gene or miRNA-TF interaction is in miRanda v3.0 database} \item{TRRUST}{indicator of whether TF-target gene or miRNA-TF interaction is in TRRUST v2.0 database} \item{ENCODE}{indicator of whether TF-target gene or miRNA-TF interaction is in ENCODE database} } } \usage{ candidate_ffls_mirna_all_part2 } \description{ Second half of all possible candidate miRNA FFLs assembled from databases } \keyword{datasets}
/man/candidate_ffls_mirna_all_part2.Rd
no_license
th789/fflmediation
R
false
true
1,253
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sample_data_info.R \docType{data} \name{candidate_ffls_mirna_all_part2} \alias{candidate_ffls_mirna_all_part2} \title{candidate_ffls_mirna_all_part2} \format{ A DataFrame (13240974 x 9) with the following columns: \describe{ \item{mirna}{miRNA in candidate FFL [Accession Number]} \item{tf}{TF in candidate FFL [Ensembl ID]} \item{targetgene}{target gene in candidate FFL [Ensembl ID]} \item{TARGETSCAN}{indicator of whether miRNA-target gene or miRNA-TF interaction is in TargetScan v7.2 database} \item{MIRTARBASE}{indicator of whether miRNA-target gene or miRNA-TF interaction is in miRTarBase v7.0 database} \item{MIRDB}{indicator of whether miRNA-target gene or miRNA-TF interaction is in miRDB v6.0 database} \item{MIRANDA}{indicator of whether miRNA-target gene or miRNA-TF interaction is in miRanda v3.0 database} \item{TRRUST}{indicator of whether TF-target gene or miRNA-TF interaction is in TRRUST v2.0 database} \item{ENCODE}{indicator of whether TF-target gene or miRNA-TF interaction is in ENCODE database} } } \usage{ candidate_ffls_mirna_all_part2 } \description{ Second half of all possible candidate miRNA FFLs assembled from databases } \keyword{datasets}
library(dplyr) library(ggplot2) library(plotly) library(DT) ## Import and read data setwd("~/Documents/bootcamp007_project/Project3-WebScraping/ReganYee/Wayback/") reddit = read.csv("wayback.csv") #reddit[reddit$snapshot_datetime == '20161109103912',] reddit$upvotes = as.numeric(as.character(reddit$upvotes)) reddit$upvotes = imputedupvotes = Hmisc::impute(reddit$upvotes, "random") reddit$snapshot_datetime = as.character(reddit$snapshot_datetime) reddit$snapshot_date = as.character(reddit$snapshot_date) reddit$snapshot_time = sapply(reddit$snapshot_datetime, function(x) substring(x,9,14)) reddit$submit_date = sapply(reddit$submit_datetime, function(x) substring(x,1,10)) reddit$submit_hour = sapply(reddit$submit_datetime, function(x) substring(x,12,13)) reddit = reddit %>% filter(snapshot_date != 20160629) reddit$comments = as.character(reddit$comments) delimited_list = strsplit(reddit$comments, split = " ") comms = sapply(delimited_list,function(x) x[1]) reddit$comments = comms reddit$comments_num = as.numeric(reddit$comments) #Convert NAs to 0 reddit$comments_num[which(reddit$comments_num %in% NA)] = 0 reddit = reddit %>% mutate(comment_ratio = comments_num/upvotes) saveRDS(reddit, "reddit.RDS") ## All caps analysis temp = reddit %>% select(titles) temp1 = unique(temp) %>% mutate(upper = toupper(temp1$titles)) temp2 = temp1 %>% mutate(eq = titles==upper) %>% group_by(eq) %>% summarize(count=n()) ## Unique titles per day date_analysis = reddit %>% select(titles,snapshot_date) date_analysis = unique(date_analysis) %>% group_by(snapshot_date) %>% summarize(count=n()) colnames(reddit) df = reddit %>% mutate(datehour = sapply(reddit$snapshot_datetime, function(x) substring(x,1,10))) %>% mutate(rank_hour = paste0(datehour,rank)) %>% select(1:10, rank_hour, submit_date, submit_hour) ## Return a list of top 10 items that are unique per day and hour df_unique_hour = df[!duplicated(df$rank_hour),] df_unique_hour = df_unique_hour %>% select(snapshot_date, rank, titles, 
subreddit, upvotes, comments, submitter, snapshot_time, url, submit_datetime, submit_date, submit_hour) ## Reindex row indices rownames(df_unique_hour) = NULL df_unique_hour = df_unique_hour %>% mutate(snapshot_hour = substring(snapshot_time,1,2)) unique_subreddits_per_day = df_unique_hour %>% group_by(snapshot_date, subreddit) %>% summarize(count=n()) ## Items per day_hour head(df_unique_hour) ipdh = df_unique_hour %>% select(titles, snapshot_date, snapshot_hour) counts = unique(ipdh) %>% group_by(snapshot_date) %>% summarize(count=n()) %>% filter(count != 240) counts_2 = unique(ipdh) %>% group_by(snapshot_date,snapshot_hour) %>% filter(snapshot_date %in% counts$snapshot_date) ######### Broken down by titles per day df_date_title = reddit %>% mutate(date_title = paste0(snapshot_date,titles)) %>% select(2:10, date_title) df_unique_title = df_date_title[!duplicated(df_date_title$date_title),] unique_titles_per_day_r1 = df_unique_title %>% filter(rank==1) ######## Count of front page posts by user front_page_by_user = reddit %>% select(titles, submitter) t = unique(front_page_by_user) %>% filter(submitter=='Nameless696') r = t %>% group_by(submitter) %>% summarize(count=n()) %>% filter(count > 2) %>% arrange(desc(count)) ### ### Hottest Upvote time? 
### mean and median reddit score by time (aggregate per day/ per hour) reddit_score_mean = df_unique_hour %>% select(snapshot_date,upvotes) %>% group_by(snapshot_date) %>% summarize(avg_score_mean = mean(upvotes)) reddit_score_median = df_unique_hour %>% select(snapshot_date,upvotes) %>% group_by(snapshot_date) %>% summarize(avg_score_median = median(upvotes)) plot_ly ( x = reddit_score_mean$snapshot_date, y = reddit_score_mean$avg_score_mean, type = 'scatter', mode = 'markers') plot_ly ( x = reddit_score_median$snapshot_date, y = reddit_score_median$avg_score_median, type = 'scatter', mode = 'markers') ## by hour reddit_score_by_hour = df_unique_hour %>% group_by(snapshot_hour) %>% summarize(score_mean = mean(upvotes), score_median = median(upvotes)) plot_ly ( x = reddit_score_by_hour$snapshot_hour, y = reddit_score_by_hour$score_mean, type = 'scatter', mode = 'lines') %>% add_trace( x = reddit_score_by_hour$snapshot_hour, y = reddit_score_by_hour$score_median, type = 'scatter', mode = 'lines' ) plot_ly ( x = median_reddit_score_by_hour$snapshot_hour, y = median_reddit_score_by_hour$score, type = 'scatter', mode = 'markers') ##by submit time mean_reddit_score_by_submit_hour = df_unique_hour %>% group_by(submit_hour) %>% summarize(score = mean(upvotes)) median_reddit_score_by_submit_hour = df_unique_hour %>% group_by(submit_hour) %>% summarize(score = median(upvotes)) p = plot_ly ( x = mean_reddit_score_by_submit_hour$submit_hour, y = mean_reddit_score_by_submit_hour$score, type = 'scatter', mode = 'markers') ###Submit vs Snapshot! 
q = plot_ly ( x = median_reddit_score_by_submit_hour$submit_hour, y = median_reddit_score_by_submit_hour$score, type = 'scatter', mode = 'markers') plot_ly() %>% add_trace( x = mean_reddit_score_by_submit_hour$submit_hour, y = mean_reddit_score_by_submit_hour$score, type = 'scatter', mode = 'lines', name = 'mean') %>% add_trace( x = median_reddit_score_by_submit_hour$submit_hour, y = median_reddit_score_by_submit_hour$score, type = 'scatter', mode = 'lines', name = 'median') ### word cloud? ### subreddit analysis - what is popular? test = reddit %>% group_by(subreddit,titles) %>% summarize(count=n()) %>% arrange(desc(count)) subreddit_analysis = test %>% select(subreddit) %>% summarize(count=n()) %>% filter(count >=10) %>% arrange(desc(count)) ## Reorder to be descending subreddit_analysis$subreddit = factor(subreddit_analysis$subreddit, levels = unique(subreddit_analysis$subreddit)[order(subreddit_analysis$count, decreasing = TRUE)]) plot_ly ( x = subreddit_analysis$subreddit, y = subreddit_analysis$count, type = 'bar', mode = 'markers', title = 'Posts by Subreddit' ) ### Relationship between number of comments and upvotes? 
comment_analysis1 = reddit %>% group_by(titles,subreddit) %>% summarize(max = max(comment_ratio)) %>% arrange(desc(max)) head(comment_analysis1) datatable(head(comment_analysis1), extensions = 'FixedColumns', options = list( dom = 't', scrollX = TRUE, scrollCollapse = TRUE)) %>% formatStyle('titles', `font-size` = '11px') %>% formatStyle('subreddit', `font-size` = '11px') %>% formatStyle('max', `font-size` = '11px') maxup = reddit %>% group_by(titles) %>% summarize(submit_hour = round(mean(as.numeric(submit_hour)))) head(maxup,25) joined_df = left_join(reddit, maxup, by="titles") head(joined_df) maxup_1 = maxup %>% group_by(submit_hour) %>% summarize(count = n()) g = ggplot(maxup_1, aes(x = submit_hour, y = count)) g+ geom_point() lm = joined_df %>% select(titles, submit_hour.x, subreddit, upvotes) head(lm) gogo = reddit %>% filter(titles == '\'Anti-Vaxxer\' Mom Changes Mind After Her Three Kids Fall Ill') g = ggplot(reddit, aes(x=comments_num, y=upvotes)) g + geom_point() reddit$comments_num reddit[reddit$comments_num == max(reddit$comments_num),] reddit[reddit$upvotes == max(reddit$upvotes),] # Upvotes = Subreddit + submit_time ### boxplot of subreddit ### average life of top10? ### career redditors?
/Project3-WebScraping/ReganYee/Analysis.R
no_license
vuchau/bootcamp007_project
R
false
false
7,525
r
## ---------------------------------------------------------------------------
## Load and clean the Wayback Machine scrape of the Reddit front page, then
## build the deduplicated per-day / per-hour "top 10" data frames used by the
## analyses further down this script.
## ---------------------------------------------------------------------------
library(dplyr)
library(ggplot2)
library(plotly)
library(DT)

## Import and read data
setwd("~/Documents/bootcamp007_project/Project3-WebScraping/ReganYee/Wayback/")
reddit = read.csv("wayback.csv")
#reddit[reddit$snapshot_datetime == '20161109103912',]

# Upvotes arrive as a factor; coerce via character (not directly to numeric,
# which would yield factor-level codes), then randomly impute the NAs the
# coercion introduces.
# FIX: dropped the chained assignment `reddit$upvotes = imputedupvotes = ...`,
# which silently created an unused global variable as a side effect.
reddit$upvotes = as.numeric(as.character(reddit$upvotes))
reddit$upvotes = Hmisc::impute(reddit$upvotes, "random")

# Timestamps are fixed-width strings ("YYYYMMDDHHMMSS"-style); slice out the
# pieces used for grouping.
reddit$snapshot_datetime = as.character(reddit$snapshot_datetime)
reddit$snapshot_date = as.character(reddit$snapshot_date)
reddit$snapshot_time = sapply(reddit$snapshot_datetime, function(x) substring(x, 9, 14))
reddit$submit_date = sapply(reddit$submit_datetime, function(x) substring(x, 1, 10))
reddit$submit_hour = sapply(reddit$submit_datetime, function(x) substring(x, 12, 13))

# Drop a known-bad snapshot day (character vs numeric comparison works via
# implicit coercion; kept as-is for compatibility).
reddit = reddit %>% filter(snapshot_date != 20160629)

# The comments field looks like "<count> comments"; keep only the count token.
reddit$comments = as.character(reddit$comments)
delimited_list = strsplit(reddit$comments, split = " ")
comms = sapply(delimited_list, function(x) x[1])
reddit$comments = comms
reddit$comments_num = as.numeric(reddit$comments)

# Convert NAs to 0 (rows whose comment count failed to parse).
# FIX: idiomatic is.na() instead of `which(x %in% NA)`.
reddit$comments_num[is.na(reddit$comments_num)] = 0

reddit = reddit %>% mutate(comment_ratio = comments_num / upvotes)
saveRDS(reddit, "reddit.RDS")

## All caps analysis: how many titles are written entirely in upper case?
temp = reddit %>% select(titles)
# FIX: the original wrote `mutate(upper = toupper(temp1$titles))`, referencing
# temp1 inside temp1's own definition -- "object 'temp1' not found" on a fresh
# session.  dplyr data-masking lets us use the column name directly.
temp1 = unique(temp) %>% mutate(upper = toupper(titles))
temp2 = temp1 %>% mutate(eq = titles == upper) %>% group_by(eq) %>% summarize(count = n())

## Unique titles per day
date_analysis = reddit %>% select(titles, snapshot_date)
date_analysis = unique(date_analysis) %>% group_by(snapshot_date) %>% summarize(count = n())
colnames(reddit)

# Key each row by snapshot day+hour and rank so the hourly top-10 lists can
# be deduplicated.
df = reddit %>%
  mutate(datehour = sapply(reddit$snapshot_datetime, function(x) substring(x, 1, 10))) %>%
  mutate(rank_hour = paste0(datehour, rank)) %>%
  select(1:10, rank_hour, submit_date, submit_hour)

## Return a list of top 10 items that are unique per day and hour
df_unique_hour = df[!duplicated(df$rank_hour), ]
df_unique_hour = df_unique_hour %>%
  select(snapshot_date, rank, titles, subreddit, upvotes, comments, submitter,
         snapshot_time, url, submit_datetime, submit_date, submit_hour)

## Reindex row indices
rownames(df_unique_hour) = NULL
df_unique_hour = df_unique_hour %>% mutate(snapshot_hour = substring(snapshot_time, 1, 2))

unique_subreddits_per_day = df_unique_hour %>% group_by(snapshot_date, subreddit) %>% summarize(count = n())

## Items per day_hour: flag days with other than 240 rows (presumably
## 24 hours x top-10 -- TODO confirm the expected count)
head(df_unique_hour)
ipdh = df_unique_hour %>% select(titles, snapshot_date, snapshot_hour)
counts = unique(ipdh) %>% group_by(snapshot_date) %>% summarize(count = n()) %>% filter(count != 240)
counts_2 = unique(ipdh) %>% group_by(snapshot_date, snapshot_hour) %>% filter(snapshot_date %in% counts$snapshot_date)

######### Broken down by titles per day
df_date_title = reddit %>% mutate(date_title = paste0(snapshot_date, titles)) %>% select(2:10, date_title)
df_unique_title = df_date_title[!duplicated(df_date_title$date_title), ]
unique_titles_per_day_r1 = df_unique_title %>% filter(rank == 1)

######## Count of front page posts by user
front_page_by_user = reddit %>% select(titles, submitter)
# NOTE(review): `t` and `r` shadow base::t(); kept for compatibility but
# fragile names -- consider renaming.
t = unique(front_page_by_user) %>% filter(submitter == 'Nameless696')
r = t %>% group_by(submitter) %>% summarize(count = n()) %>% filter(count > 2) %>% arrange(desc(count))

###
### Hottest Upvote time?
### mean and median reddit score by time (aggregate per day/ per hour) reddit_score_mean = df_unique_hour %>% select(snapshot_date,upvotes) %>% group_by(snapshot_date) %>% summarize(avg_score_mean = mean(upvotes)) reddit_score_median = df_unique_hour %>% select(snapshot_date,upvotes) %>% group_by(snapshot_date) %>% summarize(avg_score_median = median(upvotes)) plot_ly ( x = reddit_score_mean$snapshot_date, y = reddit_score_mean$avg_score_mean, type = 'scatter', mode = 'markers') plot_ly ( x = reddit_score_median$snapshot_date, y = reddit_score_median$avg_score_median, type = 'scatter', mode = 'markers') ## by hour reddit_score_by_hour = df_unique_hour %>% group_by(snapshot_hour) %>% summarize(score_mean = mean(upvotes), score_median = median(upvotes)) plot_ly ( x = reddit_score_by_hour$snapshot_hour, y = reddit_score_by_hour$score_mean, type = 'scatter', mode = 'lines') %>% add_trace( x = reddit_score_by_hour$snapshot_hour, y = reddit_score_by_hour$score_median, type = 'scatter', mode = 'lines' ) plot_ly ( x = median_reddit_score_by_hour$snapshot_hour, y = median_reddit_score_by_hour$score, type = 'scatter', mode = 'markers') ##by submit time mean_reddit_score_by_submit_hour = df_unique_hour %>% group_by(submit_hour) %>% summarize(score = mean(upvotes)) median_reddit_score_by_submit_hour = df_unique_hour %>% group_by(submit_hour) %>% summarize(score = median(upvotes)) p = plot_ly ( x = mean_reddit_score_by_submit_hour$submit_hour, y = mean_reddit_score_by_submit_hour$score, type = 'scatter', mode = 'markers') ###Submit vs Snapshot! 
q = plot_ly ( x = median_reddit_score_by_submit_hour$submit_hour, y = median_reddit_score_by_submit_hour$score, type = 'scatter', mode = 'markers') plot_ly() %>% add_trace( x = mean_reddit_score_by_submit_hour$submit_hour, y = mean_reddit_score_by_submit_hour$score, type = 'scatter', mode = 'lines', name = 'mean') %>% add_trace( x = median_reddit_score_by_submit_hour$submit_hour, y = median_reddit_score_by_submit_hour$score, type = 'scatter', mode = 'lines', name = 'median') ### word cloud? ### subreddit analysis - what is popular? test = reddit %>% group_by(subreddit,titles) %>% summarize(count=n()) %>% arrange(desc(count)) subreddit_analysis = test %>% select(subreddit) %>% summarize(count=n()) %>% filter(count >=10) %>% arrange(desc(count)) ## Reorder to be descending subreddit_analysis$subreddit = factor(subreddit_analysis$subreddit, levels = unique(subreddit_analysis$subreddit)[order(subreddit_analysis$count, decreasing = TRUE)]) plot_ly ( x = subreddit_analysis$subreddit, y = subreddit_analysis$count, type = 'bar', mode = 'markers', title = 'Posts by Subreddit' ) ### Relationship between number of comments and upvotes? 
comment_analysis1 = reddit %>% group_by(titles,subreddit) %>% summarize(max = max(comment_ratio)) %>% arrange(desc(max)) head(comment_analysis1) datatable(head(comment_analysis1), extensions = 'FixedColumns', options = list( dom = 't', scrollX = TRUE, scrollCollapse = TRUE)) %>% formatStyle('titles', `font-size` = '11px') %>% formatStyle('subreddit', `font-size` = '11px') %>% formatStyle('max', `font-size` = '11px') maxup = reddit %>% group_by(titles) %>% summarize(submit_hour = round(mean(as.numeric(submit_hour)))) head(maxup,25) joined_df = left_join(reddit, maxup, by="titles") head(joined_df) maxup_1 = maxup %>% group_by(submit_hour) %>% summarize(count = n()) g = ggplot(maxup_1, aes(x = submit_hour, y = count)) g+ geom_point() lm = joined_df %>% select(titles, submit_hour.x, subreddit, upvotes) head(lm) gogo = reddit %>% filter(titles == '\'Anti-Vaxxer\' Mom Changes Mind After Her Three Kids Fall Ill') g = ggplot(reddit, aes(x=comments_num, y=upvotes)) g + geom_point() reddit$comments_num reddit[reddit$comments_num == max(reddit$comments_num),] reddit[reddit$upvotes == max(reddit$upvotes),] # Upvotes = Subreddit + submit_time ### boxplot of subreddit ### average life of top10? ### career redditors?
library(ape)
library(geiger)

#####################
# solve for moments #
#####################

# Build parameter lists for every supported trait-evolution process (BM, OU,
# EB, the pure-jump JN/VG/NIG Levy processes, and the BM+jump mixtures) so
# each matches the requested variance/kurtosis targets.  `frac_of_var` is the
# fraction of variance carried by the jump component (pure-jump models use 1);
# `tip` is passed through unchanged as the tip-noise parameter.
get_params_for_var = function(process_var=1,process_kurt=0,frac_of_var=0,halflife=0,decay=0,tip=0) {
  ret = list()
  ret$bm = list(sigma.bm=sqrt(process_var))
  ret$ou = get_params_OU(process_var,halflife)
  ret$eb = get_params_EB(process_var,decay)
  # pure-jump processes: all variance comes from jumps (frac_of_var = 1)
  ret$jn = get_params_JN(process_var,process_kurt,1)
  ret$vg = get_params_VG(process_var,process_kurt,1)
  ret$nig = get_params_NIG(process_var,process_kurt,1)
  # BM + jump mixtures: jumps carry only frac_of_var of the variance
  ret$bmjn = get_params_JN(process_var,process_kurt,frac_of_var)
  ret$bmvg = get_params_VG(process_var,process_kurt,frac_of_var)
  ret$bmnig = get_params_NIG(process_var,process_kurt,frac_of_var)
  ret$tip = tip
  return(ret)
}

# Jump-normal (compound Poisson, normal jumps) parameters for the requested
# variance/kurtosis split.
# NOTE(review): divides by process_kurt and frac_of_var; the zero defaults in
# get_params_for_var produce Inf/NaN here -- callers must supply nonzero values.
get_params_JN = function(process_var,process_kurt,frac_of_var) {
  lambda = 3*frac_of_var^2/process_kurt
  delta2 = (process_kurt*process_var)/(3*frac_of_var)
  sigma2 = process_var-frac_of_var*process_var  # variance left to the BM part
  return(list(lambda.jn=lambda, delta.jn=sqrt(delta2), sigma.bm=sqrt(sigma2)))
}

# Variance-gamma parameters for the requested variance/kurtosis split.
get_params_VG = function(process_var,process_kurt,frac_of_var) {
  nu = process_kurt/(3*frac_of_var^2)
  tau2 = frac_of_var*process_var
  sigma2 = process_var-frac_of_var*process_var
  return(list(nu.vg=nu, sigma.vg=sqrt(tau2), sigma.bm=sqrt(sigma2)))
}

# Ornstein-Uhlenbeck parameters from the stationary variance and the
# phylogenetic half-life: alpha = ln(2)/halflife, sigma^2 = 2*var*alpha.
get_params_OU = function(process_var,halflife) {
  return(list(sigma.bm=sqrt(2*process_var*log(2)/halflife),alpha.ou=log(2)/halflife))
}

# Early-burst parameters: rate sigma^2 with exponential decay `decay`.
get_params_EB = function(process_var,decay) {
  # stationary variance of process is zero as t->Inf
  return(list(sigma.bm=sqrt(process_var), decay.eb=decay))
}

# Normal-inverse-Gaussian parameters for the requested variance/kurtosis split.
get_params_NIG = function(process_var,process_kurt,frac_of_var) {
  alpha = sqrt(3/(frac_of_var*process_kurt*process_var))
  delta = sqrt(3*frac_of_var*process_var/process_kurt)
  sigma2 = process_var - frac_of_var*process_var
  return(list(sigma.bm = sqrt(sigma2), alpha.nig = alpha, delta.nig = delta))
}

###################
# compute moments #
###################

# Inverse of get_params_JN: variance and kurtosis implied by JN+BM parameters.
get_moments_JN = function(lambda.jn, delta.jn, sigma.bm) {
  v = lambda.jn*delta.jn^2 + sigma.bm^2
  k = 3*lambda.jn*delta.jn^4/(sigma.bm^2+lambda.jn*delta.jn^2)^2
  return(list(var=v,kurt=k))
}

# Variance and kurtosis implied by VG+BM parameters.
get_moments_VG = function(nu.vg, sigma.vg, sigma.bm) {
  v = sigma.bm^2+sigma.vg^2
  k = 3*nu.vg
  return(list(var = v, kurt = k))
}

# Variance and kurtosis implied by NIG+BM parameters.
get_moments_NIG = function(alpha.nig, delta.nig, sigma.bm) {
  v = sigma.bm^2 + delta.nig/alpha.nig
  k = 3*delta.nig/(alpha.nig*(sigma.bm^2*alpha.nig+delta.nig)^2)
  return(list(var = v, kurt = k))
}

#################
# Levy measures #
#################

# Levy (jump intensity) measure of the jump-normal process at x.
# The `log` flag shadows base::log, but R resolves log(...) calls to the
# function (non-function bindings are skipped), so the computation is safe.
JN_measure = function(x, lambda, delta, log = FALSE) {
  ret = log(lambda) + dnorm(x,sd=delta,log=TRUE)
  if (log) { return(ret) } else { return(exp(ret)) }
}

# Levy measure of the variance-gamma process (optional drift mu).
VG_measure = function(x, nu, sigma, mu = 0 ,log=FALSE) {
  ret = -log(nu*abs(x)) + mu*x/sigma^2 - sqrt(2/nu-mu^2/sigma^2)/sigma*abs(x)
  if (log) { return(ret) } else { return(exp(ret)) }
}

# Levy measure of the normal-inverse-Gaussian process (besselK from base R).
NIG_measure = function(x, alpha, delta, beta = 0, log=FALSE) {
  ret = log(alpha) + log(delta) - log(pi) - log(abs(x)) + beta*x + log(besselK(alpha*abs(x),1))
  if (log) { return(ret) } else { return(exp(ret)) }
}

#################
# rate of jumps #
#################

# Rate of jumps with magnitude exceeding |x| under the given Levy measure;
# x == 0 yields the total jump rate.  Extra args go to the measure function.
jump_rate = function(x, levy_measure, ...) {
  if (x == 0) {
    total = integrate(levy_measure, -Inf, Inf, ..., stop.on.error=FALSE)$value
  } else {
    negative = integrate(levy_measure, -Inf, -x, ..., stop.on.error=FALSE)$value
    positive = integrate(levy_measure, x, Inf, ..., stop.on.error=FALSE)$value
    total = negative + positive
  }
  return(total)
}

###########################
# moment from Levy measure#
###########################

# k-th moment of the jump distribution, integrated numerically from the Levy
# measure.  Returns the full integrate() result object, not just $value.
moment_from_measure = function(k, levy_measure, ...)
{
  integrate(function(y){y^k*levy_measure(y,...)},-Inf,Inf)
}

#########################
# brlen transformations #
#########################

# Rescale branch lengths so BM on the rescaled tree matches an OU process
# with restraining force `theta` on the original tree.
# NOTE(review): locals `T` and `t` shadow the TRUE abbreviation and base::t();
# harmless here but fragile naming.
OU.brlen = function(phy,theta=1e-6) {
  phy = reorder(phy,"postorder")
  n_tip = phy$Nnode + 1
  # get raw ages/times
  a = branching.times(phy)
  T = max(a)
  t = T - a
  # get OU-scaled ages/times
  t.ou = 1/(2*theta) * exp(-2*theta*(T-t)) * (1-exp(-2*theta*t))
  h.ou = 1/(2*theta) * exp(-2*theta*(T-T)) * (1-exp(-2*theta*T))
  a.ou = h.ou - t.ou
  a.ou = c( rep(0, n_tip), a.ou)  # tips get OU-age 0
  # assign OU-scaled times to tree
  for (i in 1:nrow(phy$edge)) {
    phy$edge.length[i] = a.ou[phy$edge[i,1]] - a.ou[phy$edge[i,2]]
  }
  return(phy)
}

# Rescale branch lengths for the early-burst model with decay rate `r`:
# each branch receives the integral of exp(r*t) over its time span.
EB.brlen = function(phy,r=1e-6) {
  phy = reorder(phy,"postorder")
  n_tip = length(phy$tip.label)
  # get raw ages/times
  a = branching.times(phy)
  T = max(a)
  t = T - a
  t = c(rep(T, n_tip), t)  # tips sit at the present, time T from the root
  # assign EB-scaled times to tree
  for (i in 1:nrow(phy$edge)) {
    t_pa = t[phy$edge[i,1]]
    t_ch = t[phy$edge[i,2]]
    dx = exp(r*t_ch) - exp(r*t_pa)
    phy$edge.length[i] = dx/r
  }
  return(phy)
}

####################
# parameter labels #
####################
# parameter labeling

# Expand a model-specific parameter vector `p` into the full named vector of
# all possible parameters (zero-filled) for model code `m`.  By convention
# the final element of `p` is always the tip-noise sigma.
format_params = function(p, m) {
  param_names = c("sigma_bm", "lambda_jn", "delta_jn", "sigma_vg", "nu_vg", "alpha_nig", "delta_nig", "alpha_ou", "decay_eb", "sigma_tip")
  n_param = length(param_names)
  x = rep(0, n_param)
  names(x) = param_names
  x[n_param] = p[length(p)]  # tip noise is always the last element of p
  if (m=="BM") {
    x[1]=p[1]
  } else if (m=="BMJN") {
    x[1]=p[1]
    x[2]=p[2]
    x[3]=p[3]
  } else if (m=="BMVG") {
    x[1]=p[1]
    x[4]=p[2]
    x[5]=p[3]
  } else if (m=="BMNIG") {
    x[1]=p[1]
    x[6]=p[2]
    x[7]=p[3]
  } else if (m=="JN") {
    x[2]=p[1]
    x[3]=p[2]
  } else if (m=="VG") {
    x[4]=p[1]
    x[5]=p[2]
  } else if (m=="NIG") {
    x[6]=p[1]
    x[7]=p[2]
  } else if (m=="OU") {
    x[1]=p[1]
    x[8]=p[2]
  } else if (m=="EB") {
    x[1]=p[1]
    x[9]=p[2]
  }
  return(x)
}

#################
# data cleaning #
#################

# Drop the clades under the n largest phylogenetic contrasts (and optionally
# the zero-contrast tips via drop.zero) from a tree/data pair.
# NOTE(review): in the outlier loop below, to.drop.outlier is OVERWRITTEN on
# each iteration, so for n > 1 only the clade of the last node is recorded;
# `c(to.drop.outlier, clade$tip.label)` was probably intended -- confirm.
drop.outlier = function(phy,dat,n=1,drop_zero=T,verbose=F) {
  # expects dat to be a named vector
  if (!is.vector(dat)) { stop("ERROR: dat is not a vector") }
  if (is.null(names(dat))) { stop("ERROR: dat does not have names") }
  to.drop = c()
  to.drop.zero = c()
  to.drop.outlier = c()
  if (drop_zero) {
    td = drop.zero(phy,dat)
    phy = td$phy
    dat = td$dat
    if (length(td$dropped) > 0) { to.drop.zero = td$dropped }
  }
  phy = reorder(phy,'postorder')
  dat = dat[phy$tip.label]
  # only drop outliers if n > 0
  if (n > 0) {
    contrast = pic(dat,phy)
    nodes = order(abs(contrast),decreasing=TRUE)[1:n]
    nodes = length(phy$tip.label)+nodes  # contrast index -> internal node id
    for (node in nodes) {
      clade = extract.clade(phy,node)
      to.drop.outlier = clade$tip.label
    }
  }
  to.drop = unique(c(to.drop.outlier, to.drop.zero))
  #to.drop = unique(to.drop)
  #cat("to.drop\n")
  #print(to.drop)
  #cat("phy$tip.labels\n")
  #print(phy$tip.label)
  #print(c(length(to.drop), length(phy$tip.label)))
  # guard: dropping every tip would leave an empty tree, so skip in that case
  if (length(to.drop) != length(phy$tip.label)) {
    newPhy = drop.tip(phy, to.drop)
    newDat = dat[newPhy$tip.label]
  } else {
    newPhy = phy
    newDat = dat[newPhy$tip.label]
  }
  if (verbose) {
    cat("Dropped taxa\n")
    print(to.drop)
    cat("Dropped contrasts\n")
    print(contrast[order(abs(contrast),decreasing=TRUE)[1:n]])
    cat("Dropped taxa (drop_zero) =",length(to.drop.zero),"\n")
    cat("Dropped taxa (drop_outlier) =",length(to.drop.outlier),"\n")
    cat("\n")
  }
  return(list(phy=newPhy,dat=newDat))
}

# Remove tips involved in (near-)zero phylogenetic contrasts, walking the
# tree in postorder and forming weighted pseudo-observations at internal
# nodes (pruning-style averages weighted by the sister branch lengths).
drop.zero = function(phy,dat,eps=0) {
  #expects dat to be a named vector
  if (!is.vector(dat)) { stop("ERROR: dat is not a vector") }
  if (is.null(names(dat))) { stop("ERROR: dat does not have names") }
  phy = reorder(phy,"postorder")
  dat = dat[phy$tip.label]
  bad_nodes = c()
  pseudo_obs = rep(0,Nnode(phy,internal.only=FALSE))
  for (i in 1:length(phy$tip.label)) {
    pseudo_obs[i] = dat[i]
  }
  # postorder edges come in sibling pairs, hence the step of 2
  for (i in seq(1,nrow(phy$edge),2)) {
    cur_node = phy$edge[i,1]
    cur_left = phy$edge[i,2]
    cur_right = phy$edge[i+1,2]
    t_l = phy$edge.length[i]
    t_r = phy$edge.length[i+1]
    contrast = pseudo_obs[cur_left]-pseudo_obs[cur_right]
    if (!is.nan(contrast) && abs(contrast) <= eps) {
      bad_nodes = c(bad_nodes,cur_node)
    }
    pseudo_obs[cur_node] = t_r*pseudo_obs[cur_left]+t_l*pseudo_obs[cur_right]
    pseudo_obs[cur_node] = pseudo_obs[cur_node]/(t_l+t_r)
  }
  to.drop = c()
  for (node in bad_nodes) {
    clade = extract.clade(phy,node)
    to.drop = c(to.drop,clade$tip.label)
  }
  to.drop = unique(to.drop)
  newPhy = drop.tip(phy,to.drop)
  newDat = dat[newPhy$tip.label]
  return(list(phy=newPhy,dat=newDat,bad_nodes=bad_nodes,dropped=to.drop))
}

### plot PIC outliers for BM component of BM+LP

# -log10 p-values for each contrast being larger than expected under BM with
# rate sigma_bm (upper-tail normal probability of |contrast|).
pic_outliers = function(phy,dat,sigma_bm) {
  cur_pic = pic(dat,phy)
  z_vals = cur_pic
  p_vals = pnorm(abs(z_vals),sd=sigma_bm,lower.tail=FALSE)
  return(-log10(p_vals))
}

# Map -log10 p-values to white->red hex colors, preserving names.
# NOTE(review): the `phy` argument is never used, and plot_jumps calls this
# with a single argument -- works only because R arguments are lazy.
get_p_cols = function(p_vals,phy) {
  normed = p_vals/max(p_vals)
  cols_rgb = colorRamp(c("white","red"),bias=100)(normed)
  cols_hex = apply(cols_rgb,1,function(x){rgb(x[1],x[2],x[3],maxColorValue=255)})
  names(cols_hex) = names(p_vals)
  return(cols_hex)
}

# Plot a fitted tree and mark nodes whose contrasts are improbably large
# under the fitted BM rate (candidate jumps).  `x` is a fit result list with
# $phy, $dat and $params (containing "sigma_bm").  Returns the full -log10
# p-value vector invisibly.
plot_jumps = function(x,cutoff=-log10(0.05),cex=.5,adj=.5,main="") {
  if (is.null(x$phy)) stop("x does not contain phy object!")
  if (is.null(x$dat)) stop("x does not contain dat object!")
  if (is.null(x$params)) stop("x does not contain params object!")
  if (!("sigma_bm" %in% names(x$params))) stop("x does not contain sigma_bm parameter!")
  sigma = x$params["sigma_bm"]
  dat_p_vals = pic_outliers(x$phy, x$dat, sigma)
  # replace infinite scores (p-value underflow) with 1.5x the largest finite one
  max_p_val = max(dat_p_vals[dat_p_vals != Inf])
  if (any(dat_p_vals==Inf)) {
    inf_idx = dat_p_vals==Inf
    dat_p_vals[inf_idx] = 1.5 * max_p_val
  }
  filt_p_vals = dat_p_vals[dat_p_vals>=cutoff]
  p_cols = get_p_cols(filt_p_vals)
  plot(x$phy,adj=adj,cex=cex,main=main)
  nodelabels(pch=16,node = as.numeric(names(p_cols)), col=p_cols,frame="circle")
  invisible(dat_p_vals)
}
/R/levy_pruning_tools.r
no_license
Schraiber/pulsR
R
false
false
10,354
r
library(ape)
library(geiger)

#####################
# solve for moments #
#####################

# Build parameter lists for every supported trait-evolution process (BM, OU,
# EB, the pure-jump JN/VG/NIG Levy processes, and the BM+jump mixtures) so
# each matches the requested variance/kurtosis targets.  `frac_of_var` is the
# fraction of variance carried by the jump component (pure-jump models use 1);
# `tip` is passed through unchanged as the tip-noise parameter.
get_params_for_var = function(process_var=1,process_kurt=0,frac_of_var=0,halflife=0,decay=0,tip=0) {
  ret = list()
  ret$bm = list(sigma.bm=sqrt(process_var))
  ret$ou = get_params_OU(process_var,halflife)
  ret$eb = get_params_EB(process_var,decay)
  # pure-jump processes: all variance comes from jumps (frac_of_var = 1)
  ret$jn = get_params_JN(process_var,process_kurt,1)
  ret$vg = get_params_VG(process_var,process_kurt,1)
  ret$nig = get_params_NIG(process_var,process_kurt,1)
  # BM + jump mixtures: jumps carry only frac_of_var of the variance
  ret$bmjn = get_params_JN(process_var,process_kurt,frac_of_var)
  ret$bmvg = get_params_VG(process_var,process_kurt,frac_of_var)
  ret$bmnig = get_params_NIG(process_var,process_kurt,frac_of_var)
  ret$tip = tip
  return(ret)
}

# Jump-normal (compound Poisson, normal jumps) parameters for the requested
# variance/kurtosis split.
# NOTE(review): divides by process_kurt and frac_of_var; the zero defaults in
# get_params_for_var produce Inf/NaN here -- callers must supply nonzero values.
get_params_JN = function(process_var,process_kurt,frac_of_var) {
  lambda = 3*frac_of_var^2/process_kurt
  delta2 = (process_kurt*process_var)/(3*frac_of_var)
  sigma2 = process_var-frac_of_var*process_var  # variance left to the BM part
  return(list(lambda.jn=lambda, delta.jn=sqrt(delta2), sigma.bm=sqrt(sigma2)))
}

# Variance-gamma parameters for the requested variance/kurtosis split.
get_params_VG = function(process_var,process_kurt,frac_of_var) {
  nu = process_kurt/(3*frac_of_var^2)
  tau2 = frac_of_var*process_var
  sigma2 = process_var-frac_of_var*process_var
  return(list(nu.vg=nu, sigma.vg=sqrt(tau2), sigma.bm=sqrt(sigma2)))
}

# Ornstein-Uhlenbeck parameters from the stationary variance and the
# phylogenetic half-life: alpha = ln(2)/halflife, sigma^2 = 2*var*alpha.
get_params_OU = function(process_var,halflife) {
  return(list(sigma.bm=sqrt(2*process_var*log(2)/halflife),alpha.ou=log(2)/halflife))
}

# Early-burst parameters: rate sigma^2 with exponential decay `decay`.
get_params_EB = function(process_var,decay) {
  # stationary variance of process is zero as t->Inf
  return(list(sigma.bm=sqrt(process_var), decay.eb=decay))
}

# Normal-inverse-Gaussian parameters for the requested variance/kurtosis split.
get_params_NIG = function(process_var,process_kurt,frac_of_var) {
  alpha = sqrt(3/(frac_of_var*process_kurt*process_var))
  delta = sqrt(3*frac_of_var*process_var/process_kurt)
  sigma2 = process_var - frac_of_var*process_var
  return(list(sigma.bm = sqrt(sigma2), alpha.nig = alpha, delta.nig = delta))
}

###################
# compute moments #
###################

# Inverse of get_params_JN: variance and kurtosis implied by JN+BM parameters.
get_moments_JN = function(lambda.jn, delta.jn, sigma.bm) {
  v = lambda.jn*delta.jn^2 + sigma.bm^2
  k = 3*lambda.jn*delta.jn^4/(sigma.bm^2+lambda.jn*delta.jn^2)^2
  return(list(var=v,kurt=k))
}

# Variance and kurtosis implied by VG+BM parameters.
get_moments_VG = function(nu.vg, sigma.vg, sigma.bm) {
  v = sigma.bm^2+sigma.vg^2
  k = 3*nu.vg
  return(list(var = v, kurt = k))
}

# Variance and kurtosis implied by NIG+BM parameters.
get_moments_NIG = function(alpha.nig, delta.nig, sigma.bm) {
  v = sigma.bm^2 + delta.nig/alpha.nig
  k = 3*delta.nig/(alpha.nig*(sigma.bm^2*alpha.nig+delta.nig)^2)
  return(list(var = v, kurt = k))
}

#################
# Levy measures #
#################

# Levy (jump intensity) measure of the jump-normal process at x.
# The `log` flag shadows base::log, but R resolves log(...) calls to the
# function (non-function bindings are skipped), so the computation is safe.
JN_measure = function(x, lambda, delta, log = FALSE) {
  ret = log(lambda) + dnorm(x,sd=delta,log=TRUE)
  if (log) { return(ret) } else { return(exp(ret)) }
}

# Levy measure of the variance-gamma process (optional drift mu).
VG_measure = function(x, nu, sigma, mu = 0 ,log=FALSE) {
  ret = -log(nu*abs(x)) + mu*x/sigma^2 - sqrt(2/nu-mu^2/sigma^2)/sigma*abs(x)
  if (log) { return(ret) } else { return(exp(ret)) }
}

# Levy measure of the normal-inverse-Gaussian process (besselK from base R).
NIG_measure = function(x, alpha, delta, beta = 0, log=FALSE) {
  ret = log(alpha) + log(delta) - log(pi) - log(abs(x)) + beta*x + log(besselK(alpha*abs(x),1))
  if (log) { return(ret) } else { return(exp(ret)) }
}

#################
# rate of jumps #
#################

# Rate of jumps with magnitude exceeding |x| under the given Levy measure;
# x == 0 yields the total jump rate.  Extra args go to the measure function.
jump_rate = function(x, levy_measure, ...) {
  if (x == 0) {
    total = integrate(levy_measure, -Inf, Inf, ..., stop.on.error=FALSE)$value
  } else {
    negative = integrate(levy_measure, -Inf, -x, ..., stop.on.error=FALSE)$value
    positive = integrate(levy_measure, x, Inf, ..., stop.on.error=FALSE)$value
    total = negative + positive
  }
  return(total)
}

###########################
# moment from Levy measure#
###########################

# k-th moment of the jump distribution, integrated numerically from the Levy
# measure.  Returns the full integrate() result object, not just $value.
moment_from_measure = function(k, levy_measure, ...)
{
  integrate(function(y){y^k*levy_measure(y,...)},-Inf,Inf)
}

#########################
# brlen transformations #
#########################

# Rescale branch lengths so BM on the rescaled tree matches an OU process
# with restraining force `theta` on the original tree.
# NOTE(review): locals `T` and `t` shadow the TRUE abbreviation and base::t();
# harmless here but fragile naming.
OU.brlen = function(phy,theta=1e-6) {
  phy = reorder(phy,"postorder")
  n_tip = phy$Nnode + 1
  # get raw ages/times
  a = branching.times(phy)
  T = max(a)
  t = T - a
  # get OU-scaled ages/times
  t.ou = 1/(2*theta) * exp(-2*theta*(T-t)) * (1-exp(-2*theta*t))
  h.ou = 1/(2*theta) * exp(-2*theta*(T-T)) * (1-exp(-2*theta*T))
  a.ou = h.ou - t.ou
  a.ou = c( rep(0, n_tip), a.ou)  # tips get OU-age 0
  # assign OU-scaled times to tree
  for (i in 1:nrow(phy$edge)) {
    phy$edge.length[i] = a.ou[phy$edge[i,1]] - a.ou[phy$edge[i,2]]
  }
  return(phy)
}

# Rescale branch lengths for the early-burst model with decay rate `r`:
# each branch receives the integral of exp(r*t) over its time span.
EB.brlen = function(phy,r=1e-6) {
  phy = reorder(phy,"postorder")
  n_tip = length(phy$tip.label)
  # get raw ages/times
  a = branching.times(phy)
  T = max(a)
  t = T - a
  t = c(rep(T, n_tip), t)  # tips sit at the present, time T from the root
  # assign EB-scaled times to tree
  for (i in 1:nrow(phy$edge)) {
    t_pa = t[phy$edge[i,1]]
    t_ch = t[phy$edge[i,2]]
    dx = exp(r*t_ch) - exp(r*t_pa)
    phy$edge.length[i] = dx/r
  }
  return(phy)
}

####################
# parameter labels #
####################
# parameter labeling

# Expand a model-specific parameter vector `p` into the full named vector of
# all possible parameters (zero-filled) for model code `m`.  By convention
# the final element of `p` is always the tip-noise sigma.
format_params = function(p, m) {
  param_names = c("sigma_bm", "lambda_jn", "delta_jn", "sigma_vg", "nu_vg", "alpha_nig", "delta_nig", "alpha_ou", "decay_eb", "sigma_tip")
  n_param = length(param_names)
  x = rep(0, n_param)
  names(x) = param_names
  x[n_param] = p[length(p)]  # tip noise is always the last element of p
  if (m=="BM") {
    x[1]=p[1]
  } else if (m=="BMJN") {
    x[1]=p[1]
    x[2]=p[2]
    x[3]=p[3]
  } else if (m=="BMVG") {
    x[1]=p[1]
    x[4]=p[2]
    x[5]=p[3]
  } else if (m=="BMNIG") {
    x[1]=p[1]
    x[6]=p[2]
    x[7]=p[3]
  } else if (m=="JN") {
    x[2]=p[1]
    x[3]=p[2]
  } else if (m=="VG") {
    x[4]=p[1]
    x[5]=p[2]
  } else if (m=="NIG") {
    x[6]=p[1]
    x[7]=p[2]
  } else if (m=="OU") {
    x[1]=p[1]
    x[8]=p[2]
  } else if (m=="EB") {
    x[1]=p[1]
    x[9]=p[2]
  }
  return(x)
}

#################
# data cleaning #
#################

# Drop the clades under the n largest phylogenetic contrasts (and optionally
# the zero-contrast tips via drop.zero) from a tree/data pair.
# NOTE(review): in the outlier loop below, to.drop.outlier is OVERWRITTEN on
# each iteration, so for n > 1 only the clade of the last node is recorded;
# `c(to.drop.outlier, clade$tip.label)` was probably intended -- confirm.
drop.outlier = function(phy,dat,n=1,drop_zero=T,verbose=F) {
  # expects dat to be a named vector
  if (!is.vector(dat)) { stop("ERROR: dat is not a vector") }
  if (is.null(names(dat))) { stop("ERROR: dat does not have names") }
  to.drop = c()
  to.drop.zero = c()
  to.drop.outlier = c()
  if (drop_zero) {
    td = drop.zero(phy,dat)
    phy = td$phy
    dat = td$dat
    if (length(td$dropped) > 0) { to.drop.zero = td$dropped }
  }
  phy = reorder(phy,'postorder')
  dat = dat[phy$tip.label]
  # only drop outliers if n > 0
  if (n > 0) {
    contrast = pic(dat,phy)
    nodes = order(abs(contrast),decreasing=TRUE)[1:n]
    nodes = length(phy$tip.label)+nodes  # contrast index -> internal node id
    for (node in nodes) {
      clade = extract.clade(phy,node)
      to.drop.outlier = clade$tip.label
    }
  }
  to.drop = unique(c(to.drop.outlier, to.drop.zero))
  #to.drop = unique(to.drop)
  #cat("to.drop\n")
  #print(to.drop)
  #cat("phy$tip.labels\n")
  #print(phy$tip.label)
  #print(c(length(to.drop), length(phy$tip.label)))
  # guard: dropping every tip would leave an empty tree, so skip in that case
  if (length(to.drop) != length(phy$tip.label)) {
    newPhy = drop.tip(phy, to.drop)
    newDat = dat[newPhy$tip.label]
  } else {
    newPhy = phy
    newDat = dat[newPhy$tip.label]
  }
  if (verbose) {
    cat("Dropped taxa\n")
    print(to.drop)
    cat("Dropped contrasts\n")
    print(contrast[order(abs(contrast),decreasing=TRUE)[1:n]])
    cat("Dropped taxa (drop_zero) =",length(to.drop.zero),"\n")
    cat("Dropped taxa (drop_outlier) =",length(to.drop.outlier),"\n")
    cat("\n")
  }
  return(list(phy=newPhy,dat=newDat))
}

# Remove tips involved in (near-)zero phylogenetic contrasts, walking the
# tree in postorder and forming weighted pseudo-observations at internal
# nodes (pruning-style averages weighted by the sister branch lengths).
drop.zero = function(phy,dat,eps=0) {
  #expects dat to be a named vector
  if (!is.vector(dat)) { stop("ERROR: dat is not a vector") }
  if (is.null(names(dat))) { stop("ERROR: dat does not have names") }
  phy = reorder(phy,"postorder")
  dat = dat[phy$tip.label]
  bad_nodes = c()
  pseudo_obs = rep(0,Nnode(phy,internal.only=FALSE))
  for (i in 1:length(phy$tip.label)) {
    pseudo_obs[i] = dat[i]
  }
  # postorder edges come in sibling pairs, hence the step of 2
  for (i in seq(1,nrow(phy$edge),2)) {
    cur_node = phy$edge[i,1]
    cur_left = phy$edge[i,2]
    cur_right = phy$edge[i+1,2]
    t_l = phy$edge.length[i]
    t_r = phy$edge.length[i+1]
    contrast = pseudo_obs[cur_left]-pseudo_obs[cur_right]
    if (!is.nan(contrast) && abs(contrast) <= eps) {
      bad_nodes = c(bad_nodes,cur_node)
    }
    pseudo_obs[cur_node] = t_r*pseudo_obs[cur_left]+t_l*pseudo_obs[cur_right]
    pseudo_obs[cur_node] = pseudo_obs[cur_node]/(t_l+t_r)
  }
  to.drop = c()
  for (node in bad_nodes) {
    clade = extract.clade(phy,node)
    to.drop = c(to.drop,clade$tip.label)
  }
  to.drop = unique(to.drop)
  newPhy = drop.tip(phy,to.drop)
  newDat = dat[newPhy$tip.label]
  return(list(phy=newPhy,dat=newDat,bad_nodes=bad_nodes,dropped=to.drop))
}

### plot PIC outliers for BM component of BM+LP

# -log10 p-values for each contrast being larger than expected under BM with
# rate sigma_bm (upper-tail normal probability of |contrast|).
pic_outliers = function(phy,dat,sigma_bm) {
  cur_pic = pic(dat,phy)
  z_vals = cur_pic
  p_vals = pnorm(abs(z_vals),sd=sigma_bm,lower.tail=FALSE)
  return(-log10(p_vals))
}

# Map -log10 p-values to white->red hex colors, preserving names.
# NOTE(review): the `phy` argument is never used, and plot_jumps calls this
# with a single argument -- works only because R arguments are lazy.
get_p_cols = function(p_vals,phy) {
  normed = p_vals/max(p_vals)
  cols_rgb = colorRamp(c("white","red"),bias=100)(normed)
  cols_hex = apply(cols_rgb,1,function(x){rgb(x[1],x[2],x[3],maxColorValue=255)})
  names(cols_hex) = names(p_vals)
  return(cols_hex)
}

# Plot a fitted tree and mark nodes whose contrasts are improbably large
# under the fitted BM rate (candidate jumps).  `x` is a fit result list with
# $phy, $dat and $params (containing "sigma_bm").  Returns the full -log10
# p-value vector invisibly.
plot_jumps = function(x,cutoff=-log10(0.05),cex=.5,adj=.5,main="") {
  if (is.null(x$phy)) stop("x does not contain phy object!")
  if (is.null(x$dat)) stop("x does not contain dat object!")
  if (is.null(x$params)) stop("x does not contain params object!")
  if (!("sigma_bm" %in% names(x$params))) stop("x does not contain sigma_bm parameter!")
  sigma = x$params["sigma_bm"]
  dat_p_vals = pic_outliers(x$phy, x$dat, sigma)
  # replace infinite scores (p-value underflow) with 1.5x the largest finite one
  max_p_val = max(dat_p_vals[dat_p_vals != Inf])
  if (any(dat_p_vals==Inf)) {
    inf_idx = dat_p_vals==Inf
    dat_p_vals[inf_idx] = 1.5 * max_p_val
  }
  filt_p_vals = dat_p_vals[dat_p_vals>=cutoff]
  p_cols = get_p_cols(filt_p_vals)
  plot(x$phy,adj=adj,cex=cex,main=main)
  nodelabels(pch=16,node = as.numeric(names(p_cols)), col=p_cols,frame="circle")
  invisible(dat_p_vals)
}
# Dependencies library(tidyverse) library(igraph) library(ggraph) library(boot) library(ergm) # Load socio matrix load('data/socio_matrix.RData') # Load datasets users <- read_csv('data/users.csv', col_types='ncnccccclcnnn') %>% filter(!is.na(id)) %>% distinct friendship <- read_csv('data/friendship.csv', col_types='cc') positions <- read_csv('data/positions.csv', col_types='cnccnnc') # Keep only friendship with valid users friendship <- friendship %>% inner_join(users %>% select(id), by=c('from'='id')) %>% inner_join(users %>% select(id), by=c('to'='id')) # Get positions for each univeristy user positions <- positions %>% select(id=id, name=university, country, location, lat, lng) %>% mutate(id=as.character(id)) %>% distinct # Remove duplicated positions positions <- positions %>% group_by(id) %>% slice(1) %>% ungroup # Add position info to university data users <- users %>% select(-location, -country) %>% left_join(positions, by=c('id'='id')) # Create igraph from adjacency matrix net.full = graph_from_adjacency_matrix(socio.matrix, mode='directed', weighted=NULL, diag=F) net.full # Show network info str(net.full) # Network analysis E(net.full) # Show edges V(net.full) # Show vertices # Compute and show edge density edge_density(net.full) # Add degree column to users users <- users %>% add_column(degree=degree(net.full)) # Get users which have no friends (i.e. overall degree 0) v.isolated <- which(degree(net.full) < 1) str(v.isolated) # Remove isolated vertices net = delete_vertices(net.full, v.isolated) net # Outdegree analysis (sociability?) 
# Add outdegree column to users users <- users %>% add_column(out_degree = apply(socio.matrix, 1, sum)) # Show outdegree ranking barplot ggplot(data=users, aes(x=reorder(twitter_name, -out_degree), y=out_degree, fill=out_degree)) + geom_bar(stat='identity') + theme(axis.text.x = element_text(angle=90, size=6)) + theme(legend.position = 'none') + labs(x='University', y='Outdegree') # Comment on the plot: units with highest outdegree seem to be the less known ones # Show outdegree histogram ggplot(data=users, aes(x=out_degree, y=..density.., fill=1)) + geom_histogram(bins=30) + theme(legend.position = 'none') + labs(x='Outdegree', y='Frequency') # Indegree analysis (leader) # Add indegree column to users users <- users %>% add_column(in_degree = apply(socio.matrix, 2, sum)) # Show indegree ranking braplot ggplot(data=users, aes(x=reorder(twitter_name, -in_degree), y=in_degree, fill=in_degree)) + geom_bar(stat='identity') + theme(axis.text.x = element_text(angle=90, size=6)) + theme(legend.position = 'none') + labs(x='University', y='Indegree') # About the plot: most followed universities seem to be the better known ones # Show indegree histogram ggplot(data=users, aes(x=in_degree, y=..density.., fill=1)) + geom_histogram(bins=30) + theme(legend.position = 'none') + labs(x='Indegree', y='Frequency') # Joint outdegree and indegree analysis (is the outdegree inversely proportional to indegree?) 
# Show scatter plot of indegree and outdegree
ggplot(data=users, aes(x=out_degree, y=in_degree, colour=twitter_name, label=twitter_name)) +
  geom_point() +
  geom_text(aes(label=twitter_name), hjust=-0.2, vjust=0, size=3) +
  theme(legend.position = 'none') +
  labs(x='Outdegree', y='Indegree')

# Compute distribution of indegree
indeg.dist <- users %>% select(in_degree) %>% count(in_degree) %>% mutate(f=n/sum(n))
# Compute distribution of outdegree
outdeg.dist <- users %>% select(out_degree) %>% count(out_degree) %>% mutate(f=n/sum(n))

# Plot degree distribution
ggplot(data=NULL) +
  geom_point(data=indeg.dist, aes(x=in_degree, y=f, color=as.factor('In'))) +
  geom_point(data=outdeg.dist, aes(x=out_degree, y=f, color=as.factor('Out'))) +
  labs(x='Degree', y='Frequency', color='Degree type') +
  theme(legend.position=c(.91, .91))

# Plot degree distribution (log-log scale, to eyeball power-law behavior)
ggplot(data=NULL) +
  geom_point(data=indeg.dist, aes(x=log10(in_degree), y=log10(f), color=as.factor('In'))) +
  geom_point(data=outdeg.dist, aes(x=log10(out_degree), y=log10(f), color=as.factor('Out'))) +
  labs(x='Degree', y='Frequency', color='Degree type') +
  theme(legend.position=c(.91, .91))

# Plot cumulative degree distribution
ggplot(data=NULL) +
  geom_point(data=indeg.dist, aes(x=log10(in_degree), y=cumsum(log10(f)), color=as.factor('In'))) +
  geom_point(data=outdeg.dist, aes(x=log10(out_degree), y=cumsum(log10(f)), color=as.factor('Out'))) +
  labs(x='Degree', y='Frequency', color='Degree type') +
  theme(legend.position=c(.91, .91))

# Check shortest paths
# Note that these shortest paths take into account directionality
shortest_paths(net.full, from='39585367') # From harvard
shortest_paths(net.full, from='562781948') # From UCT_news

# Other measures
# Compute and add betweenness score to users table (sqrt-scaled for plotting)
users <- users %>% add_column(betweenness = sqrt(betweenness(net.full)))

# Show betweenness histogram
ggplot(data=users, aes(x=reorder(twitter_name, -betweenness), y=betweenness, fill=betweenness)) +
  geom_bar(stat='identity') +
  theme(axis.text.x=element_text(angle=90, size=6)) +
  theme(legend.position='none') +
  labs(x='University', y='Betweenness')

# Compute diameter
diameter(net.full)

# # Plot network
# plot(net.full, edge.arrow.size=0.2, vertex.label=NA)
# plot(net.full, layout=layout.fruchterman.reingold, vertex.label=NA)

# Inference
# SRG MODEL
# Evaluation of the model is carried out through parametric bootstrap
# (As seen in the slides)

# Define a function for calculating the "standard error" statistic.
# NOTE(review): this is actually the population standard deviation of x
# (sqrt of mean squared deviation), not a standard error of the mean; it is
# used below as the bootstrap statistic of the degree distributions.
std.error <- function(x) {
  # Get the mean of x
  x.mu <- mean(x)
  # Get the root mean squared deviation
  x.sq <- sqrt(sum((x-x.mu)**2)/length(x))
  # Return it
  return(x.sq)
}

# Get standard errors of outdegree and indegree
std.error(users %>% select(out_degree) %>% pull) # Outdegree
std.error(users %>% select(in_degree) %>% pull) # Indegree

# # earlier draft: function computing the std error of outDegree and inDegree
# stat <- function(data)
# {
#   out.deg <- apply(data, 1,sum, na.rm= T)
#   in.deg <- apply(data, 2,sum, na.rm = T)
#   out.mu <- mean(out.deg)
#   in.mu <- mean(in.deg)
#   out.sq <- sqrt(sum((out.deg-out.mu)**2)/length(data[,1]))
#   in.sq <- sqrt(sum((in.deg-in.mu)**2)/length(data[,1]))
#   c(in.sq,out.sq)
# }
# stat(socio.matrix)

# Generator for the network under the RGM: in practice, draw 199*199
# independent Bernoulli variables with probability equal to the network
# density and store them in a matrix.
# Define a simple random graph (SRG) random graph model (RGM)
# It is composed of N*N i.i.d. Bernoulli, whose probabilities are given by network density
# It takes as input the adjacency matrix and a probability
rgm.gen <- function(socio.matrix, prob) {
  # Define size of the adjacency matrix
  socio.matrix.dim <- dim(socio.matrix)
  # Define number of nodes
  n.nodes <- socio.matrix.dim[1]
  # Define number of edges (directed graph)
  n.edges <- n.nodes ** 2
  # Create new adjacency matrix
  out.matrix <- matrix(data=rbinom(n=n.edges, size=1, prob=prob), ncol=n.nodes)
  diag(out.matrix) <- NA # Remove elements on the diagonal
  # Return the newly generated matrix
  return(out.matrix)
}

# Define statistic function for SRG: SDs of in- and outdegree of a matrix.
rgm.stat <- function(socio.matrix) {
  # Get outdegree and indegree
  out.deg <- apply(socio.matrix, 1, sum, na.rm=T)
  in.deg <- apply(socio.matrix, 2, sum, na.rm=T)
  # Compute standard errors
  out.std.error <- std.error(out.deg)
  in.std.error <- std.error(in.deg)
  # Return computed standard errors
  return(c(in.std.error, out.std.error))
}

# Compute parametric bootstrap (using boot(...) function)
R <- 10**4 # Define number of replicates
mu <- mean(socio.matrix) # Define mean mu (= network density used by rgm.gen)
srg.boot <- boot(data=socio.matrix, statistic=rgm.stat, R=R, sim='parametric', ran.gen=rgm.gen, mle=mu)

# Save the bootstrapped model
save(srg.boot, file='data/models/srg_bootstrap')

# Analysis of the distribution of the standard error statistic for indegree, under the SRG hypothesis
ggplot(data=NULL, aes(x=srg.boot$t[, 1], y=..density.., fill=1)) +
  geom_histogram(bins=200) +
  geom_vline(xintercept=srg.boot$t0[1], color=2) +
  theme(legend.position='none') +
  labs(title='Bootstrapped indegree coefficients distribution', x='Bootstrapped coefficients', y='Density')
# head(srg.boot$t)
# hist(srg.boot$t[,1], nclass = 50, xlim = c(2,16))
# abline(v=srg.boot$t0[1],col=2)

# Analysis of the distribution of the standard error statistic for outdegree, under the SRG hypothesis
ggplot(data=NULL, aes(x=srg.boot$t[, 2], y=..density.., fill=1)) +
  geom_histogram(bins=200) +
  geom_vline(xintercept=srg.boot$t0[2], color=2) +
  theme(legend.position='none') +
  labs(title='Bootstrapped outdegree coefficients distribution', x='Bootstrapped coefficients', y='Density')
# hist(boot.SRG$t[,2], nclass = 50, xlim = c(2,16))
# abline(v=boot.SRG$t0[2],col=2)
# mean(boot.SRG$t[,2]>boot.SRG$t0[2]) # for what it's worth

# Conclusions: data seem to slightly refuse the hypothesis of the network being a simple random graph (SRG).
# Nodes show much higher heterogeneity either in indegree and outdegree coefficients

# ANOVA model
# Both sociability and attractiveness must be treated as individual factors.
# Initialize new sociomatrix
Y <- socio.matrix # Copy sociomatrix by value
diag(Y) <- NA # Set values on the diagonal to NA

# Define latent factors
# Rows matrix: each cell contains row index
row.matrix <- matrix(data=(1:nrow(Y)), nrow=nrow(Y), ncol=nrow(Y))
# Columns matrix: each cell contains column index
col.matrix <- t(row.matrix) # Just transpose previous matrix
# Ridx[1:4,1:4]
# Cidx[1:4,1:4]
#
# Y[1:4,1:4]

# Vectorization (column-major, so all three vectors stay aligned cell-by-cell)
y <- c(Y) # Vectorize sociomatrix
row.v <- c(row.matrix) # Vectorize rows matrix
col.v <- c(col.matrix) # vectorize columns matrix
row.v[1:20]

# Fit logistic regression (row and column effects as factors)
fit.rce.nocent <- glm(y ~ factor(row.v) + factor(col.v), family=binomial)

# Check fit summary
summary(fit.rce.nocent)
summary(glm(y~1))
# RCE overfits (too much adaptation)

# Save the model
save(fit.rce.nocent, file='data/models/rce_nocent')

# Estimation of the model in which individual factors are related to deviation from the mean
# C(...) function creates the contrasts (sum-to-zero coding)
fit.rce.cent <- glm(y ~ C(factor(row.v), sum) + C(factor(col.v), sum) , family=binomial)

# Check fit summary
summary(fit.rce.cent)

# Save the model
save(fit.rce.cent, file='data/models/rce_cent')

# Individual effects
mu.hat <- fit.rce.cent$coef[1] # Estimated mean

# Estimate a (row/sender effects; last level recovered from the sum-to-zero constraint)
a.hat <- fit.rce.cent$coef[1 + 1:(nrow(Y) - 1)]
a.hat <- c(a.hat, -sum(a.hat))
summary(a.hat) # Summarize estimated a

# Estimate b (column/receiver effects)
b.hat <- fit.rce.cent$coef[nrow(Y) + 1:(nrow(Y) - 1)]
b.hat <- c(b.hat, -sum(b.hat))
summary(b.hat) # Summarize estimated b

# Compute estimated probabilities for every cell (inverse logit of mu + a_i + b_j)
mu.ij.mle <- mu.hat + outer(a.hat, b.hat, '+') # Mu ij maximum likelihood estimation
p.mle <- exp(mu.ij.mle) / (1 + exp(mu.ij.mle))
diag(p.mle) <- NA

# Analysis of the inferential model through parametric bootstrap
# First: check if generator for RGM is still working for this model
rgm.gen(Y, p.mle)

# Define statistics function for RCE
rce.stat <- function(socio.matrix) {
  # Compute standard error for out- and in- degree
  rgm.out <- rgm.stat(socio.matrix)
  out.std.error <- rgm.out[2]
  in.std.error <- rgm.out[1]
  # Add mutual dyads (useful for independency-of-y-variables evaluation)
  conc <- (socio.matrix == t(socio.matrix))
  mu.dy <- sum(conc[socio.matrix == 1], na.rm=T) / 2
  # Return standard errors and mutual dyads
  return(c(in.std.error, out.std.error, mu.dy))
}
# stat(Y)
# # check that rangen1 also works for this model
# ran.gen1(Y,p.mle)
# # all good

# Compute bootstrapped model
R <- 10**4 # Replicates
rce.boot <- boot(data=socio.matrix, statistic=rce.stat, R=R, sim='parametric', ran.gen=rgm.gen, mle=p.mle)

# Save the bootstrapped model
save(rce.boot, file='data/models/rce_bootstrap')

# Indegree statistic: analysis of the standard error distribution, under the RCE hypothesis
ggplot(data=NULL, aes(x=rce.boot$t[, 1], y=..density.., fill=1)) +
  geom_histogram(bins=200) +
  geom_vline(xintercept=rce.boot$t0[1], color=2) +
  theme(legend.position='none') +
  labs(title='Bootstrapped indegree coefficients distribution', x='Bootstrapped coefficients', y='Density')
# Bilateral p-value
rce.in.pval <- 2 * mean(rce.boot$t[, 1]< rce.boot$t0[1])
rce.in.pval

# Outdegree statistic: analysis of the standard error distribution, under the RCE hypothesis
ggplot(data=NULL, aes(x=rce.boot$t[, 2], y=..density.., fill=1)) +
  geom_histogram(bins=200) +
  geom_vline(xintercept=rce.boot$t0[2], color=2) +
  theme(legend.position='none') +
  labs(title='Bootstrapped outdegree coefficients distribution', x='Bootstrapped coefficients', y='Density')
# Bilateral p-value
rce.out.pval <- 2 * mean(rce.boot$t[, 2] < rce.boot$t0[2])
rce.out.pval

# Mutual dyads statistic: analysis of the standard error distribution, under the RCE hypothesis
ggplot(data=NULL, aes(x=rce.boot$t[, 3], y=..density.., fill=1)) +
  geom_histogram(bins=200) +
  geom_vline(xintercept=rce.boot$t0[3], color=2) +
  theme(legend.position='none') +
  labs(title='Bootstrapped mutual dyads coefficients distribution', x='Bootstrapped coefficients', y='Density')

# Conclusions: outdegree and indegree are very well modelled by RCE model.
# However, it turns out from mutual dyads distribution that there are some dependency
# effects which are still not considered and which must therefore be investigated.
# More complex model: Exponentially parametrized Random Graph Model (ERGM) # This model takes into account either some covariates # Add some covariates country <- (users %>% select(country) %>% pull) is_usa <- !is.na(country) & country == 'US' ranking <- c(rep('top25', 25), rep('top50', 25), rep('top100', 50), rep('top225', 223 - (25 + 25 + 50))) # Define number of followers followers <- users %>% select(followers_count) %>% pull # Define number of friends friends <- users %>% select(friends_count) %>% pull # Define if user is verified verified <- users %>% select(verified) %>% pull # cbind(rownames(socio.matrix), usa) rankA <- c(rep(1,25),rep(0,200)) rankB <- c(rep(0,25),rep(1,25),rep(0,175)) rankC <- c( rep(0,50), rep(1,50), rep(0,125)) rankD <- c( rep(0,100),rep(1,125)) # New network object ergm.net <- as.network(socio.matrix[-v.isolated,-v.isolated]) diag(socio.matrix) <- NA set.vertex.attribute(ergm.net, 'is_usa', is_usa[-v.isolated]) set.vertex.attribute(ergm.net, 'ranking', ranking[-v.isolated]) set.vertex.attribute(ergm.net, 'followers', followers[-v.isolated]) set.vertex.attribute(ergm.net, 'friends', friends[-v.isolated]) set.vertex.attribute(ergm.net, 'verified', verified[-v.isolated]) set.vertex.attribute(ergm.net, 'ranking', ranking[-v.isolated]) # creo anche questi perch? 
vado meglio a capire che contrasti fare set.vertex.attribute(ergm.net, 'rank25', rankA[-v.isolated]) set.vertex.attribute(ergm.net, 'rank50', rankB[-v.isolated]) set.vertex.attribute(ergm.net, 'rank100', rankC[-v.isolated]) set.vertex.attribute(ergm.net, 'rank225', rankD[-v.isolated]) # Fit ergm equivalent to logistic regression (independece of y) fit.ergm.0 <- ergm(ergm.net ~ edges) # AIC: 21982 (very high) summary(fit.ergm.0) # Fit new ergm model fit.ergm.1 <- ergm(ergm.net ~ edges + nodeofactor('is_usa') + nodeifactor('is_usa') + nodeofactor('ranking', levels=c('top25', 'top50', 'top100')) + nodeifactor('ranking', levels=c('top25', 'top50', 'top100')) + nodematch('is_usa') + nodematch('ranking')) # AIC: 20445 (still too high) summary(fit.ergm.1) # RCE effects modeled through ERGM fit.ergm.2 <- ergm(ergm.net ~ edges + sender + receiver) # AIC: 23357 summary(fit.ergm.2) # Trying new models to lower AIC score fit.ergm.3 <- ergm(ergm.net ~ edges + sender) summary(fit.ergm.3) # AIC: 25298 fit.ergm.4 <- ergm(ergm.net ~ edges + receiver) summary(fit.ergm.4) # AIC: 20506 # Model with outdegree and indegree fit.ergm.5 <- ergm(ergm.net ~ edges + mutual) summary(fit.ergm.5) # AIC: 20404 # Best model at this time fit.ergm.6 <- ergm(ergm.net ~ edges + mutual + nodeofactor('is_usa') + nodeifactor('is_usa') + nodematch('is_usa') + nodeocov('friends') + nodeicov('friends') + nodeocov('followers') + nodeicov('followers') + nodeocov('verified') + nodeicov('verified') + nodematch('verified') + nodeofactor('ranking', levels=c('top25', 'top50', 'top100')) + nodeifactor('ranking', levels=c('top25', 'top50', 'top100')) + nodemix('ranking', levels=c('top25', 'top50', 'top100')), control = control.ergm(MCMLE.maxit = 30)) summary(fit.ergm.6) # AIC # Save model to disk save(fit.ergm.6, file='data/models/ergm_6') # Search for a relationship between ranking, outdegree and indegree # Add ranking attribute to user users <- users %>% add_column(ranking=1:(dim(users)[1])) # Plot outdegree with 
respect to ranking position ggplot(users, aes(x=reorder(twitter_name, ranking), y=out_degree, fill=1)) + geom_bar(stat='identity') + theme(axis.text.x=element_text(angle=90, size=6)) + theme(legend.position='none') + labs(x='University', y='Outdegree') # Plot indegree with respect to ranking position ggplot(users, aes(x=reorder(twitter_name, ranking), y=in_degree, fill=1)) + geom_bar(stat='identity') + theme(axis.text.x=element_text(angle=90, size=6)) + theme(legend.position='none') + labs(x='University', y='Indegree') # Get out degree array out.degree <- users %>% select(out_degree) %>% pull # Show quantiles quantile(out.degree, probs=c(0.25,0.5,0.75,0.90)) # stampo le universit? che hanno un outdegree che supera il 75% percentile # Show universities whose outdegree exceeds the 75-th percentile outliers <- (out.degree > 20) outliers # There are 25% circa of anomal # commento outdegree: ? evidente come vi sia circa un 25% di ossservazioni anomale # va indagato il perch? fit.ergm.7 <- ergm(ergm.net ~ edges + mutual+ + nodeocov('is_usa') + nodeicov('is_usa') + nodematch('is_usa') + nodeocov('friends') + nodeicov('friends') + nodeocov('followers') + nodeicov('followers') + nodeocov('verified') + nodeicov('verified') + nodematch('verified') + nodeocov('rank25') + nodeicov('rank25')+nodematch('rank25', levels=1) + nodeocov('rank50') + nodeicov('rank50')+nodematch('rank50', levels=1) + nodeocov('rank100') + nodeicov('rank100')+nodematch('rank100', levels=1) + nodematch('rank225', levels=1), control = control.ergm(MCMLE.maxit = 30,MCMC.samplesize = 2048) ) # AIC: 17099 summary(fit.ergm.7) # Commento del modello # Questo modello sembra il migliore e offre anche delle interpretazioni interessanti # le universit? tendono a seguire di pi? se hanno un renking scarso mentre vengono seguite # di pi? le universit? pi? famose. 
Stesso discorso tra americane e non # scrivo la funzione per generare dal modello stimato # NB data=socio.matrix fit invece deve essere l'ergm che vogliomo testare ergm.gen <- function(socio.matrix, fit) { # Compute new matrix out.matrix <- as.matrix(simulate(fit)[1:nrow(socio.matrix), 1:nrow(socio.matrix)]) diag(out.matrix) <- NA # return computed matrix return(out.matrix) } # apply(ran.gen2(socio.matrix,fit.ergm7),1,sum,na.rm=T) # Faccio il bootstrap parametrico utilizzando la funzione boot R <- 200 # Number of replicates ergm.boot <- boot(data=socio.matrix, statistic=rce.stat, R=R, sim='parametric', ran.gen=ergm.gen, mle=fit.ergm.7) ergm.boot$t0 # Save the model to disk save(ergm.boot, file='data/models/ergm_bootstrap') # i warnigns erano previsti # Indegree statistic: analysis of the standard error distribution, under the ERGM hypotesis ggplot(data=NULL, aes(x=ergm.boot$t[, 1], y=..density.., fill=1)) + geom_histogram(bins=30) + geom_vline(xintercept=ergm.boot$t0[1], color=2) + theme(legend.position='none') + labs(title='Bootstrapped indegree coefficients distribution', x='Bootstrapped coefficients', y='Density') # Bilateral p-value ergm.in.pval <- 2 * mean(ergm.boot$t[, 1]< ergm.boot$t0[1]) ergm.in.pval # Outdegree statistic: analysis of the standard error distribution, under the ERGM hypotesis ggplot(data=NULL, aes(x=ergm.boot$t[, 2], y=..density.., fill=1)) + geom_histogram(bins=30) + geom_vline(xintercept=ergm.boot$t0[2], color=2) + theme(legend.position='none') + labs(title='Bootstrapped outdegree coefficients distribution', x='Bootstrapped coefficients', y='Density') # Bilateral p-value ergm.out.pval <- 2 * mean(ergm.boot$t[, 2]< ergm.boot$t0[2]) ergm.out.pval # Mutual dyads statistic: analysis of the standard error distribution, under the ERGM hypotesis ggplot(data=NULL, aes(x=ergm.boot$t[, 3], y=..density.., fill=1)) + geom_histogram(bins=30) + geom_vline(xintercept=ergm.boot$t0[3], color=2) + theme(legend.position='none') + labs(title='Bootstrapped 
mutual dyads coefficients distribution', x='Bootstrapped coefficients', y='Density') # Commento: in realt? il risultato ? ancora abbastanza insoddisfacente in outdegree e indegree
/inference.R
permissive
damiclem/smhdd_network
R
false
false
21,415
r
# Dependencies library(tidyverse) library(igraph) library(ggraph) library(boot) library(ergm) # Load socio matrix load('data/socio_matrix.RData') # Load datasets users <- read_csv('data/users.csv', col_types='ncnccccclcnnn') %>% filter(!is.na(id)) %>% distinct friendship <- read_csv('data/friendship.csv', col_types='cc') positions <- read_csv('data/positions.csv', col_types='cnccnnc') # Keep only friendship with valid users friendship <- friendship %>% inner_join(users %>% select(id), by=c('from'='id')) %>% inner_join(users %>% select(id), by=c('to'='id')) # Get positions for each univeristy user positions <- positions %>% select(id=id, name=university, country, location, lat, lng) %>% mutate(id=as.character(id)) %>% distinct # Remove duplicated positions positions <- positions %>% group_by(id) %>% slice(1) %>% ungroup # Add position info to university data users <- users %>% select(-location, -country) %>% left_join(positions, by=c('id'='id')) # Create igraph from adjacency matrix net.full = graph_from_adjacency_matrix(socio.matrix, mode='directed', weighted=NULL, diag=F) net.full # Show network info str(net.full) # Network analysis E(net.full) # Show edges V(net.full) # Show vertices # Compute and show edge density edge_density(net.full) # Add degree column to users users <- users %>% add_column(degree=degree(net.full)) # Get users which have no friends (i.e. overall degree 0) v.isolated <- which(degree(net.full) < 1) str(v.isolated) # Remove isolated vertices net = delete_vertices(net.full, v.isolated) net # Outdegree analysis (sociability?) 
# Add outdegree column to users users <- users %>% add_column(out_degree = apply(socio.matrix, 1, sum)) # Show outdegree ranking barplot ggplot(data=users, aes(x=reorder(twitter_name, -out_degree), y=out_degree, fill=out_degree)) + geom_bar(stat='identity') + theme(axis.text.x = element_text(angle=90, size=6)) + theme(legend.position = 'none') + labs(x='University', y='Outdegree') # Comment on the plot: units with highest outdegree seem to be the less known ones # Show outdegree histogram ggplot(data=users, aes(x=out_degree, y=..density.., fill=1)) + geom_histogram(bins=30) + theme(legend.position = 'none') + labs(x='Outdegree', y='Frequency') # Indegree analysis (leader) # Add indegree column to users users <- users %>% add_column(in_degree = apply(socio.matrix, 2, sum)) # Show indegree ranking braplot ggplot(data=users, aes(x=reorder(twitter_name, -in_degree), y=in_degree, fill=in_degree)) + geom_bar(stat='identity') + theme(axis.text.x = element_text(angle=90, size=6)) + theme(legend.position = 'none') + labs(x='University', y='Indegree') # About the plot: most followed universities seem to be the better known ones # Show indegree histogram ggplot(data=users, aes(x=in_degree, y=..density.., fill=1)) + geom_histogram(bins=30) + theme(legend.position = 'none') + labs(x='Indegree', y='Frequency') # Joint outdegree and indegree analysis (is the outdegree inversely proportional to indegree?) 
# Show scatter plot of indegree and outdegree ggplot(data=users, aes(x=out_degree, y=in_degree, colour=twitter_name, label=twitter_name)) + geom_point() + geom_text(aes(label=twitter_name), hjust=-0.2, vjust=0, size=3) + theme(legend.position = 'none') + labs(x='Outdegree', y='Indegree') # Compute distribution of indegree indeg.dist <- users %>% select(in_degree) %>% count(in_degree) %>% mutate(f=n/sum(n)) # Compute distribution of indegree outdeg.dist <- users %>% select(out_degree) %>% count(out_degree) %>% mutate(f=n/sum(n)) # Plot degree distribution ggplot(data=NULL) + geom_point(data=indeg.dist, aes(x=in_degree, y=f, color=as.factor('In'))) + geom_point(data=outdeg.dist, aes(x=out_degree, y=f, color=as.factor('Out'))) + labs(x='Degree', y='Frequency', color='Degree type') + theme(legend.position=c(.91, .91)) # Plot degree distribution ggplot(data=NULL) + geom_point(data=indeg.dist, aes(x=log10(in_degree), y=log10(f), color=as.factor('In'))) + geom_point(data=outdeg.dist, aes(x=log10(out_degree), y=log10(f), color=as.factor('Out'))) + labs(x='Degree', y='Frequency', color='Degree type') + theme(legend.position=c(.91, .91)) # Plot cumulative degree distribution ggplot(data=NULL) + geom_point(data=indeg.dist, aes(x=log10(in_degree), y=cumsum(log10(f)), color=as.factor('In'))) + geom_point(data=outdeg.dist, aes(x=log10(out_degree), y=cumsum(log10(f)), color=as.factor('Out'))) + labs(x='Degree', y='Frequency', color='Degree type') + theme(legend.position=c(.91, .91)) # Check shortest paths # Note that these shortest paths take into account directionality shortest_paths(net.full, from='39585367') # From harvard shortest_paths(net.full, from='562781948') # From UCT_news # Other measures # Compute and add betweenness score to users table users <- users %>% add_column(betweenness = sqrt(betweenness(net.full))) # Show betweenness histogram ggplot(data=users, aes(x=reorder(twitter_name, -betweenness), y=betweenness, fill=betweenness)) + geom_bar(stat='identity') + 
theme(axis.text.x=element_text(angle=90, size=6)) + theme(legend.position='none') + labs(x='University', y='Betweenness') # Compute diameter diameter(net.full) # # Plot network # plot(net.full, edge.arrow.size=0.2, vertex.label=NA) # plot(net.full, layout=layout.fruchterman.reingold, vertex.label=NA) # Inference # SRG MODEL # Evaluation of the model is carried out through parametric bootstrap # (As seen in the slides) # Define of a function for calculating standard error std.error <- function(x) { # Get the mean of x x.mu <- mean(x) # Get squared deviations x.sq <- sqrt(sum((x-x.mu)**2)/length(x)) # Return squared deviation return(x.sq) } # Get standard errors of outdegree and indegree std.error(users %>% select(out_degree) %>% pull) # Outdegree std.error(users %>% select(in_degree) %>% pull) # Indegree # # scrivo la funzione per calcolare lo std error di outDegree e inDegree # stat <- function(data) # { # out.deg <- apply(data, 1,sum, na.rm= T) # in.deg <- apply(data, 2,sum, na.rm = T) # out.mu <- mean(out.deg) # in.mu <- mean(in.deg) # out.sq <- sqrt(sum((out.deg-out.mu)**2)/length(data[,1])) # in.sq <- sqrt(sum((in.deg-in.mu)**2)/length(data[,1])) # c(in.sq,out.sq) # } # stat(socio.matrix) # scrivo la funzione per generare la rete secondo RGM # in pratica genero 199*199 bernuolli indipendenti di probabilit? la densit? della rete # e le salvo in una matrice # Define a simple random graph (SRG) random graph model (RGM) # It is composed of N*N i.i.d. 
Bernoulli, wose probabilities are given by network density # It takes as input the adjacency matrix and a probability rgm.gen <- function(socio.matrix, prob) { # Define size of the adjacency matrix socio.matrix.dim <- dim(socio.matrix) # Define number of nodes n.nodes <- socio.matrix.dim[1] # Define number of edges (directed graph) n.edges <- n.nodes ** 2 # Create new adjacency matrix out.matrix <- matrix(data=rbinom(n=n.edges, size=1, prob=prob), ncol=n.nodes) diag(out.matrix) <- NA # Remove elements on the diagonal # Return the newly generated matrix return(out.matrix) } # Define statistic function for SRG rgm.stat <- function(socio.matrix) { # Get outdegree and indegree out.deg <- apply(socio.matrix, 1, sum, na.rm=T) in.deg <- apply(socio.matrix, 2, sum, na.rm=T) # Compute standard errors out.std.error <- std.error(out.deg) in.std.error <- std.error(in.deg) # Return computed standard errors return(c(in.std.error, out.std.error)) } # Compute paraemtrix bootstrap (using boot(...) function) R <- 10**4 # Define number of replicates mu <- mean(socio.matrix) # Define mean mu srg.boot <- boot(data=socio.matrix, statistic=rgm.stat, R=R, sim='parametric', ran.gen=rgm.gen, mle=mu) # Save the bootstrapped model save(srg.boot, file='data/models/srg_bootstrap') # Analysis of the distribution of the standard error statistic for indegree, under SRG hypotesis ggplot(data=NULL, aes(x=srg.boot$t[, 1], y=..density.., fill=1)) + geom_histogram(bins=200) + geom_vline(xintercept=srg.boot$t0[1], color=2) + theme(legend.position='none') + labs(title='Bootstrapped indegree coefficients distribution', x='Bootstrapped coefficients', y='Density') # head(srg.boot$t) # hist(srg.boot$t[,1], nclass = 50, xlim = c(2,16)) # abline(v=srg.boot$t0[1],col=2) # Analysis of the distribution of the standard error statistic for outdegree, under SRG hypotesis ggplot(data=NULL, aes(x=srg.boot$t[, 2], y=..density.., fill=1)) + geom_histogram(bins=200) + geom_vline(xintercept=srg.boot$t0[2], color=2) + 
theme(legend.position='none') + labs(title='Bootstrapped outdegree coefficients distribution', x='Bootstrapped coefficients', y='Density') # hist(boot.SRG$t[,2], nclass = 50, xlim = c(2,16)) # abline(v=boot.SRG$t0[2],col=2) # mean(boot.SRG$t[,2]>boot.SRG$t0[2]) # per quello che vale # Conclusions: data seem to sightly refuse the hypotesis of the network boing a simple random graph (SRG). # Nodes show much higher enerogeneity either in indegree and outdegree coefficeints # ANOVA model # bisogna classificare come fattori indivuduali sia la socialit? che l'attrativit? # Initialize new sociomatrix Y <- socio.matrix # Copy sociomatrix by value diag(Y) <- NA # Set values on the diagonal to NA # Define latent factors # Rows matrix: each cell contains row index row.matrix <- matrix(data=(1:nrow(Y)), nrow=nrow(Y), ncol=nrow(Y)) # Columns matrix: each cell contains column index col.matrix <- t(row.matrix) # Just transpose previous matrix # Ridx[1:4,1:4] # Cidx[1:4,1:4] # # Y[1:4,1:4] # Vectorization y <- c(Y) # Vectorize sociomatrix row.v <- c(row.matrix) # Vectorize rows matrix col.v <- c(col.matrix) # vectorize columns matrix row.v[1:20] # Fit logistic regression fit.rce.nocent <- glm(y ~ factor(row.v) + factor(col.v), family=binomial) # Check fit summary summary(fit.rce.nocent) summary(glm(y~1)) # RCE ha troppo adattamento # Save the model save(fit.rce.nocent, file='data/models/rce_nocent') # Estimation of the model in which individual factors are related to deviation from the mean # C(...) 
function creates the contrasts fit.rce.cent <- glm(y ~ C(factor(row.v), sum) + C(factor(col.v), sum) , family=binomial) # Check fit summary summary(fit.rce.cent) # Save the model save(fit.rce.cent, file='data/models/rce_cent') # Individual effects mu.hat <- fit.rce.cent$coef[1] # Estimated mean # Estimate a a.hat <- fit.rce.cent$coef[1 + 1:(nrow(Y) - 1)] a.hat <- c(a.hat, -sum(a.hat)) summary(a.hat) # Summarize estimated a # Estimate b b.hat <- fit.rce.cent$coef[nrow(Y) + 1:(nrow(Y) - 1)] b.hat <- c(b.hat, -sum(b.hat)) summary(b.hat) # Summarize estimated b # Compute estimated probabilities for every cell mu.ij.mle <- mu.hat + outer(a.hat, b.hat, '+') # Mu ij maximum likelihood estimation p.mle <- exp(mu.ij.mle) / (1 + exp(mu.ij.mle)) diag(p.mle) <- NA # Analysis of the inferential model through parameteric bootstrap # First: check if generator for RGM is still working for this model rgm.gen(Y, p.mle) # Define statistics function for RCE rce.stat <- function(socio.matrix) { # Compute standard error for out- and in- degree rgm.out <- rgm.stat(socio.matrix) out.std.error <- rgm.out[2] in.std.error <- rgm.out[1] # Add mutual dyads (useful for independecy of y variables evaluation) conc <- (socio.matrix == t(socio.matrix)) mu.dy <- sum(conc[socio.matrix == 1], na.rm=T) / 2 # Return standard errors and mutual dyads return(c(in.std.error, out.std.error, mu.dy)) } # stat(Y) # # controllo che rangen1 funzioni anche per questo modello # ran.gen1(Y,p.mle) # # tutto ok # Compute bootstrapped model R <- 10**4 # Replicates rce.boot <- boot(data=socio.matrix, statistic=rce.stat, R=R, sim='parametric', ran.gen=rgm.gen, mle=p.mle) # Save the bootstrapped model save(rce.boot, file='data/models/rce_bootstrap') # Indegree statistic: analysis of the standard error distribution, under the RCE hypotesis ggplot(data=NULL, aes(x=rce.boot$t[, 1], y=..density.., fill=1)) + geom_histogram(bins=200) + geom_vline(xintercept=rce.boot$t0[1], color=2) + theme(legend.position='none') + 
labs(title='Bootstrapped indegree coefficients distribution', x='Bootstrapped coefficients', y='Density') # Bilateral p-value rce.in.pval <- 2 * mean(rce.boot$t[, 1]< rce.boot$t0[1]) rce.in.pval # Outdegree statistic: analysis of the standard error distribution, under the RCE hypotesis ggplot(data=NULL, aes(x=rce.boot$t[, 2], y=..density.., fill=1)) + geom_histogram(bins=200) + geom_vline(xintercept=rce.boot$t0[2], color=2) + theme(legend.position='none') + labs(title='Bootstrapped outdegree coefficients distribution', x='Bootstrapped coefficients', y='Density') # Bilateral p-value rce.out.pval <- 2 * mean(rce.boot$t[, 2] < rce.boot$t0[2]) rce.out.pval # Mutual dyads statistic: analysis of the standard error distribution, under the RCE hypotesis ggplot(data=NULL, aes(x=rce.boot$t[, 3], y=..density.., fill=1)) + geom_histogram(bins=200) + geom_vline(xintercept=rce.boot$t0[3], color=2) + theme(legend.position='none') + labs(title='Bootstrapped mutual dyads coefficients distribution', x='Bootstrapped coefficients', y='Density') # Conclusions: outdegree and indegree are very well modelled by RCE model. # However, it turns out from mutual dyads distribution that there are some dependency # effects which are still not considered and which must therefore be investigated. 
# More complex model: Exponentially parametrized Random Graph Model (ERGM) # This model takes into account either some covariates # Add some covariates country <- (users %>% select(country) %>% pull) is_usa <- !is.na(country) & country == 'US' ranking <- c(rep('top25', 25), rep('top50', 25), rep('top100', 50), rep('top225', 223 - (25 + 25 + 50))) # Define number of followers followers <- users %>% select(followers_count) %>% pull # Define number of friends friends <- users %>% select(friends_count) %>% pull # Define if user is verified verified <- users %>% select(verified) %>% pull # cbind(rownames(socio.matrix), usa) rankA <- c(rep(1,25),rep(0,200)) rankB <- c(rep(0,25),rep(1,25),rep(0,175)) rankC <- c( rep(0,50), rep(1,50), rep(0,125)) rankD <- c( rep(0,100),rep(1,125)) # New network object ergm.net <- as.network(socio.matrix[-v.isolated,-v.isolated]) diag(socio.matrix) <- NA set.vertex.attribute(ergm.net, 'is_usa', is_usa[-v.isolated]) set.vertex.attribute(ergm.net, 'ranking', ranking[-v.isolated]) set.vertex.attribute(ergm.net, 'followers', followers[-v.isolated]) set.vertex.attribute(ergm.net, 'friends', friends[-v.isolated]) set.vertex.attribute(ergm.net, 'verified', verified[-v.isolated]) set.vertex.attribute(ergm.net, 'ranking', ranking[-v.isolated]) # creo anche questi perch? 
vado meglio a capire che contrasti fare set.vertex.attribute(ergm.net, 'rank25', rankA[-v.isolated]) set.vertex.attribute(ergm.net, 'rank50', rankB[-v.isolated]) set.vertex.attribute(ergm.net, 'rank100', rankC[-v.isolated]) set.vertex.attribute(ergm.net, 'rank225', rankD[-v.isolated]) # Fit ergm equivalent to logistic regression (independece of y) fit.ergm.0 <- ergm(ergm.net ~ edges) # AIC: 21982 (very high) summary(fit.ergm.0) # Fit new ergm model fit.ergm.1 <- ergm(ergm.net ~ edges + nodeofactor('is_usa') + nodeifactor('is_usa') + nodeofactor('ranking', levels=c('top25', 'top50', 'top100')) + nodeifactor('ranking', levels=c('top25', 'top50', 'top100')) + nodematch('is_usa') + nodematch('ranking')) # AIC: 20445 (still too high) summary(fit.ergm.1) # RCE effects modeled through ERGM fit.ergm.2 <- ergm(ergm.net ~ edges + sender + receiver) # AIC: 23357 summary(fit.ergm.2) # Trying new models to lower AIC score fit.ergm.3 <- ergm(ergm.net ~ edges + sender) summary(fit.ergm.3) # AIC: 25298 fit.ergm.4 <- ergm(ergm.net ~ edges + receiver) summary(fit.ergm.4) # AIC: 20506 # Model with outdegree and indegree fit.ergm.5 <- ergm(ergm.net ~ edges + mutual) summary(fit.ergm.5) # AIC: 20404 # Best model at this time fit.ergm.6 <- ergm(ergm.net ~ edges + mutual + nodeofactor('is_usa') + nodeifactor('is_usa') + nodematch('is_usa') + nodeocov('friends') + nodeicov('friends') + nodeocov('followers') + nodeicov('followers') + nodeocov('verified') + nodeicov('verified') + nodematch('verified') + nodeofactor('ranking', levels=c('top25', 'top50', 'top100')) + nodeifactor('ranking', levels=c('top25', 'top50', 'top100')) + nodemix('ranking', levels=c('top25', 'top50', 'top100')), control = control.ergm(MCMLE.maxit = 30)) summary(fit.ergm.6) # AIC # Save model to disk save(fit.ergm.6, file='data/models/ergm_6') # Search for a relationship between ranking, outdegree and indegree # Add ranking attribute to user users <- users %>% add_column(ranking=1:(dim(users)[1])) # Plot outdegree with 
respect to ranking position ggplot(users, aes(x=reorder(twitter_name, ranking), y=out_degree, fill=1)) + geom_bar(stat='identity') + theme(axis.text.x=element_text(angle=90, size=6)) + theme(legend.position='none') + labs(x='University', y='Outdegree') # Plot indegree with respect to ranking position ggplot(users, aes(x=reorder(twitter_name, ranking), y=in_degree, fill=1)) + geom_bar(stat='identity') + theme(axis.text.x=element_text(angle=90, size=6)) + theme(legend.position='none') + labs(x='University', y='Indegree') # Get out degree array out.degree <- users %>% select(out_degree) %>% pull # Show quantiles quantile(out.degree, probs=c(0.25,0.5,0.75,0.90)) # stampo le universit? che hanno un outdegree che supera il 75% percentile # Show universities whose outdegree exceeds the 75-th percentile outliers <- (out.degree > 20) outliers # There are 25% circa of anomal # commento outdegree: ? evidente come vi sia circa un 25% di ossservazioni anomale # va indagato il perch? fit.ergm.7 <- ergm(ergm.net ~ edges + mutual+ + nodeocov('is_usa') + nodeicov('is_usa') + nodematch('is_usa') + nodeocov('friends') + nodeicov('friends') + nodeocov('followers') + nodeicov('followers') + nodeocov('verified') + nodeicov('verified') + nodematch('verified') + nodeocov('rank25') + nodeicov('rank25')+nodematch('rank25', levels=1) + nodeocov('rank50') + nodeicov('rank50')+nodematch('rank50', levels=1) + nodeocov('rank100') + nodeicov('rank100')+nodematch('rank100', levels=1) + nodematch('rank225', levels=1), control = control.ergm(MCMLE.maxit = 30,MCMC.samplesize = 2048) ) # AIC: 17099 summary(fit.ergm.7) # Commento del modello # Questo modello sembra il migliore e offre anche delle interpretazioni interessanti # le universit? tendono a seguire di pi? se hanno un renking scarso mentre vengono seguite # di pi? le universit? pi? famose. 
Stesso discorso tra americane e non # scrivo la funzione per generare dal modello stimato # NB data=socio.matrix fit invece deve essere l'ergm che vogliomo testare ergm.gen <- function(socio.matrix, fit) { # Compute new matrix out.matrix <- as.matrix(simulate(fit)[1:nrow(socio.matrix), 1:nrow(socio.matrix)]) diag(out.matrix) <- NA # return computed matrix return(out.matrix) } # apply(ran.gen2(socio.matrix,fit.ergm7),1,sum,na.rm=T) # Faccio il bootstrap parametrico utilizzando la funzione boot R <- 200 # Number of replicates ergm.boot <- boot(data=socio.matrix, statistic=rce.stat, R=R, sim='parametric', ran.gen=ergm.gen, mle=fit.ergm.7) ergm.boot$t0 # Save the model to disk save(ergm.boot, file='data/models/ergm_bootstrap') # i warnigns erano previsti # Indegree statistic: analysis of the standard error distribution, under the ERGM hypotesis ggplot(data=NULL, aes(x=ergm.boot$t[, 1], y=..density.., fill=1)) + geom_histogram(bins=30) + geom_vline(xintercept=ergm.boot$t0[1], color=2) + theme(legend.position='none') + labs(title='Bootstrapped indegree coefficients distribution', x='Bootstrapped coefficients', y='Density') # Bilateral p-value ergm.in.pval <- 2 * mean(ergm.boot$t[, 1]< ergm.boot$t0[1]) ergm.in.pval # Outdegree statistic: analysis of the standard error distribution, under the ERGM hypotesis ggplot(data=NULL, aes(x=ergm.boot$t[, 2], y=..density.., fill=1)) + geom_histogram(bins=30) + geom_vline(xintercept=ergm.boot$t0[2], color=2) + theme(legend.position='none') + labs(title='Bootstrapped outdegree coefficients distribution', x='Bootstrapped coefficients', y='Density') # Bilateral p-value ergm.out.pval <- 2 * mean(ergm.boot$t[, 2]< ergm.boot$t0[2]) ergm.out.pval # Mutual dyads statistic: analysis of the standard error distribution, under the ERGM hypotesis ggplot(data=NULL, aes(x=ergm.boot$t[, 3], y=..density.., fill=1)) + geom_histogram(bins=30) + geom_vline(xintercept=ergm.boot$t0[3], color=2) + theme(legend.position='none') + labs(title='Bootstrapped 
mutual dyads coefficients distribution', x='Bootstrapped coefficients', y='Density') # Commento: in realt? il risultato ? ancora abbastanza insoddisfacente in outdegree e indegree
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tracked_environment.R \name{force_push} \alias{force_push} \title{Force push a tracked environment to a given commit.} \usage{ force_push(env, commit) } \arguments{ \item{env}{tracked_environment.} \item{commit}{integer or character. If character, the commit with this name will be attempted for the force push. If there are multiple commits with this same name, a warning will be issued.} } \description{ Forcing pushing means restoring a tracked environment to what it looked like as of that commit. You can force push with either the commit index or the name of the commit. } \examples{ env <- tracked_environment() env$x <- 1 commit(env) <- 'first commit' env$y <- 2 commit(env) <- 'second commit' force_push(env, 'first commit') # equivalent to force_push(env, 1) stopifnot(identical(as.list(environment(env)), list(x = 1))) }
/man/force_push.Rd
permissive
robertzk/objectdiff
R
false
true
912
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tracked_environment.R \name{force_push} \alias{force_push} \title{Force push a tracked environment to a given commit.} \usage{ force_push(env, commit) } \arguments{ \item{env}{tracked_environment.} \item{commit}{integer or character. If character, the commit with this name will be attempted for the force push. If there are multiple commits with this same name, a warning will be issued.} } \description{ Forcing pushing means restoring a tracked environment to what it looked like as of that commit. You can force push with either the commit index or the name of the commit. } \examples{ env <- tracked_environment() env$x <- 1 commit(env) <- 'first commit' env$y <- 2 commit(env) <- 'second commit' force_push(env, 'first commit') # equivalent to force_push(env, 1) stopifnot(identical(as.list(environment(env)), list(x = 1))) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rangevoting.R \name{proportional} \alias{proportional} \title{Proportional system calculator} \usage{ proportional(seats, counter) } \arguments{ \item{seats}{"Number of seats assigned to the analyzed costituency"} \item{counter}{"Named vector of total parliament seats won by each party in previously analyzed costituencies"} } \value{ Updated counter of parliament seats for each party } \description{ Reads a csv file containing votes for each party in a costituency and assigns the corresponding number of seats } \examples{ seats = 10 counter = c(1,2,3,0,4,1) #in the used dataset, 6 parties are running in the election counter = proportional(seats,counter) }
/man/proportional.Rd
no_license
unimi-dse/1a62ddfc
R
false
true
744
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rangevoting.R \name{proportional} \alias{proportional} \title{Proportional system calculator} \usage{ proportional(seats, counter) } \arguments{ \item{seats}{"Number of seats assigned to the analyzed costituency"} \item{counter}{"Named vector of total parliament seats won by each party in previously analyzed costituencies"} } \value{ Updated counter of parliament seats for each party } \description{ Reads a csv file containing votes for each party in a costituency and assigns the corresponding number of seats } \examples{ seats = 10 counter = c(1,2,3,0,4,1) #in the used dataset, 6 parties are running in the election counter = proportional(seats,counter) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/empiricalCopula.R \docType{class} \name{empiricalCopula-class} \alias{empiricalCopula-class} \alias{dim,empiricalCopula-method} \title{Empirical Copula class (virtual mother class)} \usage{ \S4method{dim}{empiricalCopula}(x) } \arguments{ \item{x}{ConvexCombCopula object} } \description{ Empirical Copula class (virtual mother class) } \section{Methods (by generic)}{ \itemize{ \item \code{dim}: dimension }} \section{Slots}{ \describe{ \item{\code{pseudo_data}}{matrix : pseudo_data that the empirical copula is based on.} }}
/man/empiricalCopula-class.Rd
permissive
ayotoasset/empCop
R
false
true
636
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/empiricalCopula.R \docType{class} \name{empiricalCopula-class} \alias{empiricalCopula-class} \alias{dim,empiricalCopula-method} \title{Empirical Copula class (virtual mother class)} \usage{ \S4method{dim}{empiricalCopula}(x) } \arguments{ \item{x}{ConvexCombCopula object} } \description{ Empirical Copula class (virtual mother class) } \section{Methods (by generic)}{ \itemize{ \item \code{dim}: dimension }} \section{Slots}{ \describe{ \item{\code{pseudo_data}}{matrix : pseudo_data that the empirical copula is based on.} }}
library(KSD) ### Name: scorefunctiongmm ### Title: Score function for given GMM : calculates score function ### dlogp(x)/dx for a given Gaussian Mixture Model ### Aliases: scorefunctiongmm ### ** Examples # Compute score for a given gaussianmixture model and dataset model <- gmm() X <- rgmm(model) score <- scorefunctiongmm(model=model, X=X)
/data/genthat_extracted_code/KSD/examples/scorefunctiongmm.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
352
r
library(KSD) ### Name: scorefunctiongmm ### Title: Score function for given GMM : calculates score function ### dlogp(x)/dx for a given Gaussian Mixture Model ### Aliases: scorefunctiongmm ### ** Examples # Compute score for a given gaussianmixture model and dataset model <- gmm() X <- rgmm(model) score <- scorefunctiongmm(model=model, X=X)
setMethod('fit', signature(object='FLsz'), function(object, package="FLsz", exeNm="seine", dir=tempdir(),...) runExe(object, package, exeNm, dir,...)) writeFn=function(object,exeNm="seine") { ## Data ObsLength = object@obs SampleSize = object@n ObsLength[ is.na(ObsLength)]=mean(ObsLength,na.rm=T) SampleSize[is.na(SampleSize)]=0 ## Parameters nbreaks= dim(object@params)[1]/2 -1 zguess =array(c(object@bounds[,"initial"][1:(nbreaks+1)], object@bounds[, "phase"][1:(nbreaks+1)]),c(nbreaks+1,2)) yguess =array(c(object@bounds[,"initial"][1+nbreaks+(1:(nbreaks))], object@bounds[, "phase"][1+nbreaks+(1:(nbreaks))]),c(nbreaks,2)) sigma =rev(object@bounds[,"initial"])[1] ## Growth dimnames(object@grw)$params = tolower(dimnames(object@grw)$params) KParm = object@grw["k"] LInf = object@grw["linf"] Lc = object@grw["lc"] ## Output data file dat<- list("Number of Breaks" =nbreaks, "First Year of Data" =range(object)["minyear"], "Last Year of Data" =range(object)["maxyear"], "(1,NYears Observed Mean Lengths)" =ObsLength, "(1,NYears Observed Sample Sizes)" =SampleSize, "VB K Parameter" =KParm, "VB L-Infinity" =LInf, "Critical Length - Length at first capture" =Lc, "(1,NBreaks+1,1,2)" =c(t(zguess)), "(1,NBreaks, 1,2)" =c(t(yguess)), "sigma" =sigma, "stepsize" =5, "casenum" =10) writeADMB(dat,paste(exeNm,".dat",sep="")) # ctl file ctl <- object@bounds ctl[,2:4] <- log(ctl[,2:4]) ctl <- alply(ctl,1) names(ctl) <- dimnames(object@bounds)$params writeADMB(ctl,paste(exeNm,".ctl",sep="")) # prr file prr <- object@priors prr <- alply(prr,1) names(prr) <- dimnames(object@priors)$params writeADMB(prr,paste(exeNm,".prr",sep=""))} readFn=function(object,i,exeNm="seine") { rep=readADMB(paste(exeNm,".rep",sep="")) std=read.table(paste(exeNm,".std",sep=""),skip=1)[,-1] names(std)=c("param","value","sd") params(object)[,i]=bounds(object)[,"initial"] params(object)[bounds(object)[,"phase"]>0,i]=std[,"value"] object@se[,i]= 0 object@se[object@bounds[,"phase"]>0,i]=std[,"sd"] object@hat[,,,,,i] 
=FLQuant(rep$hatLen, dimnames=dimnames(iter(object@obs,i))) object@residuals[,,,,,i]=FLQuant(rep$Residuals,dimnames=dimnames(iter(object@obs,i))) # object@vcov =rep$vcov # object@hessian =rep$hessian # object@logLik =rep$logLik # object@rsdlvar =rep$rsdlVar # object@dof =rep$dof # object@stopmess=rep$stopmess # object@aic =rep$aic return(object)} runExe=function(object, package="FLszS", exeNm="seine", dir=tempdir(),cmdOps=paste("-maxfn 500"),...){ ##### set up temp dir with exe for data files # Linux if (R.version$os=="linux-gnu") { # executable exe <- paste(system.file("bin", "linux", package=package, mustWork=TRUE),exeNm, sep="/") file.copy(exe, dir) path <- paste(dir, "/", sep="") # Windows } else if (.Platform$OS.type == "windows") { # executable exe <- paste(system.file("bin", "windows", package=package, mustWork=TRUE), paste(exeNm, ".exe", sep=""), sep="/") file.copy(exe, dir) path <- paste(dir, "\\", sep="") # Mac OSX # or fail! }else stop() oldwd <- getwd() # change wd to avoid exe case bug setwd(dir) nits=dims(object)$iter if (dims( se( object))$iter==1) se(object)=propagate( se( object),nits) if (dims(params( object))$iter==1) params(object)=propagate(params( object),nits) if (dims( hat( object))$iter==1) hat(object)=propagate( hat( object),nits) if (dims(residuals(object))$iter==1) residuals(object)=propagate(residuals(object),nits) if (nits>1) nits=seq(nits) for (i in nits){ # create exe input files do.call("writeFn", list(object=iter(object,i))) # run system(paste("./", exeNm, sep="")) # read exe output files object=readFn(object=object,i=i) } setwd(oldwd) if (chkJK(object)) attributes(object)$jacknife=TRUE residuals(object)[is.na(obs(object))]=NA return(object)}
/R/runExe.R
no_license
laurieKell/FLsz
R
false
false
5,014
r
setMethod('fit', signature(object='FLsz'), function(object, package="FLsz", exeNm="seine", dir=tempdir(),...) runExe(object, package, exeNm, dir,...)) writeFn=function(object,exeNm="seine") { ## Data ObsLength = object@obs SampleSize = object@n ObsLength[ is.na(ObsLength)]=mean(ObsLength,na.rm=T) SampleSize[is.na(SampleSize)]=0 ## Parameters nbreaks= dim(object@params)[1]/2 -1 zguess =array(c(object@bounds[,"initial"][1:(nbreaks+1)], object@bounds[, "phase"][1:(nbreaks+1)]),c(nbreaks+1,2)) yguess =array(c(object@bounds[,"initial"][1+nbreaks+(1:(nbreaks))], object@bounds[, "phase"][1+nbreaks+(1:(nbreaks))]),c(nbreaks,2)) sigma =rev(object@bounds[,"initial"])[1] ## Growth dimnames(object@grw)$params = tolower(dimnames(object@grw)$params) KParm = object@grw["k"] LInf = object@grw["linf"] Lc = object@grw["lc"] ## Output data file dat<- list("Number of Breaks" =nbreaks, "First Year of Data" =range(object)["minyear"], "Last Year of Data" =range(object)["maxyear"], "(1,NYears Observed Mean Lengths)" =ObsLength, "(1,NYears Observed Sample Sizes)" =SampleSize, "VB K Parameter" =KParm, "VB L-Infinity" =LInf, "Critical Length - Length at first capture" =Lc, "(1,NBreaks+1,1,2)" =c(t(zguess)), "(1,NBreaks, 1,2)" =c(t(yguess)), "sigma" =sigma, "stepsize" =5, "casenum" =10) writeADMB(dat,paste(exeNm,".dat",sep="")) # ctl file ctl <- object@bounds ctl[,2:4] <- log(ctl[,2:4]) ctl <- alply(ctl,1) names(ctl) <- dimnames(object@bounds)$params writeADMB(ctl,paste(exeNm,".ctl",sep="")) # prr file prr <- object@priors prr <- alply(prr,1) names(prr) <- dimnames(object@priors)$params writeADMB(prr,paste(exeNm,".prr",sep=""))} readFn=function(object,i,exeNm="seine") { rep=readADMB(paste(exeNm,".rep",sep="")) std=read.table(paste(exeNm,".std",sep=""),skip=1)[,-1] names(std)=c("param","value","sd") params(object)[,i]=bounds(object)[,"initial"] params(object)[bounds(object)[,"phase"]>0,i]=std[,"value"] object@se[,i]= 0 object@se[object@bounds[,"phase"]>0,i]=std[,"sd"] object@hat[,,,,,i] 
=FLQuant(rep$hatLen, dimnames=dimnames(iter(object@obs,i))) object@residuals[,,,,,i]=FLQuant(rep$Residuals,dimnames=dimnames(iter(object@obs,i))) # object@vcov =rep$vcov # object@hessian =rep$hessian # object@logLik =rep$logLik # object@rsdlvar =rep$rsdlVar # object@dof =rep$dof # object@stopmess=rep$stopmess # object@aic =rep$aic return(object)} runExe=function(object, package="FLszS", exeNm="seine", dir=tempdir(),cmdOps=paste("-maxfn 500"),...){ ##### set up temp dir with exe for data files # Linux if (R.version$os=="linux-gnu") { # executable exe <- paste(system.file("bin", "linux", package=package, mustWork=TRUE),exeNm, sep="/") file.copy(exe, dir) path <- paste(dir, "/", sep="") # Windows } else if (.Platform$OS.type == "windows") { # executable exe <- paste(system.file("bin", "windows", package=package, mustWork=TRUE), paste(exeNm, ".exe", sep=""), sep="/") file.copy(exe, dir) path <- paste(dir, "\\", sep="") # Mac OSX # or fail! }else stop() oldwd <- getwd() # change wd to avoid exe case bug setwd(dir) nits=dims(object)$iter if (dims( se( object))$iter==1) se(object)=propagate( se( object),nits) if (dims(params( object))$iter==1) params(object)=propagate(params( object),nits) if (dims( hat( object))$iter==1) hat(object)=propagate( hat( object),nits) if (dims(residuals(object))$iter==1) residuals(object)=propagate(residuals(object),nits) if (nits>1) nits=seq(nits) for (i in nits){ # create exe input files do.call("writeFn", list(object=iter(object,i))) # run system(paste("./", exeNm, sep="")) # read exe output files object=readFn(object=object,i=i) } setwd(oldwd) if (chkJK(object)) attributes(object)$jacknife=TRUE residuals(object)[is.na(obs(object))]=NA return(object)}
# region data layers---- # a list of possible id fields used in datalayers (most will use rgn_id, but not always) layers_id_fields <- c('rgn_id') # the official list of regions (and corresponding names) layer_region_labels <- 'rgn_labels' # the official ocean areas of each region (used to weight each subregions contribution to the region score) layer_region_areas <- 'rgn_area' # pressures & resilience matrices ---- # For goals with elements (e.g., for coastal protection: mangrove, saltmarsh, seagrass), these data layers describe how to # weight the contribution of each goal element to calculate the final goal pressure and resilience dimensions. resilience_element <- list('CS' = 'element_wts_cs_km2_x_storage' , 'CP' = 'element_wts_cp_km2_x_protection', 'HAB' = 'element_wts_hab_pres_abs') pressures_element <- list('CS' = 'element_wts_cs_km2_x_storage' , 'CP' = 'element_wts_cp_km2_x_protection' , 'HAB' = 'element_wts_hab_pres_abs') # constants pressures_gamma <- 0.5 # The relative importance of social vs. ecological pressures (pressure = gamma * ecological + (1-gamma) * social) resilience_gamma <- 0.5 # The relative importance of social vs. ecological resiliences (resilience = gamma * ecological + (1-gamma) * social) goal_discount <- 1.0 # Used to calculate likely future state goal_beta <- 0.67 # The relative importance of trend vs. pressure/resilience on likely future state; if goal_beta = 0.67, trend is twice as important as pressure/resilience. default_trend <- 0 #### NOTE: can we delete the following information??? # map configuration map_lat <- 0; map_lon <- 0; map_zoom <- 3 # extra descriptions not covered by goals.description or layers.description, used in ohigui index_description <- 'The overall Index represents the weighted average of all goal scores.' 
dimension_descriptions <- c('score' = 'This dimension is an average of the current status and likely future.', 'status' = 'This dimension represents the current value of a goal or sub-goal relative to its reference point.', 'future' = 'For this dimension, the likely future is calculated as the projected status in 5 years, informed by the current status, continued trend, inflected upwards by resilience and downwards by pressures.', 'trend' = 'This dimension represents the recent change in the value of the status. Unlike all other dimensions which range in value from 0 to 100, the trend ranges from -1 to 1, representing the steepest declines to increases respectively.', 'pressures' = 'This dimension represents the anthropogenic stressors that negatively affect the ability of a goal to be delivered to people. Pressures can affect either ecological or social (i.e. human) systems.', 'resilience' = 'This dimension represents the social, institutional, and ecological factors that positively affect the ability of a goal to be delivered to people.')
/calc_ohibc/master/config_master.R
no_license
seifertjenny/ohibc
R
false
false
3,162
r
# region data layers---- # a list of possible id fields used in datalayers (most will use rgn_id, but not always) layers_id_fields <- c('rgn_id') # the official list of regions (and corresponding names) layer_region_labels <- 'rgn_labels' # the official ocean areas of each region (used to weight each subregions contribution to the region score) layer_region_areas <- 'rgn_area' # pressures & resilience matrices ---- # For goals with elements (e.g., for coastal protection: mangrove, saltmarsh, seagrass), these data layers describe how to # weight the contribution of each goal element to calculate the final goal pressure and resilience dimensions. resilience_element <- list('CS' = 'element_wts_cs_km2_x_storage' , 'CP' = 'element_wts_cp_km2_x_protection', 'HAB' = 'element_wts_hab_pres_abs') pressures_element <- list('CS' = 'element_wts_cs_km2_x_storage' , 'CP' = 'element_wts_cp_km2_x_protection' , 'HAB' = 'element_wts_hab_pres_abs') # constants pressures_gamma <- 0.5 # The relative importance of social vs. ecological pressures (pressure = gamma * ecological + (1-gamma) * social) resilience_gamma <- 0.5 # The relative importance of social vs. ecological resiliences (resilience = gamma * ecological + (1-gamma) * social) goal_discount <- 1.0 # Used to calculate likely future state goal_beta <- 0.67 # The relative importance of trend vs. pressure/resilience on likely future state; if goal_beta = 0.67, trend is twice as important as pressure/resilience. default_trend <- 0 #### NOTE: can we delete the following information??? # map configuration map_lat <- 0; map_lon <- 0; map_zoom <- 3 # extra descriptions not covered by goals.description or layers.description, used in ohigui index_description <- 'The overall Index represents the weighted average of all goal scores.' 
dimension_descriptions <- c('score' = 'This dimension is an average of the current status and likely future.', 'status' = 'This dimension represents the current value of a goal or sub-goal relative to its reference point.', 'future' = 'For this dimension, the likely future is calculated as the projected status in 5 years, informed by the current status, continued trend, inflected upwards by resilience and downwards by pressures.', 'trend' = 'This dimension represents the recent change in the value of the status. Unlike all other dimensions which range in value from 0 to 100, the trend ranges from -1 to 1, representing the steepest declines to increases respectively.', 'pressures' = 'This dimension represents the anthropogenic stressors that negatively affect the ability of a goal to be delivered to people. Pressures can affect either ecological or social (i.e. human) systems.', 'resilience' = 'This dimension represents the social, institutional, and ecological factors that positively affect the ability of a goal to be delivered to people.')
#' @title shift 5' ends #' @description shift the GAlignmentsLists by 5' ends. #' All reads aligning to the positive strand will be offset by +4bp, #' and all reads aligning to the negative strand will be offset -5bp by default. #' @param gal An object of \link[GenomicAlignments]{GAlignmentsList}. #' @param positive integer(1). the size to be shift for positive strand #' @param negative integer(1). the size to be shift for negative strand #' @return An object of \link[GenomicAlignments]{GAlignments} with 5' end #' shifted reads. #' @author Jianhong Ou #' @export #' @import S4Vectors #' @import GenomicRanges #' @examples #' bamfile <- system.file("extdata", "GL1.bam", package="ATACseqQC") #' tags <- c("AS", "XN", "XM", "XO", "XG", "NM", "MD", "YS", "YT") #' library(BSgenome.Hsapiens.UCSC.hg19) #' which <- as(seqinfo(Hsapiens)["chr1"], "GRanges") #' gal <- readBamFile(bamfile, tag=tags, which=which, asMates=TRUE) #' objs <- shiftGAlignmentsList(gal) #' export(objs, "shift.bam") shiftGAlignmentsList <- function(gal, positive=4L, negative=5L){ stopifnot(is.integer(positive)) stopifnot(is.integer(negative)) stopifnot(is(gal, "GAlignmentsList")) stopifnot(length(gal)>0) stopifnot(all(elementNROWS(gal)<3)) ## move the 5'end ## first reads is 5'end gal1 <- unlist(gal) gp <- rep(1, length(gal1)) gp1 <- rep(seq_along(gal), elementNROWS(gal)) gp[duplicated(gp1)] <- 2 mcols(gal1)$MD <- NULL gal1[gp==1] <- shiftReads(gal1[gp==1], positive=positive, negative=negative) names(gal1) <- mcols(gal1)$qname mcols(gal1)$isize[gp==2] <- sign(mcols(gal1)$isize[gp==2]) * abs(mcols(gal1)$isize[which(gp==2)-1]) mcols(gal1)$mpos[gp==2] <- start(gal1)[which(gp==2)-1] mcols(gal1)$mpos[gp==1] <- start(gal1)[which(gp==1)+1] ## till now, gal1 must have mrnm, mpos, names and flag stopifnot(length(mcols(gal1)$mrnm)>0) stopifnot(length(mcols(gal1)$mpos)>0) stopifnot(length(mcols(gal1)$flag)>0) stopifnot(length(names(gal1))>0) return(gal1) }
/R/shiftGAlignmentsList.R
no_license
jaime11/Transcription-Factor-Footprinting
R
false
false
2,110
r
#' @title shift 5' ends #' @description shift the GAlignmentsLists by 5' ends. #' All reads aligning to the positive strand will be offset by +4bp, #' and all reads aligning to the negative strand will be offset -5bp by default. #' @param gal An object of \link[GenomicAlignments]{GAlignmentsList}. #' @param positive integer(1). the size to be shift for positive strand #' @param negative integer(1). the size to be shift for negative strand #' @return An object of \link[GenomicAlignments]{GAlignments} with 5' end #' shifted reads. #' @author Jianhong Ou #' @export #' @import S4Vectors #' @import GenomicRanges #' @examples #' bamfile <- system.file("extdata", "GL1.bam", package="ATACseqQC") #' tags <- c("AS", "XN", "XM", "XO", "XG", "NM", "MD", "YS", "YT") #' library(BSgenome.Hsapiens.UCSC.hg19) #' which <- as(seqinfo(Hsapiens)["chr1"], "GRanges") #' gal <- readBamFile(bamfile, tag=tags, which=which, asMates=TRUE) #' objs <- shiftGAlignmentsList(gal) #' export(objs, "shift.bam") shiftGAlignmentsList <- function(gal, positive=4L, negative=5L){ stopifnot(is.integer(positive)) stopifnot(is.integer(negative)) stopifnot(is(gal, "GAlignmentsList")) stopifnot(length(gal)>0) stopifnot(all(elementNROWS(gal)<3)) ## move the 5'end ## first reads is 5'end gal1 <- unlist(gal) gp <- rep(1, length(gal1)) gp1 <- rep(seq_along(gal), elementNROWS(gal)) gp[duplicated(gp1)] <- 2 mcols(gal1)$MD <- NULL gal1[gp==1] <- shiftReads(gal1[gp==1], positive=positive, negative=negative) names(gal1) <- mcols(gal1)$qname mcols(gal1)$isize[gp==2] <- sign(mcols(gal1)$isize[gp==2]) * abs(mcols(gal1)$isize[which(gp==2)-1]) mcols(gal1)$mpos[gp==2] <- start(gal1)[which(gp==2)-1] mcols(gal1)$mpos[gp==1] <- start(gal1)[which(gp==1)+1] ## till now, gal1 must have mrnm, mpos, names and flag stopifnot(length(mcols(gal1)$mrnm)>0) stopifnot(length(mcols(gal1)$mpos)>0) stopifnot(length(mcols(gal1)$flag)>0) stopifnot(length(names(gal1))>0) return(gal1) }
# source('/ihme/code/geospatial/temperature/exposure/launch_exposure_calc_by_year.R', echo =T) #set up smart sensing of os if(Sys.info()[1]=='Windows'){ j = "J:/" } else{ j = '/home/j/' } #set variables for parallelization code.dir = paste0('/ihme/code/geospatial/temperature/exposure/') slots = 30 years = seq(1990,2015, 5) num_draws = 100 temp_prods = c('era_mean', 'cru_spline_interp') admin_level = 'admin0' tmrel_version = 1 risk_version = 'test' paf_version = 1 #load the possible causes load(paste0('/share/geospatial/temperature/estimates/risk/temperature_risks_', risk_version, '.Rdata')) cause_list = as.character(unique(risk_grid$acause)) #cause_list = c('diabetes') #, 'resp_asthma') for(tp in temp_prods){ #make paf directory dir.create(paste0('/share/geospatial/temperature/estimates/paf/',tp,'/',paf_version),recursive = T) for(yyy in years){ convert_k = grepl('era', tp) for (ccc in cause_list) { #prepare qsub args = paste(slots, yyy, tp, admin_level, ccc, paf_version, tmrel_version, risk_version, convert_k) rscript = paste0(code.dir, 'temperature_exposure_by_year.R') rshell = paste0('/ihme/code/geospatial/temperature/r_shell_fancy.sh') errors = '-o /share/temp/sgeoutput/dccasey/output -e /share/temp/sgeoutput/dccasey/errors' jname = paste0('calc_temp_exp_',tp,'_',yyy,'_',ccc) sys.sub <- paste0("qsub -P proj_geospatial ", errors, " -N ", jname, " ", "-pe multi_slot ", slots, " ", "-t ", paste0('1:', num_draws)) #array job, era that I've downloaded exists for 1989-2016 command =paste(sys.sub, rshell, rscript, args) #launch jobs system(command) #print(command) } #close cause } #close years } #close temperture products #write arguments to disk cols = list(run_date = as.character(Sys.time()), years = years, temperature_products = temp_prods, admin_level = admin_level, causes = cause_list, paf_version = paf_version, tmrel_version= tmrel_version, risk_version = risk_version, draws = num_draws) run_log = as.data.frame(lapply(cols, `length<-`, max(sapply(cols, 
length)))) write.csv(run_log, file = paste0('/share/geospatial/temperature/run_log/run_log_',paf_version,'.csv'))
/temperature/archive/exposure/launch_exposure_calc_by_year.R
no_license
wgodwin28/ihme_code
R
false
false
2,227
r
# source('/ihme/code/geospatial/temperature/exposure/launch_exposure_calc_by_year.R', echo =T) #set up smart sensing of os if(Sys.info()[1]=='Windows'){ j = "J:/" } else{ j = '/home/j/' } #set variables for parallelization code.dir = paste0('/ihme/code/geospatial/temperature/exposure/') slots = 30 years = seq(1990,2015, 5) num_draws = 100 temp_prods = c('era_mean', 'cru_spline_interp') admin_level = 'admin0' tmrel_version = 1 risk_version = 'test' paf_version = 1 #load the possible causes load(paste0('/share/geospatial/temperature/estimates/risk/temperature_risks_', risk_version, '.Rdata')) cause_list = as.character(unique(risk_grid$acause)) #cause_list = c('diabetes') #, 'resp_asthma') for(tp in temp_prods){ #make paf directory dir.create(paste0('/share/geospatial/temperature/estimates/paf/',tp,'/',paf_version),recursive = T) for(yyy in years){ convert_k = grepl('era', tp) for (ccc in cause_list) { #prepare qsub args = paste(slots, yyy, tp, admin_level, ccc, paf_version, tmrel_version, risk_version, convert_k) rscript = paste0(code.dir, 'temperature_exposure_by_year.R') rshell = paste0('/ihme/code/geospatial/temperature/r_shell_fancy.sh') errors = '-o /share/temp/sgeoutput/dccasey/output -e /share/temp/sgeoutput/dccasey/errors' jname = paste0('calc_temp_exp_',tp,'_',yyy,'_',ccc) sys.sub <- paste0("qsub -P proj_geospatial ", errors, " -N ", jname, " ", "-pe multi_slot ", slots, " ", "-t ", paste0('1:', num_draws)) #array job, era that I've downloaded exists for 1989-2016 command =paste(sys.sub, rshell, rscript, args) #launch jobs system(command) #print(command) } #close cause } #close years } #close temperture products #write arguments to disk cols = list(run_date = as.character(Sys.time()), years = years, temperature_products = temp_prods, admin_level = admin_level, causes = cause_list, paf_version = paf_version, tmrel_version= tmrel_version, risk_version = risk_version, draws = num_draws) run_log = as.data.frame(lapply(cols, `length<-`, max(sapply(cols, 
length)))) write.csv(run_log, file = paste0('/share/geospatial/temperature/run_log/run_log_',paf_version,'.csv'))
# Pijuan-Sala et al. A single-cell molecular map of mouse gastrulation and early organogenesis. Nature. 2019 # PMID: # https://github.com/MarioniLab/EmbryoTimecourse2018 # 10X # QC - >=1,000 expressed genes, mito < 2%, mean log2(norm) >= 10^-3, exclude chrY, Xist, tdTomato construct # Normalization: scran - quickCluster(method=igraph, min=100, max=3000) # HVG: scran - trendVar and decomposeVar, loess span 0.05, FDR 5% # Batch effects: scran - fastMNN on 50 PCs from HVGs # doublet removal: scan - doubletCells, 50 PCs, 10-NN (buildSNNGraph in scran), louvain clustering from igraph, hierarchically applied, FDR 10% # SS2 # nuclear reads > 50,000, genes > 4000, mito < 10% # Clustering: buildSSNGraph (scran) 50 batch-corrected PCs on HVGs, 10-NN # cluster_louvain (igraph) x2 levels require("Matrix") require("SingleCellExperiment") anno <- read.table("atlas/meta.tab", sep="\t", header=T) count_mat <- readMM("atlas/raw_counts.mtx") count_mat <- as(count_mat, "dgCMatrix") gene_ids <- read.table("atlas/genes.tsv", stringsAsFactors=FALSE) cell_ids <- read.table("atlas/barcodes.tsv", stringsAsFactors=FALSE) pcs <- readRDS("atlas/corrected_pcas.rds") sfs <- read.table("atlas/sizefactors.tab") rownames(count_mat) <- gene_ids[,1] colnames(count_mat) <- cell_ids[,1] rownames(anno) <- anno[,1] rownames(gene_ids) <- gene_ids[,1] colnames(gene_ids) <- c("ensg", "feature_symbol") sce <- SingleCellExperiment(assays=list(counts=count_mat), colData=anno, rowData=gene_ids) sce@reducedDims <- SimpleList(pca=pcs$all) sce@int_colData <- DataFrame(size_factor=sfs[,1]) require("scater") sce <- normalize(sce) saveRDS(sce, "PijuanSala.rds") require("Matrix") require("SingleCellExperiment") anno <- read.table("atlas/meta.tab", sep="\t", header=T) keep <- !is.na(anno$celltype) anno <- anno[keep,] count_mat <- readMM("atlas/raw_counts.mtx") count_mat <- as(count_mat[,keep], "dgCMatrix") gene_ids <- read.table("atlas/genes.tsv", stringsAsFactors=FALSE) cell_ids <- read.table("atlas/barcodes.tsv", 
stringsAsFactors=FALSE) pcs <- readRDS("atlas/corrected_pcas.rds") sfs <- read.table("atlas/sizefactors.tab") sfs <- sfs[,keep] pcs <- pcs[,keep] rownames(count_mat) <- gene_ids[,1] colnames(count_mat) <- cell_ids[keep,1] rownames(anno) <- anno[,1] rownames(gene_ids) <- gene_ids[,1] colnames(gene_ids) <- c("ensg", "feature_symbol") sce <- SingleCellExperiment(assays=list(counts=count_mat), colData=anno, rowData=gene_ids) sce@reducedDims <- SimpleList(pca=pcs$all) sce@int_colData <- DataFrame(size_factor=sfs[,1]) require("scater") sce <- normalize(sce) saveRDS(sce, "PijuanSala_clean.rds")
/PijuanSala.R
no_license
yingstat/scDatasets
R
false
false
2,607
r
# Pijuan-Sala et al. A single-cell molecular map of mouse gastrulation and early organogenesis. Nature. 2019 # PMID: # https://github.com/MarioniLab/EmbryoTimecourse2018 # 10X # QC - >=1,000 expressed genes, mito < 2%, mean log2(norm) >= 10^-3, exclude chrY, Xist, tdTomato construct # Normalization: scran - quickCluster(method=igraph, min=100, max=3000) # HVG: scran - trendVar and decomposeVar, loess span 0.05, FDR 5% # Batch effects: scran - fastMNN on 50 PCs from HVGs # doublet removal: scan - doubletCells, 50 PCs, 10-NN (buildSNNGraph in scran), louvain clustering from igraph, hierarchically applied, FDR 10% # SS2 # nuclear reads > 50,000, genes > 4000, mito < 10% # Clustering: buildSSNGraph (scran) 50 batch-corrected PCs on HVGs, 10-NN # cluster_louvain (igraph) x2 levels require("Matrix") require("SingleCellExperiment") anno <- read.table("atlas/meta.tab", sep="\t", header=T) count_mat <- readMM("atlas/raw_counts.mtx") count_mat <- as(count_mat, "dgCMatrix") gene_ids <- read.table("atlas/genes.tsv", stringsAsFactors=FALSE) cell_ids <- read.table("atlas/barcodes.tsv", stringsAsFactors=FALSE) pcs <- readRDS("atlas/corrected_pcas.rds") sfs <- read.table("atlas/sizefactors.tab") rownames(count_mat) <- gene_ids[,1] colnames(count_mat) <- cell_ids[,1] rownames(anno) <- anno[,1] rownames(gene_ids) <- gene_ids[,1] colnames(gene_ids) <- c("ensg", "feature_symbol") sce <- SingleCellExperiment(assays=list(counts=count_mat), colData=anno, rowData=gene_ids) sce@reducedDims <- SimpleList(pca=pcs$all) sce@int_colData <- DataFrame(size_factor=sfs[,1]) require("scater") sce <- normalize(sce) saveRDS(sce, "PijuanSala.rds") require("Matrix") require("SingleCellExperiment") anno <- read.table("atlas/meta.tab", sep="\t", header=T) keep <- !is.na(anno$celltype) anno <- anno[keep,] count_mat <- readMM("atlas/raw_counts.mtx") count_mat <- as(count_mat[,keep], "dgCMatrix") gene_ids <- read.table("atlas/genes.tsv", stringsAsFactors=FALSE) cell_ids <- read.table("atlas/barcodes.tsv", 
stringsAsFactors=FALSE) pcs <- readRDS("atlas/corrected_pcas.rds") sfs <- read.table("atlas/sizefactors.tab") sfs <- sfs[,keep] pcs <- pcs[,keep] rownames(count_mat) <- gene_ids[,1] colnames(count_mat) <- cell_ids[keep,1] rownames(anno) <- anno[,1] rownames(gene_ids) <- gene_ids[,1] colnames(gene_ids) <- c("ensg", "feature_symbol") sce <- SingleCellExperiment(assays=list(counts=count_mat), colData=anno, rowData=gene_ids) sce@reducedDims <- SimpleList(pca=pcs$all) sce@int_colData <- DataFrame(size_factor=sfs[,1]) require("scater") sce <- normalize(sce) saveRDS(sce, "PijuanSala_clean.rds")
library(coneproj) ### Name: conv ### Title: Specify a Convex Shape-Restriction in a SHAPEREG Formula ### Aliases: conv ### Keywords: shape routine ### ** Examples # generate y x <- seq(-1, 2, by = 0.1) n <- length(x) y <- x^2 + rnorm(n, .3) # regress y on x under the shape-restriction: "convex" ans <- shapereg(y ~ conv(x)) # make a plot plot(x, y) lines(x, fitted(ans), col = 2) legend("topleft", bty = "n", "shapereg: convex fit", col = 2, lty = 1)
/data/genthat_extracted_code/coneproj/examples/conv.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
483
r
library(coneproj) ### Name: conv ### Title: Specify a Convex Shape-Restriction in a SHAPEREG Formula ### Aliases: conv ### Keywords: shape routine ### ** Examples # generate y x <- seq(-1, 2, by = 0.1) n <- length(x) y <- x^2 + rnorm(n, .3) # regress y on x under the shape-restriction: "convex" ans <- shapereg(y ~ conv(x)) # make a plot plot(x, y) lines(x, fitted(ans), col = 2) legend("topleft", bty = "n", "shapereg: convex fit", col = 2, lty = 1)
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/embeddr.R \name{predicted_expression} \alias{predicted_expression} \title{Create a predicted expression matrix} \usage{ predicted_expression(sce, models = NULL, n_cores = 2) } \arguments{ \item{sce}{An object of class \code{SCESet}} \item{models}{An object representing models. If of type \code{list} then for each element the predicted expression is computed and a matrix returned. If of type \code{model} for which a \code{predict} function is available, then a single vector corresponding to \code{predict(model)} is returned. If NULL then the model is computed for all genes in \code{sce} and the resulting list returned.} \item{n_cores}{The number of cores to pass to \code{mclapply}.} } \value{ A dataframe of predicted expression where rows are cells (pseudotime) and columns are genes } \description{ Given a list of models return a matrix corresponding to the prediction from the models. Each column represents a gene and each row its expression at a given point in pseudotime. } \examples{ library(scater) data('sc_example_counts') ; sce <- newSCESet(countData = sc_example_counts) sce <- embeddr(sce) sce <- fit_pseudotime(sce) pe <- predicted_expression(sce[1:4,]) # use first four genes }
/man/predicted_expression.Rd
no_license
bjstewart1/embeddr
R
false
false
1,291
rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/embeddr.R \name{predicted_expression} \alias{predicted_expression} \title{Create a predicted expression matrix} \usage{ predicted_expression(sce, models = NULL, n_cores = 2) } \arguments{ \item{sce}{An object of class \code{SCESet}} \item{models}{An object representing models. If of type \code{list} then for each element the predicted expression is computed and a matrix returned. If of type \code{model} for which a \code{predict} function is available, then a single vector corresponding to \code{predict(model)} is returned. If NULL then the model is computed for all genes in \code{sce} and the resulting list returned.} \item{n_cores}{The number of cores to pass to \code{mclapply}.} } \value{ A dataframe of predicted expression where rows are cells (pseudotime) and columns are genes } \description{ Given a list of models return a matrix corresponding to the prediction from the models. Each column represents a gene and each row its expression at a given point in pseudotime. } \examples{ library(scater) data('sc_example_counts') ; sce <- newSCESet(countData = sc_example_counts) sce <- embeddr(sce) sce <- fit_pseudotime(sce) pe <- predicted_expression(sce[1:4,]) # use first four genes }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/vip.R \name{vip} \alias{vip} \alias{vip.default} \title{Variable Importance Plots} \usage{ vip(object, ...) \method{vip}{default}(object, bar = TRUE, width = 0.75, horizontal = TRUE, alpha = 1, color = "grey35", fill = "grey35", ...) } \arguments{ \item{object}{A fitted model object (e.g., a \code{"randomForest"} object).} \item{...}{Additional optional arguments to be passed onto \code{\link{vi}}.} \item{bar}{Logical indicating whether or not to produce a barplot. Default is \code{TRUE}. If \code{bar = FALSE}, then a dotchart is displayed instead.} \item{width}{Numeric value specifying the width of the bars when \code{bar = TRUE}. Default is \code{0.75}.} \item{horizontal}{Logical indicating whether or not to plot the importance scores on the x-axis (\code{TRUE}). Default is \code{TRUE}.} \item{alpha}{Numeric value between 0 and 1 giving the trasparency of the bars.} \item{color}{Character string specifying the color to use for the borders of the bars. Could also be a function, such as \code{\link[grDevices]{heat.colors}}. Default is \code{"grey35"}.} \item{fill}{Character string specifying the color to use to fill the bars. Could also be a function, such as \code{\link[grDevices]{heat.colors}}. Default is \code{"grey35"}.} } \description{ Plot variable importance scores for the predictors in a model. } \examples{ \dontrun{ mtcars.lm <- lm(mpg ~ ., data = mtcars) vip(mtcars.lm) + theme_light() } }
/man/vip.Rd
no_license
guhjy/vip
R
false
true
1,510
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/vip.R \name{vip} \alias{vip} \alias{vip.default} \title{Variable Importance Plots} \usage{ vip(object, ...) \method{vip}{default}(object, bar = TRUE, width = 0.75, horizontal = TRUE, alpha = 1, color = "grey35", fill = "grey35", ...) } \arguments{ \item{object}{A fitted model object (e.g., a \code{"randomForest"} object).} \item{...}{Additional optional arguments to be passed onto \code{\link{vi}}.} \item{bar}{Logical indicating whether or not to produce a barplot. Default is \code{TRUE}. If \code{bar = FALSE}, then a dotchart is displayed instead.} \item{width}{Numeric value specifying the width of the bars when \code{bar = TRUE}. Default is \code{0.75}.} \item{horizontal}{Logical indicating whether or not to plot the importance scores on the x-axis (\code{TRUE}). Default is \code{TRUE}.} \item{alpha}{Numeric value between 0 and 1 giving the trasparency of the bars.} \item{color}{Character string specifying the color to use for the borders of the bars. Could also be a function, such as \code{\link[grDevices]{heat.colors}}. Default is \code{"grey35"}.} \item{fill}{Character string specifying the color to use to fill the bars. Could also be a function, such as \code{\link[grDevices]{heat.colors}}. Default is \code{"grey35"}.} } \description{ Plot variable importance scores for the predictors in a model. } \examples{ \dontrun{ mtcars.lm <- lm(mpg ~ ., data = mtcars) vip(mtcars.lm) + theme_light() } }
data(satsolvers) vbsp = sum(parscores(satsolvers, vbs)) vbsm = sum(misclassificationPenalties(satsolvers, vbs)) vbss = sum(successes(satsolvers, vbs)) test_that("singleBest and vbs", { skip.expensive() vbsse = sum(apply(satsolvers$data[satsolvers$success], 1, max)) expect_equal(vbsse, 2125) expect_equal(vbss, 2125) vbsp1 = sum(parscores(satsolvers, vbs, 1)) vbsp1e = sum(apply(satsolvers$data[satsolvers$performance], 1, min)) expect_equal(vbsp1e, 1288664.971) expect_equal(vbsp1, 1288664.971) expect_equal(vbsp, 11267864.97) expect_equal(vbsm, 0) sbp = sum(parscores(satsolvers, singleBest)) sbm = sum(misclassificationPenalties(satsolvers, singleBest)) sbs = sum(successes(satsolvers, singleBest)) sbse = sum(satsolvers$data[,"clasp_success"]) expect_equal(sbse, 2048) expect_equal(sbs, 2048) sbp1 = sum(parscores(satsolvers, singleBest, 1)) sbp1e = sum(satsolvers$data["clasp"]) expect_equal(sbp1e, 1586266.044) expect_equal(sbp1, 1586266.044) sbme = sum(apply(satsolvers$data[satsolvers$performance], 1, function(x) { abs(x["clasp"] - min(x)) })) expect_equal(sbme, 297601.073) expect_equal(sbm, 297601.073) expect_equal(sbp, 14060266.04) }) folds = cvFolds(satsolvers) test_that("classify", { skip.expensive() res = classify(classifier=makeLearner("classif.OneR"), data=folds) expect_true(sum(parscores(folds, res)) > vbsp) expect_true(sum(misclassificationPenalties(folds, res)) > vbsm) expect_true(sum(successes(folds, res)) < vbss) expect_true(is.data.frame(res$predictor(satsolvers$data[satsolvers$features]))) res = classify(classifier=list(makeLearner("classif.OneR"), makeLearner("classif.OneR"), makeLearner("classif.OneR")), data=folds) expect_true(sum(parscores(folds, res)) > vbsp) expect_true(sum(misclassificationPenalties(folds, res)) > vbsm) expect_true(sum(successes(folds, res)) < vbss) expect_true(is.data.frame(res$predictor(satsolvers$data[satsolvers$features]))) res = classify(classifier=list(makeLearner("classif.OneR"), makeLearner("classif.OneR"), 
makeLearner("classif.OneR"), .combine=makeLearner("classif.OneR")), data=folds) expect_true(sum(parscores(folds, res)) > vbsp) expect_true(sum(misclassificationPenalties(folds, res)) > vbsm) expect_true(sum(successes(folds, res)) < vbss) expect_true(is.data.frame(res$predictor(satsolvers$data[satsolvers$features]))) }) test_that("classifyPairs", { skip.expensive() res = classifyPairs(classifier=makeLearner("classif.OneR"), data=folds) expect_true(sum(parscores(folds, res)) > vbsp) expect_true(sum(misclassificationPenalties(folds, res)) > vbsm) expect_true(sum(successes(folds, res)) < vbss) expect_true(is.data.frame(res$predictor(satsolvers$data[satsolvers$features]))) res = classifyPairs(classifier=makeLearner("classif.OneR"), data=folds, combine=makeLearner("classif.OneR")) expect_true(sum(parscores(folds, res)) > vbsp) expect_true(sum(misclassificationPenalties(folds, res)) > vbsm) expect_true(sum(successes(folds, res)) < vbss) expect_true(is.data.frame(res$predictor(satsolvers$data[satsolvers$features]))) }) test_that("cluster", { skip.expensive() res = cluster(clusterer=makeLearner("cluster.SimpleKMeans"), data=folds, pre=normalize) expect_true(sum(parscores(folds, res)) > vbsp) expect_true(sum(misclassificationPenalties(folds, res)) > vbsm) expect_true(sum(successes(folds, res)) < vbss) expect_true(is.data.frame(res$predictor(satsolvers$data[satsolvers$features]))) res = cluster(clusterer=makeLearner("cluster.SimpleKMeans"), data=folds, bestBy="successes", pre=normalize) expect_true(sum(parscores(folds, res)) > vbsp) expect_true(sum(misclassificationPenalties(folds, res)) > vbsm) expect_true(sum(successes(folds, res)) < vbss) expect_true(is.data.frame(res$predictor(satsolvers$data[satsolvers$features]))) res = cluster(clusterer=list(makeLearner("cluster.SimpleKMeans"), makeLearner("cluster.SimpleKMeans"), makeLearner("cluster.SimpleKMeans")), data=folds, pre=normalize) expect_true(sum(parscores(folds, res)) > vbsp) 
expect_true(sum(misclassificationPenalties(folds, res)) > vbsm) expect_true(sum(successes(folds, res)) < vbss) expect_true(is.data.frame(res$predictor(satsolvers$data[satsolvers$features]))) res = cluster(clusterer=list(makeLearner("cluster.SimpleKMeans"), makeLearner("cluster.SimpleKMeans"), makeLearner("cluster.SimpleKMeans"), .combine=makeLearner("classif.OneR")), data=folds, pre=normalize) expect_true(sum(parscores(folds, res)) > vbsp) expect_true(sum(misclassificationPenalties(folds, res)) > vbsm) expect_true(sum(successes(folds, res)) < vbss) expect_true(is.data.frame(res$predictor(satsolvers$data[satsolvers$features]))) }) test_that("regression", { skip.expensive() res = regression(regressor=makeLearner("regr.lm"), data=folds) expect_true(sum(parscores(folds, res)) > vbsp) expect_true(sum(misclassificationPenalties(folds, res)) > vbsm) expect_true(sum(successes(folds, res)) < vbss) expect_true(is.data.frame(res$predictor(satsolvers$data[satsolvers$features]))) res = regression(regressor=makeLearner("regr.lm"), data=folds, combine=makeLearner("classif.OneR")) expect_true(sum(parscores(folds, res)) > vbsp) expect_true(sum(misclassificationPenalties(folds, res)) > vbsm) expect_true(sum(successes(folds, res)) < vbss) expect_true(is.data.frame(res$predictor(satsolvers$data[satsolvers$features]))) res = regression(regressor=makeLearner("regr.lm"), data=folds, combine=makeLearner("classif.OneR"), expand=function(x) { cbind(x, combn(c(1:ncol(x)), 2, function(y) { abs(x[,y[1]] - x[,y[2]]) })) }) expect_true(sum(parscores(folds, res)) > vbsp) expect_true(sum(misclassificationPenalties(folds, res)) > vbsm) expect_true(sum(successes(folds, res)) < vbss) expect_true(is.data.frame(res$predictor(satsolvers$data[satsolvers$features]))) }) test_that("regressionPairs", { skip.expensive() res = regressionPairs(regressor=makeLearner("regr.lm"), data=folds) expect_true(sum(parscores(folds, res)) > vbsp) expect_true(sum(misclassificationPenalties(folds, res)) > vbsm) 
expect_true(sum(successes(folds, res)) < vbss) expect_true(is.data.frame(res$predictor(satsolvers$data[satsolvers$features]))) res = regressionPairs(regressor=makeLearner("regr.lm"), data=folds, combine=makeLearner("classif.OneR")) expect_true(sum(parscores(folds, res)) > vbsp) expect_true(sum(misclassificationPenalties(folds, res)) > vbsm) expect_true(sum(successes(folds, res)) < vbss) expect_true(is.data.frame(res$predictor(satsolvers$data[satsolvers$features]))) }) test_that("perfScatterPlot", { skip.expensive() model = classify(classifier=makeLearner("classif.J48"), data=folds) library(ggplot2) p = perfScatterPlot(parscores, model, singleBest, folds, satsolvers) + scale_x_log10() + scale_y_log10() + xlab("J48") + ylab("single best") expect_false(is.null(p)) satsolvers$extra = c("foo") satsolvers$data$foo = 1:nrow(satsolvers$data) p = perfScatterPlot(parscores, model, singleBest, folds, satsolvers, pargs=aes(colour = foo)) + scale_x_log10() + scale_y_log10() + xlab("J48") + ylab("single best") expect_false(is.null(p)) })
/tests/testthat/test.satsolvers.R
no_license
alincc/llama
R
false
false
8,019
r
data(satsolvers) vbsp = sum(parscores(satsolvers, vbs)) vbsm = sum(misclassificationPenalties(satsolvers, vbs)) vbss = sum(successes(satsolvers, vbs)) test_that("singleBest and vbs", { skip.expensive() vbsse = sum(apply(satsolvers$data[satsolvers$success], 1, max)) expect_equal(vbsse, 2125) expect_equal(vbss, 2125) vbsp1 = sum(parscores(satsolvers, vbs, 1)) vbsp1e = sum(apply(satsolvers$data[satsolvers$performance], 1, min)) expect_equal(vbsp1e, 1288664.971) expect_equal(vbsp1, 1288664.971) expect_equal(vbsp, 11267864.97) expect_equal(vbsm, 0) sbp = sum(parscores(satsolvers, singleBest)) sbm = sum(misclassificationPenalties(satsolvers, singleBest)) sbs = sum(successes(satsolvers, singleBest)) sbse = sum(satsolvers$data[,"clasp_success"]) expect_equal(sbse, 2048) expect_equal(sbs, 2048) sbp1 = sum(parscores(satsolvers, singleBest, 1)) sbp1e = sum(satsolvers$data["clasp"]) expect_equal(sbp1e, 1586266.044) expect_equal(sbp1, 1586266.044) sbme = sum(apply(satsolvers$data[satsolvers$performance], 1, function(x) { abs(x["clasp"] - min(x)) })) expect_equal(sbme, 297601.073) expect_equal(sbm, 297601.073) expect_equal(sbp, 14060266.04) }) folds = cvFolds(satsolvers) test_that("classify", { skip.expensive() res = classify(classifier=makeLearner("classif.OneR"), data=folds) expect_true(sum(parscores(folds, res)) > vbsp) expect_true(sum(misclassificationPenalties(folds, res)) > vbsm) expect_true(sum(successes(folds, res)) < vbss) expect_true(is.data.frame(res$predictor(satsolvers$data[satsolvers$features]))) res = classify(classifier=list(makeLearner("classif.OneR"), makeLearner("classif.OneR"), makeLearner("classif.OneR")), data=folds) expect_true(sum(parscores(folds, res)) > vbsp) expect_true(sum(misclassificationPenalties(folds, res)) > vbsm) expect_true(sum(successes(folds, res)) < vbss) expect_true(is.data.frame(res$predictor(satsolvers$data[satsolvers$features]))) res = classify(classifier=list(makeLearner("classif.OneR"), makeLearner("classif.OneR"), 
makeLearner("classif.OneR"), .combine=makeLearner("classif.OneR")), data=folds) expect_true(sum(parscores(folds, res)) > vbsp) expect_true(sum(misclassificationPenalties(folds, res)) > vbsm) expect_true(sum(successes(folds, res)) < vbss) expect_true(is.data.frame(res$predictor(satsolvers$data[satsolvers$features]))) }) test_that("classifyPairs", { skip.expensive() res = classifyPairs(classifier=makeLearner("classif.OneR"), data=folds) expect_true(sum(parscores(folds, res)) > vbsp) expect_true(sum(misclassificationPenalties(folds, res)) > vbsm) expect_true(sum(successes(folds, res)) < vbss) expect_true(is.data.frame(res$predictor(satsolvers$data[satsolvers$features]))) res = classifyPairs(classifier=makeLearner("classif.OneR"), data=folds, combine=makeLearner("classif.OneR")) expect_true(sum(parscores(folds, res)) > vbsp) expect_true(sum(misclassificationPenalties(folds, res)) > vbsm) expect_true(sum(successes(folds, res)) < vbss) expect_true(is.data.frame(res$predictor(satsolvers$data[satsolvers$features]))) }) test_that("cluster", { skip.expensive() res = cluster(clusterer=makeLearner("cluster.SimpleKMeans"), data=folds, pre=normalize) expect_true(sum(parscores(folds, res)) > vbsp) expect_true(sum(misclassificationPenalties(folds, res)) > vbsm) expect_true(sum(successes(folds, res)) < vbss) expect_true(is.data.frame(res$predictor(satsolvers$data[satsolvers$features]))) res = cluster(clusterer=makeLearner("cluster.SimpleKMeans"), data=folds, bestBy="successes", pre=normalize) expect_true(sum(parscores(folds, res)) > vbsp) expect_true(sum(misclassificationPenalties(folds, res)) > vbsm) expect_true(sum(successes(folds, res)) < vbss) expect_true(is.data.frame(res$predictor(satsolvers$data[satsolvers$features]))) res = cluster(clusterer=list(makeLearner("cluster.SimpleKMeans"), makeLearner("cluster.SimpleKMeans"), makeLearner("cluster.SimpleKMeans")), data=folds, pre=normalize) expect_true(sum(parscores(folds, res)) > vbsp) 
expect_true(sum(misclassificationPenalties(folds, res)) > vbsm) expect_true(sum(successes(folds, res)) < vbss) expect_true(is.data.frame(res$predictor(satsolvers$data[satsolvers$features]))) res = cluster(clusterer=list(makeLearner("cluster.SimpleKMeans"), makeLearner("cluster.SimpleKMeans"), makeLearner("cluster.SimpleKMeans"), .combine=makeLearner("classif.OneR")), data=folds, pre=normalize) expect_true(sum(parscores(folds, res)) > vbsp) expect_true(sum(misclassificationPenalties(folds, res)) > vbsm) expect_true(sum(successes(folds, res)) < vbss) expect_true(is.data.frame(res$predictor(satsolvers$data[satsolvers$features]))) }) test_that("regression", { skip.expensive() res = regression(regressor=makeLearner("regr.lm"), data=folds) expect_true(sum(parscores(folds, res)) > vbsp) expect_true(sum(misclassificationPenalties(folds, res)) > vbsm) expect_true(sum(successes(folds, res)) < vbss) expect_true(is.data.frame(res$predictor(satsolvers$data[satsolvers$features]))) res = regression(regressor=makeLearner("regr.lm"), data=folds, combine=makeLearner("classif.OneR")) expect_true(sum(parscores(folds, res)) > vbsp) expect_true(sum(misclassificationPenalties(folds, res)) > vbsm) expect_true(sum(successes(folds, res)) < vbss) expect_true(is.data.frame(res$predictor(satsolvers$data[satsolvers$features]))) res = regression(regressor=makeLearner("regr.lm"), data=folds, combine=makeLearner("classif.OneR"), expand=function(x) { cbind(x, combn(c(1:ncol(x)), 2, function(y) { abs(x[,y[1]] - x[,y[2]]) })) }) expect_true(sum(parscores(folds, res)) > vbsp) expect_true(sum(misclassificationPenalties(folds, res)) > vbsm) expect_true(sum(successes(folds, res)) < vbss) expect_true(is.data.frame(res$predictor(satsolvers$data[satsolvers$features]))) }) test_that("regressionPairs", { skip.expensive() res = regressionPairs(regressor=makeLearner("regr.lm"), data=folds) expect_true(sum(parscores(folds, res)) > vbsp) expect_true(sum(misclassificationPenalties(folds, res)) > vbsm) 
expect_true(sum(successes(folds, res)) < vbss) expect_true(is.data.frame(res$predictor(satsolvers$data[satsolvers$features]))) res = regressionPairs(regressor=makeLearner("regr.lm"), data=folds, combine=makeLearner("classif.OneR")) expect_true(sum(parscores(folds, res)) > vbsp) expect_true(sum(misclassificationPenalties(folds, res)) > vbsm) expect_true(sum(successes(folds, res)) < vbss) expect_true(is.data.frame(res$predictor(satsolvers$data[satsolvers$features]))) }) test_that("perfScatterPlot", { skip.expensive() model = classify(classifier=makeLearner("classif.J48"), data=folds) library(ggplot2) p = perfScatterPlot(parscores, model, singleBest, folds, satsolvers) + scale_x_log10() + scale_y_log10() + xlab("J48") + ylab("single best") expect_false(is.null(p)) satsolvers$extra = c("foo") satsolvers$data$foo = 1:nrow(satsolvers$data) p = perfScatterPlot(parscores, model, singleBest, folds, satsolvers, pargs=aes(colour = foo)) + scale_x_log10() + scale_y_log10() + xlab("J48") + ylab("single best") expect_false(is.null(p)) })
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/dRezyserowie.R \name{dRezyserowie} \alias{dRezyserowie} \title{Liczenie odleglosci dla rezyserow} \usage{ dRezyserowie(IDFilm1, IDFilm2, sciezkaDoBazy) } \arguments{ \item{IDFilm1}{- id pierwszego filmu} \item{IDFilm2}{- id drugiego filmu} \item{sciezkaDoBazy}{- sciezka do bazy filmow} } \value{ Funkcja zwraca wartosc liczbowa opisujaca odleglosc miedzy filmami. } \description{ Funkcja \code{dRezyserowie} liczy odleglosc miedzy rezyserami dwoch filmow z bazy } \details{ Funkcja wyciaga ID dwoch filmow z bazy. Jesli ktorys z nich nie ma rezyserow odleglosc miedzy nimi wynosi 0.8, jesli tak nie jest miara jest liczona zgodnie z funkcja PorownajWektory. } \examples{ #Nie wywoluj jesli nie masz takiej bazy dRezyserowie(4000,2461,"BazaFilmow.sql") dRezyserowie(23456,2461,"BazaFilmow.sql") } \author{ Krzysztof Rudas }
/WyszynskaRudasIMDB/man/dRezyserowie.Rd
no_license
Wyszynskak/Projekty
R
false
false
913
rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/dRezyserowie.R \name{dRezyserowie} \alias{dRezyserowie} \title{Liczenie odleglosci dla rezyserow} \usage{ dRezyserowie(IDFilm1, IDFilm2, sciezkaDoBazy) } \arguments{ \item{IDFilm1}{- id pierwszego filmu} \item{IDFilm2}{- id drugiego filmu} \item{sciezkaDoBazy}{- sciezka do bazy filmow} } \value{ Funkcja zwraca wartosc liczbowa opisujaca odleglosc miedzy filmami. } \description{ Funkcja \code{dRezyserowie} liczy odleglosc miedzy rezyserami dwoch filmow z bazy } \details{ Funkcja wyciaga ID dwoch filmow z bazy. Jesli ktorys z nich nie ma rezyserow odleglosc miedzy nimi wynosi 0.8, jesli tak nie jest miara jest liczona zgodnie z funkcja PorownajWektory. } \examples{ #Nie wywoluj jesli nie masz takiej bazy dRezyserowie(4000,2461,"BazaFilmow.sql") dRezyserowie(23456,2461,"BazaFilmow.sql") } \author{ Krzysztof Rudas }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{bedloss} \alias{bedloss} \title{Mean Squared Error for Genomewide prediction} \usage{ bedloss(bed, y, weights, pred = NULL, ...) } \arguments{ \item{bed}{an rbed object, or a mergedrbed object} \item{y}{vector of length bed$no.ind describing the truth} \item{weights}{a vector of length bed$no.snps for an rbed, or length(bed$DATkeep) for a mergedrbed object} \item{pred}{(default=NULL) a previous call to bedloss, to save calling bedpred again} \item{...}{extra parameters to \code{\link[pcapred]{get_data}}} } \value{ A list containing pred: the predictions and loss: the mse of those predictions } \description{ Constructed the genomewide prediction for a score function defined by \code{weights} on an \code{rbed} object, then evaluate the mean squared error to a provided \code{y}. }
/man/bedloss.Rd
no_license
danjlawson/bedlm
R
false
true
886
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{bedloss} \alias{bedloss} \title{Mean Squared Error for Genomewide prediction} \usage{ bedloss(bed, y, weights, pred = NULL, ...) } \arguments{ \item{bed}{an rbed object, or a mergedrbed object} \item{y}{vector of length bed$no.ind describing the truth} \item{weights}{a vector of length bed$no.snps for an rbed, or length(bed$DATkeep) for a mergedrbed object} \item{pred}{(default=NULL) a previous call to bedloss, to save calling bedpred again} \item{...}{extra parameters to \code{\link[pcapred]{get_data}}} } \value{ A list containing pred: the predictions and loss: the mse of those predictions } \description{ Constructed the genomewide prediction for a score function defined by \code{weights} on an \code{rbed} object, then evaluate the mean squared error to a provided \code{y}. }
library(Rdbi) library(RdbiPgSQL) # conn <- dbConnect(PgSQL(), host="", dbname="bioenergy", user="ptittmann", password="") #dbListTables(conn) #r_baseline results query <- dbSendQuery(conn, "select * from crosstab('select price_point, fstk_type||f_type as pathway , sum(quant_mgy*c.gal_per_bdt*d.energy_density_gge_per_gal)/1000 as Mgge from r_baseline.brfn join model.runs using (run) join model.conversion_efficiency c on (f_type=c.tech and fstk_type=c.type) join model.technology d on (f_type=d.tech) where fstk_type NOT like ''%cost'' and fstk_type NOT like ''production'' and fstk_type not like ''credit'' group by price_point, pathway order by 1;','select distinct fstk_type||f_type from r_baseline.brfn where fstk_type NOT like ''%cost'' and fstk_type NOT like ''production'' and fstk_type not like ''credit'' order by 1;') as ct ( price_point float, ag_res_lce float, animal_fats_fame float, corngrain_dry_mill float, corngrain_wet_mill float, forest_lce float, grease_fame float, hec_lce float, msw_dirty_ft_diesel float, msw_food_lce float, msw_paper_ft_diesel float, msw_paper_lce float, msw_wood_lce float, msw_yard_ft_diesel float, msw_yard_lce float, ovw_lce float, pulpwood_lce float, seed_oils_fame float);") r_baseline <- dbGetResult(query) dbClearResult(query) #r_baseline # r_badlce results pquery <- dbSendQuery(conn, "select * from crosstab('select price_point, fstk_type||f_type as pathway , sum(quant_mgy*c.gal_per_bdt*d.energy_density_gge_per_gal)/1000 as Mgge from r_badlce.brfn join model.runs using (run) join model.conversion_efficiency c on (f_type=c.tech and fstk_type=c.type) join model.technology d on (f_type=d.tech) where fstk_type NOT like ''%cost'' and fstk_type NOT like ''production'' and fstk_type not like ''credit'' group by price_point, pathway order by 1;','select distinct fstk_type||f_type from r_badlce.brfn where fstk_type NOT like ''%cost'' and fstk_type NOT like ''production'' and fstk_type not like ''credit'' order by 1;') as ct ( price_point 
float, ag_res_ft_diesel float, ag_res_lce float, animal_fats_fame float, corngrain_dry_mill float, corngrain_wet_mill float, forest_ft_diesel float, forest_lce float, grease_fame float, hec_ft_diesel float, hec_lce float, msw_dirty_ft_diesel float, msw_food_lce float, msw_paper_ft_diesel float, msw_paper_lce float, msw_wood_ft_diesel float, msw_wood_lce float, msw_yard_ft_diesel float, msw_yard_lce float, ovw_ft_diesel float, ovw_lce float, pulpwood_ft_diesel float, pulpwood_lce float, seed_oils_fame float);") r_badlce <- dbGetResult(pquery) dbClearResult(pquery) #r_badlce #r_cblend query <- dbSendQuery(conn, "select * from crosstab('select price_point, fstk_type||f_type as pathway , sum(quant_mgy*c.gal_per_bdt*d.energy_density_gge_per_gal)/1000 as Mgge from r_cblend.brfn join model.runs using (run) join model.conversion_efficiency c on (f_type=c.tech and fstk_type=c.type) join model.technology d on (f_type=d.tech) where fstk_type NOT like ''%cost'' and fstk_type NOT like ''production'' and fstk_type not like ''credit'' group by price_point, pathway order by 1;','select distinct fstk_type||f_type from r_cblend.brfn where fstk_type NOT like ''%cost'' and fstk_type NOT like ''production'' and fstk_type not like ''credit'' order by 1;') as ct ( price_point float, ag_res_ft_diesel float, ag_res_lce float, animal_fats_fame float, corngrain_dry_mill float, corngrain_wet_mill float, forest_ft_diesel float, forest_lce float, grease_fame float, hec_ft_diesel float, hec_lce float, msw_dirty_ft_diesel float, msw_food_lce float, msw_paper_ft_diesel float, msw_paper_lce float, msw_wood_ft_diesel float, msw_wood_lce float, msw_yard_ft_diesel float, msw_yard_lce float, ovw_ft_diesel float, ovw_lce float, pulpwood_ft_diesel float, pulpwood_lce float, seed_oils_fame float);") r_cblend <- dbGetResult(query) dbClearResult(query) #r_cblend #r_fedforest query <- dbSendQuery(conn, "select * from crosstab('select price_point, fstk_type||f_type as pathway , 
sum(quant_mgy*c.gal_per_bdt*d.energy_density_gge_per_gal)/1000 as Mgge from r_fedforest.brfn join model.runs using (run) join model.conversion_efficiency c on (f_type=c.tech and fstk_type=c.type) join model.technology d on (f_type=d.tech) where fstk_type NOT like ''%cost'' and fstk_type NOT like ''production'' and fstk_type not like ''credit'' group by price_point, pathway order by 1;','select distinct fstk_type||f_type from r_fedforest.brfn where fstk_type NOT like ''%cost'' and fstk_type NOT like ''production'' and fstk_type not like ''credit'' order by 1;') as ct ( price_point float, ag_res_lce float, animal_fats_fame float, corngrain_dry_mill float, corngrain_wet_mill float, forest_lce float, grease_fame float, hec_lce float, msw_dirty_ft_diesel float, msw_food_lce float, msw_paper_ft_diesel float, msw_paper_lce float, msw_wood_lce float, msw_yard_ft_diesel float, msw_yard_lce float, ovw_lce float, pulpwood_lce float, seed_oils_fame float);") r_fedforest<- dbGetResult(query) dbClearResult(query) #r_fedforest #r_ffv query<- dbSendQuery(conn, "select * from crosstab('select price_point, fstk_type||f_type as pathway , sum(quant_mgy*c.gal_per_bdt*d.energy_density_gge_per_gal)/1000 as Mgge from r_ffv.brfn join model.runs using (run) join model.conversion_efficiency c on (f_type=c.tech and fstk_type=c.type) join model.technology d on (f_type=d.tech) where fstk_type NOT like ''%cost'' and fstk_type NOT like ''production'' and fstk_type not like ''credit'' group by price_point, pathway order by 1;','select distinct fstk_type||f_type from r_ffv.brfn where fstk_type NOT like ''%cost'' and fstk_type NOT like ''production'' and fstk_type not like ''credit'' order by 1;') as ct ( price_point float, ag_res_ft_diesel float, ag_res_lce float, animal_fats_fame float, corngrain_dry_mill float, corngrain_wet_mill float, forest_ft_diesel float, forest_lce float, grease_fame float, hec_ft_diesel float, hec_lce float, msw_dirty_ft_diesel float, msw_food_lce float, 
msw_paper_ft_diesel float, msw_paper_lce float, msw_wood_ft_diesel float, msw_wood_lce float, msw_yard_ft_diesel float, msw_yard_lce float, ovw_lce float, pulpwood_ft_diesel float, pulpwood_lce float, seed_oils_fame float);") r_ffv <- dbGetResult(query) dbClearResult(query) #r_ffv #r_hiencrop query<- dbSendQuery(conn, "select * from crosstab('select price_point, fstk_type||f_type as pathway , sum(quant_mgy*c.gal_per_bdt*d.energy_density_gge_per_gal)/1000 as Mgge from r_hiencrop.brfn join model.runs using (run) join model.conversion_efficiency c on (f_type=c.tech and fstk_type=c.type) join model.technology d on (f_type=d.tech) where fstk_type NOT like ''%cost'' and fstk_type NOT like ''production'' and fstk_type not like ''credit'' group by price_point, pathway order by 1;','select distinct fstk_type||f_type from r_hiencrop.brfn where fstk_type NOT like ''%cost'' and fstk_type NOT like ''production'' and fstk_type not like ''credit'' order by 1;') as ct ( price_point float, ag_res_lce float, animal_fats_fame float, corngrain_dry_mill float, corngrain_wet_mill float, forest_lce float, grease_fame float, hec_lce float, msw_dirty_ft_diesel float, msw_food_lce float, msw_paper_ft_diesel float, msw_paper_lce float, msw_wood_lce float, msw_yard_ft_diesel float, msw_yard_lce float, ovw_lce float, pulpwood_lce float, seed_oils_fame float);") r_hiencrop <- dbGetResult(query) dbClearResult(query) #results r_loencrop query <- dbSendQuery(conn, "select * from crosstab('select price_point, fstk_type||f_type as pathway , sum(quant_mgy*c.gal_per_bdt*d.energy_density_gge_per_gal)/1000 as Mgge from r_loencrop.brfn join model.runs using (run) join model.conversion_efficiency c on (f_type=c.tech and fstk_type=c.type) join model.technology d on (f_type=d.tech) where fstk_type NOT like ''%cost'' and fstk_type NOT like ''production'' and fstk_type not like ''credit'' group by price_point, pathway order by 1;','select distinct fstk_type||f_type from r_loencrop.brfn where fstk_type NOT 
like ''%cost'' and fstk_type NOT like ''production'' and fstk_type not like ''credit'' order by 1;') as ct ( price_point float, ag_res_lce float, animal_fats_fame float, corngrain_dry_mill float, corngrain_wet_mill float, forest_lce float, grease_fame float, hec_lce float, msw_dirty_ft_diesel float, msw_food_lce float, msw_paper_ft_diesel float, msw_paper_lce float, msw_wood_lce float, msw_yard_ft_diesel float, msw_yard_lce float, ovw_lce float, pulpwood_lce float, seed_oils_fame float);") r_loencrop <- dbGetResult(query) dbClearResult(query) # results r_maxfeed query <- dbSendQuery(conn, "select * from crosstab('select price_point, fstk_type||f_type as pathway , sum(quant_mgy*c.gal_per_bdt*d.energy_density_gge_per_gal)/1000 as Mgge from r_maxfeed.brfn join model.runs using (run) join model.conversion_efficiency c on (f_type=c.tech and fstk_type=c.type) join model.technology d on (f_type=d.tech) where fstk_type NOT like ''%cost'' and fstk_type NOT like ''production'' and fstk_type not like ''credit'' group by price_point, pathway order by 1;','select distinct fstk_type||f_type from r_maxfeed.brfn where fstk_type NOT like ''%cost'' and fstk_type NOT like ''production'' and fstk_type not like ''credit'' order by 1;') as ct ( price_point float, ag_res_lce float, animal_fats_fame float, corngrain_dry_mill float, corngrain_wet_mill float, forest_lce float, grease_fame float, hec_lce float, msw_dirty_ft_diesel float, msw_food_lce float, msw_paper_ft_diesel float, msw_paper_lce float, msw_wood_lce float, msw_yard_ft_diesel float, msw_yard_lce float, ovw_lce float, pulpwood_lce float, seed_oils_fame float);") r_maxfeed <- dbGetResult(query) dbClearResult(query) #create a list of results tables matrixlist <- list(r_baseline, r_badlce, r_cblend, r_fedforest, r_ffv, r_hiencrop, r_loencrop, r_maxfeed) #par(mfcol= c(5,5)) for (i in matrixlist){ ptable<-i[,-1]#create plot matrix w/o price point column ptable [is.na(ptable)] <- 0 #convert NA values to 0 maxval<-max(ptable) 
#create volume axis interval<-ceiling(maxval)/27 mgy<- seq(0,ceiling(maxval), by=interval) fprice <- c(i$price_point)#create price point axis #pdf(paste(i,"fuel_pw.pdf", sep=""), bg="white") matplot(ptable, fprice, type="l", col= rainbow(length(names(i)))) rm(i,ptable, fprice, maxval, interval, mgy) dev.off() } #legend(x=-1, y=.25, names(i), col = rainbow(length(names(i))), lty=1, ncol=3)
/make-db/results/r/doe_fuelpw_sc.r
no_license
qjhart/wga-biorefinery-study
R
false
false
10,541
r
library(Rdbi) library(RdbiPgSQL) # conn <- dbConnect(PgSQL(), host="", dbname="bioenergy", user="ptittmann", password="") #dbListTables(conn) #r_baseline results query <- dbSendQuery(conn, "select * from crosstab('select price_point, fstk_type||f_type as pathway , sum(quant_mgy*c.gal_per_bdt*d.energy_density_gge_per_gal)/1000 as Mgge from r_baseline.brfn join model.runs using (run) join model.conversion_efficiency c on (f_type=c.tech and fstk_type=c.type) join model.technology d on (f_type=d.tech) where fstk_type NOT like ''%cost'' and fstk_type NOT like ''production'' and fstk_type not like ''credit'' group by price_point, pathway order by 1;','select distinct fstk_type||f_type from r_baseline.brfn where fstk_type NOT like ''%cost'' and fstk_type NOT like ''production'' and fstk_type not like ''credit'' order by 1;') as ct ( price_point float, ag_res_lce float, animal_fats_fame float, corngrain_dry_mill float, corngrain_wet_mill float, forest_lce float, grease_fame float, hec_lce float, msw_dirty_ft_diesel float, msw_food_lce float, msw_paper_ft_diesel float, msw_paper_lce float, msw_wood_lce float, msw_yard_ft_diesel float, msw_yard_lce float, ovw_lce float, pulpwood_lce float, seed_oils_fame float);") r_baseline <- dbGetResult(query) dbClearResult(query) #r_baseline # r_badlce results pquery <- dbSendQuery(conn, "select * from crosstab('select price_point, fstk_type||f_type as pathway , sum(quant_mgy*c.gal_per_bdt*d.energy_density_gge_per_gal)/1000 as Mgge from r_badlce.brfn join model.runs using (run) join model.conversion_efficiency c on (f_type=c.tech and fstk_type=c.type) join model.technology d on (f_type=d.tech) where fstk_type NOT like ''%cost'' and fstk_type NOT like ''production'' and fstk_type not like ''credit'' group by price_point, pathway order by 1;','select distinct fstk_type||f_type from r_badlce.brfn where fstk_type NOT like ''%cost'' and fstk_type NOT like ''production'' and fstk_type not like ''credit'' order by 1;') as ct ( price_point 
float, ag_res_ft_diesel float, ag_res_lce float, animal_fats_fame float, corngrain_dry_mill float, corngrain_wet_mill float, forest_ft_diesel float, forest_lce float, grease_fame float, hec_ft_diesel float, hec_lce float, msw_dirty_ft_diesel float, msw_food_lce float, msw_paper_ft_diesel float, msw_paper_lce float, msw_wood_ft_diesel float, msw_wood_lce float, msw_yard_ft_diesel float, msw_yard_lce float, ovw_ft_diesel float, ovw_lce float, pulpwood_ft_diesel float, pulpwood_lce float, seed_oils_fame float);") r_badlce <- dbGetResult(pquery) dbClearResult(pquery) #r_badlce #r_cblend query <- dbSendQuery(conn, "select * from crosstab('select price_point, fstk_type||f_type as pathway , sum(quant_mgy*c.gal_per_bdt*d.energy_density_gge_per_gal)/1000 as Mgge from r_cblend.brfn join model.runs using (run) join model.conversion_efficiency c on (f_type=c.tech and fstk_type=c.type) join model.technology d on (f_type=d.tech) where fstk_type NOT like ''%cost'' and fstk_type NOT like ''production'' and fstk_type not like ''credit'' group by price_point, pathway order by 1;','select distinct fstk_type||f_type from r_cblend.brfn where fstk_type NOT like ''%cost'' and fstk_type NOT like ''production'' and fstk_type not like ''credit'' order by 1;') as ct ( price_point float, ag_res_ft_diesel float, ag_res_lce float, animal_fats_fame float, corngrain_dry_mill float, corngrain_wet_mill float, forest_ft_diesel float, forest_lce float, grease_fame float, hec_ft_diesel float, hec_lce float, msw_dirty_ft_diesel float, msw_food_lce float, msw_paper_ft_diesel float, msw_paper_lce float, msw_wood_ft_diesel float, msw_wood_lce float, msw_yard_ft_diesel float, msw_yard_lce float, ovw_ft_diesel float, ovw_lce float, pulpwood_ft_diesel float, pulpwood_lce float, seed_oils_fame float);") r_cblend <- dbGetResult(query) dbClearResult(query) #r_cblend #r_fedforest query <- dbSendQuery(conn, "select * from crosstab('select price_point, fstk_type||f_type as pathway , 
sum(quant_mgy*c.gal_per_bdt*d.energy_density_gge_per_gal)/1000 as Mgge from r_fedforest.brfn join model.runs using (run) join model.conversion_efficiency c on (f_type=c.tech and fstk_type=c.type) join model.technology d on (f_type=d.tech) where fstk_type NOT like ''%cost'' and fstk_type NOT like ''production'' and fstk_type not like ''credit'' group by price_point, pathway order by 1;','select distinct fstk_type||f_type from r_fedforest.brfn where fstk_type NOT like ''%cost'' and fstk_type NOT like ''production'' and fstk_type not like ''credit'' order by 1;') as ct ( price_point float, ag_res_lce float, animal_fats_fame float, corngrain_dry_mill float, corngrain_wet_mill float, forest_lce float, grease_fame float, hec_lce float, msw_dirty_ft_diesel float, msw_food_lce float, msw_paper_ft_diesel float, msw_paper_lce float, msw_wood_lce float, msw_yard_ft_diesel float, msw_yard_lce float, ovw_lce float, pulpwood_lce float, seed_oils_fame float);") r_fedforest<- dbGetResult(query) dbClearResult(query) #r_fedforest #r_ffv query<- dbSendQuery(conn, "select * from crosstab('select price_point, fstk_type||f_type as pathway , sum(quant_mgy*c.gal_per_bdt*d.energy_density_gge_per_gal)/1000 as Mgge from r_ffv.brfn join model.runs using (run) join model.conversion_efficiency c on (f_type=c.tech and fstk_type=c.type) join model.technology d on (f_type=d.tech) where fstk_type NOT like ''%cost'' and fstk_type NOT like ''production'' and fstk_type not like ''credit'' group by price_point, pathway order by 1;','select distinct fstk_type||f_type from r_ffv.brfn where fstk_type NOT like ''%cost'' and fstk_type NOT like ''production'' and fstk_type not like ''credit'' order by 1;') as ct ( price_point float, ag_res_ft_diesel float, ag_res_lce float, animal_fats_fame float, corngrain_dry_mill float, corngrain_wet_mill float, forest_ft_diesel float, forest_lce float, grease_fame float, hec_ft_diesel float, hec_lce float, msw_dirty_ft_diesel float, msw_food_lce float, 
msw_paper_ft_diesel float, msw_paper_lce float, msw_wood_ft_diesel float, msw_wood_lce float, msw_yard_ft_diesel float, msw_yard_lce float, ovw_lce float, pulpwood_ft_diesel float, pulpwood_lce float, seed_oils_fame float);") r_ffv <- dbGetResult(query) dbClearResult(query) #r_ffv #r_hiencrop query<- dbSendQuery(conn, "select * from crosstab('select price_point, fstk_type||f_type as pathway , sum(quant_mgy*c.gal_per_bdt*d.energy_density_gge_per_gal)/1000 as Mgge from r_hiencrop.brfn join model.runs using (run) join model.conversion_efficiency c on (f_type=c.tech and fstk_type=c.type) join model.technology d on (f_type=d.tech) where fstk_type NOT like ''%cost'' and fstk_type NOT like ''production'' and fstk_type not like ''credit'' group by price_point, pathway order by 1;','select distinct fstk_type||f_type from r_hiencrop.brfn where fstk_type NOT like ''%cost'' and fstk_type NOT like ''production'' and fstk_type not like ''credit'' order by 1;') as ct ( price_point float, ag_res_lce float, animal_fats_fame float, corngrain_dry_mill float, corngrain_wet_mill float, forest_lce float, grease_fame float, hec_lce float, msw_dirty_ft_diesel float, msw_food_lce float, msw_paper_ft_diesel float, msw_paper_lce float, msw_wood_lce float, msw_yard_ft_diesel float, msw_yard_lce float, ovw_lce float, pulpwood_lce float, seed_oils_fame float);") r_hiencrop <- dbGetResult(query) dbClearResult(query) #results r_loencrop query <- dbSendQuery(conn, "select * from crosstab('select price_point, fstk_type||f_type as pathway , sum(quant_mgy*c.gal_per_bdt*d.energy_density_gge_per_gal)/1000 as Mgge from r_loencrop.brfn join model.runs using (run) join model.conversion_efficiency c on (f_type=c.tech and fstk_type=c.type) join model.technology d on (f_type=d.tech) where fstk_type NOT like ''%cost'' and fstk_type NOT like ''production'' and fstk_type not like ''credit'' group by price_point, pathway order by 1;','select distinct fstk_type||f_type from r_loencrop.brfn where fstk_type NOT 
like ''%cost'' and fstk_type NOT like ''production'' and fstk_type not like ''credit'' order by 1;') as ct ( price_point float, ag_res_lce float, animal_fats_fame float, corngrain_dry_mill float, corngrain_wet_mill float, forest_lce float, grease_fame float, hec_lce float, msw_dirty_ft_diesel float, msw_food_lce float, msw_paper_ft_diesel float, msw_paper_lce float, msw_wood_lce float, msw_yard_ft_diesel float, msw_yard_lce float, ovw_lce float, pulpwood_lce float, seed_oils_fame float);") r_loencrop <- dbGetResult(query) dbClearResult(query) # results r_maxfeed query <- dbSendQuery(conn, "select * from crosstab('select price_point, fstk_type||f_type as pathway , sum(quant_mgy*c.gal_per_bdt*d.energy_density_gge_per_gal)/1000 as Mgge from r_maxfeed.brfn join model.runs using (run) join model.conversion_efficiency c on (f_type=c.tech and fstk_type=c.type) join model.technology d on (f_type=d.tech) where fstk_type NOT like ''%cost'' and fstk_type NOT like ''production'' and fstk_type not like ''credit'' group by price_point, pathway order by 1;','select distinct fstk_type||f_type from r_maxfeed.brfn where fstk_type NOT like ''%cost'' and fstk_type NOT like ''production'' and fstk_type not like ''credit'' order by 1;') as ct ( price_point float, ag_res_lce float, animal_fats_fame float, corngrain_dry_mill float, corngrain_wet_mill float, forest_lce float, grease_fame float, hec_lce float, msw_dirty_ft_diesel float, msw_food_lce float, msw_paper_ft_diesel float, msw_paper_lce float, msw_wood_lce float, msw_yard_ft_diesel float, msw_yard_lce float, ovw_lce float, pulpwood_lce float, seed_oils_fame float);") r_maxfeed <- dbGetResult(query) dbClearResult(query) #create a list of results tables matrixlist <- list(r_baseline, r_badlce, r_cblend, r_fedforest, r_ffv, r_hiencrop, r_loencrop, r_maxfeed) #par(mfcol= c(5,5)) for (i in matrixlist){ ptable<-i[,-1]#create plot matrix w/o price point column ptable [is.na(ptable)] <- 0 #convert NA values to 0 maxval<-max(ptable) 
#create volume axis interval<-ceiling(maxval)/27 mgy<- seq(0,ceiling(maxval), by=interval) fprice <- c(i$price_point)#create price point axis #pdf(paste(i,"fuel_pw.pdf", sep=""), bg="white") matplot(ptable, fprice, type="l", col= rainbow(length(names(i)))) rm(i,ptable, fprice, maxval, interval, mgy) dev.off() } #legend(x=-1, y=.25, names(i), col = rainbow(length(names(i))), lty=1, ncol=3)
#plot4.R # Project 1.. Exploratory Data for Making Plots ################################################### setwd("E:/R1/ExData_Plotting1_test/data") unzip("exdata_data_household_power_consumption.zip") data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.string = "?") # #test add date and time data$DateTime <- paste(as.character(data$Date), as.character(data$Time), sep = " ") data$DateTime <- strptime(data$DateTime,"%d/%m/%Y %H:%M:%S") # convert the Date and Time variables to Date/Time classes data$Date <- as.Date(data$Date, "%d/%m/%Y") data <- subset(data, Date == "2007-02-01" | Date == "2007-02-02") #using data from the dates 2007-02-01 and 2007-02-02 ################################################### #Drawing Plot4 par(mfrow = c(2,2)) #plot top left plot(data$DateTime, data$Global_active_power, xlab= " ", ylab = "Global Active Power (kilowatts)") lines(data$DateTime, data$Global_active_power) #plot top right plot(data$DateTime, data$Voltage, xlab = "datetime", ylab = "Voltage") # plot bottom left with(data, plot(DateTime, Sub_metering_1, type = "n", ylab = "Energy sub metering", xlab = " ")) with(data, lines(DateTime, Sub_metering_1,type="l", col = "black")) with(data, lines(DateTime, Sub_metering_2,type="l", col = "red")) with(data, lines(DateTime, Sub_metering_3,type="l", col = "blue")) legend("topright", pch="_",col=c("black", "red", "blue"), legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")) #plot bottom right plot(data$DateTime, data$Global_reactive_power, xlab = "datetime", ylab = "Global_reactive_power", type = "n") lines(data$DateTime, data$Global_reactive_power) dev.copy(png, file = "plot4.png", width = 480, height = 480) dev.off()
/plot4.R
no_license
huskyr/ExData_Plotting1
R
false
false
1,754
r
#plot4.R # Project 1.. Exploratory Data for Making Plots ################################################### setwd("E:/R1/ExData_Plotting1_test/data") unzip("exdata_data_household_power_consumption.zip") data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.string = "?") # #test add date and time data$DateTime <- paste(as.character(data$Date), as.character(data$Time), sep = " ") data$DateTime <- strptime(data$DateTime,"%d/%m/%Y %H:%M:%S") # convert the Date and Time variables to Date/Time classes data$Date <- as.Date(data$Date, "%d/%m/%Y") data <- subset(data, Date == "2007-02-01" | Date == "2007-02-02") #using data from the dates 2007-02-01 and 2007-02-02 ################################################### #Drawing Plot4 par(mfrow = c(2,2)) #plot top left plot(data$DateTime, data$Global_active_power, xlab= " ", ylab = "Global Active Power (kilowatts)") lines(data$DateTime, data$Global_active_power) #plot top right plot(data$DateTime, data$Voltage, xlab = "datetime", ylab = "Voltage") # plot bottom left with(data, plot(DateTime, Sub_metering_1, type = "n", ylab = "Energy sub metering", xlab = " ")) with(data, lines(DateTime, Sub_metering_1,type="l", col = "black")) with(data, lines(DateTime, Sub_metering_2,type="l", col = "red")) with(data, lines(DateTime, Sub_metering_3,type="l", col = "blue")) legend("topright", pch="_",col=c("black", "red", "blue"), legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")) #plot bottom right plot(data$DateTime, data$Global_reactive_power, xlab = "datetime", ylab = "Global_reactive_power", type = "n") lines(data$DateTime, data$Global_reactive_power) dev.copy(png, file = "plot4.png", width = 480, height = 480) dev.off()
#!/usr/bin/env Rscript args = commandArgs(trailingOnly=TRUE) batch = args[1] #batch variable in the metadata slot if no batch fill in empty string QC_feature_min = as.numeric(args[2]) #Minimal features threshold QC_mt_max = as.numeric(args[3]) #Maximum mitochondrial content threshold pca_dims = as.numeric(args[4]) #Amount of PCA dimensions to use integrate = TRUE data = "temp/data.rds" #If data is already normalized or not, stored by check_seurat.R features_var = 2000 #Amount of variable features to select cluster_resolution = c(1) #At which resolutions to cluster the data object_path = "temp/raw.rds" #_raw.rds file cellMarker_path = "/gpfs01/home/glanl/scripts/IMMUcan/TME_markerGenes.xlsx" chetahClassifier_path = "/gpfs01/home/glanl/scripts/IMMUcan/CHETAH_reference_updatedAnnotation.RData" # Make and set directories dir <- getwd() setwd(dir) ifelse(!dir.exists("temp"), dir.create("temp"), FALSE) ifelse(!dir.exists("out"), dir.create("out"), FALSE) # Load packages and set environment library(Seurat) library(SingleCellExperiment) library(CHETAH) library(harmony) library(ggplot2) library(patchwork) library(Matrix) library(dplyr) library(WriteXLS) library(pheatmap) RNGkind(sample.kind = "Rounding") set.seed(111) # Recreate seurat object seurat <- readRDS(object_path) data <- readRDS(data) if (batch == "none") { print("NO BATCH SPECIFIED => NO INTEGRATION") batch = "orig.ident" integrate = FALSE } # QC cells_before_QC <- ncol(seurat) seurat[["percent.mt"]] <- PercentageFeatureSet(seurat, pattern = "^Mt\\.|^MT\\.|^mt\\.|^Mt-|^MT-|^mt-") p1 <- AugmentPlot(VlnPlot(seurat, features = "nFeature_RNA", pt.size = 0.1, group.by = batch, log = TRUE)) + NoLegend() + scale_y_log10("Genes", expand = c(0,0)) + geom_hline(yintercept = QC_feature_min, color = "red") + theme(axis.title.x = element_blank(), plot.title = element_blank(), axis.title.y = element_text(), axis.text.x = element_blank(), axis.ticks.x = element_blank()) p2 <- AugmentPlot(VlnPlot(seurat, features = "nCount_RNA", 
pt.size = 0.1, group.by = batch, log = TRUE)) + NoLegend() + scale_y_log10("Counts", expand = c(0,0)) + theme(axis.title.x = element_blank(), plot.title = element_blank(), axis.title.y = element_text(), axis.text.x = element_blank(), axis.ticks.x = element_blank()) p3 <- AugmentPlot(VlnPlot(seurat, features = "percent.mt", pt.size = 0.1, group.by = batch)) + NoLegend() + geom_hline(yintercept = QC_mt_max, color = "red") + scale_y_continuous("Mito", expand = c(0,0)) + theme(axis.title.x = element_blank(), plot.title = element_blank(), axis.title.y = element_text()) seurat <- subset(seurat, subset = nFeature_RNA > QC_feature_min & percent.mt < QC_mt_max) # Prepare if (data$norm == FALSE) { seurat <- Seurat::NormalizeData(seurat, verbose = TRUE) } seurat <- seurat %>% FindVariableFeatures(selection.method = "vst", nfeatures = features_var, verbose=TRUE) %>% ScaleData(verbose = TRUE) %>% RunPCA(pc.genes = seurat@var.genes, npcs = pca_dims+20, verbose = TRUE) %>% RunUMAP(dims = 1:pca_dims, a = .5, b = 1.2, verbose = TRUE) p4 <- ElbowPlot(seurat, ndims = pca_dims+20) + geom_vline(xintercept = pca_dims, color = "red") + ylab("STDEV PCA") + theme(axis.title.x = element_blank()) p <- p4 / p1 / p2 / p3 ggsave(plot = p, filename = "out/QC.png") p0 <- AugmentPlot(DimPlot(seurat, reduction = "umap", group.by = batch, pt.size = .1) + NoLegend() + ggtitle("Before harmony")) # Harmony if (integrate == TRUE) { p1 <- AugmentPlot(DimPlot(object = seurat, reduction = "pca", pt.size = .1, group.by = batch) + NoLegend()) p2 <- AugmentPlot(VlnPlot(object = seurat, features = "PC_1", group.by = batch, pt.size = .1) + NoLegend() + theme(plot.title = element_blank())) seurat <- seurat %>% RunHarmony(batch, plot_convergence = FALSE) p3 <- AugmentPlot(DimPlot(object = seurat, reduction = "harmony", pt.size = .1, group.by = batch) + NoLegend()) p4 <- AugmentPlot(VlnPlot(object = seurat, features = "harmony_1", group.by = batch, pt.size = .1) + NoLegend() + theme(plot.title = element_blank())) 
# Dimensionality reduction and clustering seurat <- seurat %>% RunUMAP(reduction = "harmony", dims = 1:pca_dims, a = .5, b = 1.2, verbose = TRUE) %>% RunTSNE(reduction = "harmony", dims = 1:pca_dims, check_duplicates = FALSE) %>% FindNeighbors(reduction = "harmony", dims = 1:pca_dims, verbose = TRUE) %>% FindClusters(resolution = cluster_resolution, verbose = TRUE) %>% identity() p5 <- AugmentPlot(DimPlot(seurat, reduction = "umap", group.by = batch, pt.size = .1) + NoLegend() + ggtitle("After harmony")) p <- (p0 | p5) / (p1 | p3) / (p2 | p4) ggsave(plot = p, filename = "out/Harmony.png") } else { seurat <- seurat %>% RunUMAP(reduction = "pca", dims = 1:pca_dims, a = .5, b = 1.2, verbose = TRUE) %>% RunTSNE(reduction = "pca", dims = 1:pca_dims, check_duplicates = FALSE) %>% FindNeighbors(reduction = "pca", dims = 1:pca_dims, verbose = TRUE) %>% FindClusters(resolution = cluster_resolution, verbose = TRUE) %>% identity() } # Supervised annotation load(chetahClassifier_path) input <- SingleCellExperiment(assays = list(counts = seurat[["RNA"]]@data), reducedDims = SimpleList(TSNE = seurat@reductions$umap@cell.embeddings)) input <- CHETAHclassifier(input = input, ref_cells = reference, n_genes = 500, thresh = 0.05) p1 <- PlotCHETAH(input, return = TRUE) nodes <- c("Node1" = "Immune", "Node2" = "Immune", "Node3" = "Lymphoid", "Node4" = "Lymphoid", "Node5" = "NKT", "Node6" = "T", "Node7" = "T", "Node8" = "Myeloid", "Node9" = "Macro/DC", "Node10"= "Stromal", "Node11" = "Stromal") input$celltype_CHETAH <- plyr::revalue(input$celltype_CHETAH, replace = nodes[names(nodes) %in% input$celltype_CHETAH]) seurat@meta.data$annotation_CHETAH <- input$celltype_CHETAH ggsave(plot = p1, filename = "out/CHETAH_classification.pdf", height = 6, width = 12) # Split object #Tcells <- c("T", "CD4 T cell", "CD8 T cell", "NK", "NKT", "reg. 
T cell") #Myeloid <- c("Myeloid", "Macro/DC", "Macrophage", "Dendritic") #seurat_T <- seurat[, seurat$annotation_CHETAH %in% Tcells] #seurat_myeloid <- seurat[, seurat$annotation_CHETAH %in% Myeloid] #seurat_T <- seurat_T %>% # FindVariableFeatures(selection.method = "vst", nfeatures = 2000, verbose=TRUE) %>% # ScaleData(verbose = TRUE) %>% # RunPCA(npcs = 30, verbose = TRUE) %>% # RunUMAP(reduction = "harmony", dims = 1:10, a = .5, b = 1.2, verbose = TRUE) %>% # #RunTSNE(reduction = "harmony", dims = 1:pca_dims, check_duplicates = FALSE) #%>% # FindNeighbors(reduction = "harmony", dims = 1:10, verbose = TRUE) %>% # FindClusters(resolution = 0.8, verbose = TRUE) %>% # identity() # Plot cell markers cell.markers <- readxl::read_excel(cellMarker_path) markers <- list() for (i in as.character(na.omit(unique(cell.markers$cell_type)))) { temp <- rownames(seurat)[rownames(seurat) %in% na.omit(cell.markers[cell.markers$cell_type == i, "gene", drop = TRUE])] if (length(temp) > 0) { markers[[i]] <- temp } } temp <- AddModuleScore(seurat, features = markers) p <- DotPlot(temp, features = colnames(temp@meta.data)[grepl("Cluster[[:digit:]]", colnames(temp@meta.data))], cluster.idents = TRUE) + scale_x_discrete(labels = names(markers)) + RotatedAxis() ggsave(plot = p, filename = "temp/Dotplot_seuratClusters_geneModules.png", dpi = 100, height = 12, width = 12) p0 <- DotPlot(seurat, features = unique(cell.markers$gene), group.by = "seurat_clusters", cluster.idents = TRUE) + coord_flip() + NoLegend() WriteXLS(x = list("annotation" = tibble("seurat_clusters" = 0:(length(unique(seurat$seurat_clusters))-1), "abbreviation" = "Fill in")), ExcelFileName = "out/annotation.xls") ggsave(plot = p0, filename = "temp/Dotplot_seuratClusters_genes.png", dpi = 100, height = 12, width = 12) p1 <- AugmentPlot(DimPlot(seurat, group.by = "seurat_clusters", label = TRUE, label.size = 12)) cell.markers <- cell.markers[cell.markers$gene %in% rownames(seurat), ] for (type in 
unique(cell.markers$category)) { p2 <- FeaturePlot(seurat, features = unique(cell.markers[cell.markers$category == type, ]$gene), pt.size = .1) p3 <- DotPlot(seurat, features = unique(cell.markers[cell.markers$category == type, ]$gene), group.by = "seurat_clusters", cluster.idents = TRUE) + coord_flip() + NoLegend() layout <- " ACC BBB BBB " p <- p1 + p2 + p3 + plot_layout(design = layout) ggsave(plot = p, filename = paste0("temp/", type, ".png"), height = 30, width = 20, dpi = 100) } temp <- table(seurat$seurat_clusters, seurat$annotation_CHETAH) temp <- apply(temp, 1, function(x) x / sum(x)) pheatmap::pheatmap(temp, filename = "temp/cluster_comparison.pdf") # Summary statistics harmony_summary = data.frame( "Input_file" = object_path, "Batch" = batch, "QC_features_min" = QC_feature_min, "QC_mito_max" = QC_mt_max, "Variable_features" = features_var, "PCA_dimensions" = pca_dims, "Amount_genes" = nrow(seurat), "Genes_detected_per_cell" = median(seurat@meta.data$nFeature_RNA), "Cells_before_QC" = cells_before_QC, "Cells_after_QC" = ncol(seurat) ) seurat@misc <- list(harmony_summary) write.csv(x = harmony_summary, file = "out/harmony_summary.csv", row.names = FALSE) # Save RDS and convert to h5ad with seuratdisk saveRDS(seurat, paste0("temp/harmony.rds"))
/scProcessor_1.R
no_license
soumelis-lab/IMMUcan
R
false
false
9,402
r
#!/usr/bin/env Rscript args = commandArgs(trailingOnly=TRUE) batch = args[1] #batch variable in the metadata slot if no batch fill in empty string QC_feature_min = as.numeric(args[2]) #Minimal features threshold QC_mt_max = as.numeric(args[3]) #Maximum mitochondrial content threshold pca_dims = as.numeric(args[4]) #Amount of PCA dimensions to use integrate = TRUE data = "temp/data.rds" #If data is already normalized or not, stored by check_seurat.R features_var = 2000 #Amount of variable features to select cluster_resolution = c(1) #At which resolutions to cluster the data object_path = "temp/raw.rds" #_raw.rds file cellMarker_path = "/gpfs01/home/glanl/scripts/IMMUcan/TME_markerGenes.xlsx" chetahClassifier_path = "/gpfs01/home/glanl/scripts/IMMUcan/CHETAH_reference_updatedAnnotation.RData" # Make and set directories dir <- getwd() setwd(dir) ifelse(!dir.exists("temp"), dir.create("temp"), FALSE) ifelse(!dir.exists("out"), dir.create("out"), FALSE) # Load packages and set environment library(Seurat) library(SingleCellExperiment) library(CHETAH) library(harmony) library(ggplot2) library(patchwork) library(Matrix) library(dplyr) library(WriteXLS) library(pheatmap) RNGkind(sample.kind = "Rounding") set.seed(111) # Recreate seurat object seurat <- readRDS(object_path) data <- readRDS(data) if (batch == "none") { print("NO BATCH SPECIFIED => NO INTEGRATION") batch = "orig.ident" integrate = FALSE } # QC cells_before_QC <- ncol(seurat) seurat[["percent.mt"]] <- PercentageFeatureSet(seurat, pattern = "^Mt\\.|^MT\\.|^mt\\.|^Mt-|^MT-|^mt-") p1 <- AugmentPlot(VlnPlot(seurat, features = "nFeature_RNA", pt.size = 0.1, group.by = batch, log = TRUE)) + NoLegend() + scale_y_log10("Genes", expand = c(0,0)) + geom_hline(yintercept = QC_feature_min, color = "red") + theme(axis.title.x = element_blank(), plot.title = element_blank(), axis.title.y = element_text(), axis.text.x = element_blank(), axis.ticks.x = element_blank()) p2 <- AugmentPlot(VlnPlot(seurat, features = "nCount_RNA", 
pt.size = 0.1, group.by = batch, log = TRUE)) + NoLegend() + scale_y_log10("Counts", expand = c(0,0)) + theme(axis.title.x = element_blank(), plot.title = element_blank(), axis.title.y = element_text(), axis.text.x = element_blank(), axis.ticks.x = element_blank()) p3 <- AugmentPlot(VlnPlot(seurat, features = "percent.mt", pt.size = 0.1, group.by = batch)) + NoLegend() + geom_hline(yintercept = QC_mt_max, color = "red") + scale_y_continuous("Mito", expand = c(0,0)) + theme(axis.title.x = element_blank(), plot.title = element_blank(), axis.title.y = element_text()) seurat <- subset(seurat, subset = nFeature_RNA > QC_feature_min & percent.mt < QC_mt_max) # Prepare if (data$norm == FALSE) { seurat <- Seurat::NormalizeData(seurat, verbose = TRUE) } seurat <- seurat %>% FindVariableFeatures(selection.method = "vst", nfeatures = features_var, verbose=TRUE) %>% ScaleData(verbose = TRUE) %>% RunPCA(pc.genes = seurat@var.genes, npcs = pca_dims+20, verbose = TRUE) %>% RunUMAP(dims = 1:pca_dims, a = .5, b = 1.2, verbose = TRUE) p4 <- ElbowPlot(seurat, ndims = pca_dims+20) + geom_vline(xintercept = pca_dims, color = "red") + ylab("STDEV PCA") + theme(axis.title.x = element_blank()) p <- p4 / p1 / p2 / p3 ggsave(plot = p, filename = "out/QC.png") p0 <- AugmentPlot(DimPlot(seurat, reduction = "umap", group.by = batch, pt.size = .1) + NoLegend() + ggtitle("Before harmony")) # Harmony if (integrate == TRUE) { p1 <- AugmentPlot(DimPlot(object = seurat, reduction = "pca", pt.size = .1, group.by = batch) + NoLegend()) p2 <- AugmentPlot(VlnPlot(object = seurat, features = "PC_1", group.by = batch, pt.size = .1) + NoLegend() + theme(plot.title = element_blank())) seurat <- seurat %>% RunHarmony(batch, plot_convergence = FALSE) p3 <- AugmentPlot(DimPlot(object = seurat, reduction = "harmony", pt.size = .1, group.by = batch) + NoLegend()) p4 <- AugmentPlot(VlnPlot(object = seurat, features = "harmony_1", group.by = batch, pt.size = .1) + NoLegend() + theme(plot.title = element_blank())) 
# Dimensionality reduction and clustering seurat <- seurat %>% RunUMAP(reduction = "harmony", dims = 1:pca_dims, a = .5, b = 1.2, verbose = TRUE) %>% RunTSNE(reduction = "harmony", dims = 1:pca_dims, check_duplicates = FALSE) %>% FindNeighbors(reduction = "harmony", dims = 1:pca_dims, verbose = TRUE) %>% FindClusters(resolution = cluster_resolution, verbose = TRUE) %>% identity() p5 <- AugmentPlot(DimPlot(seurat, reduction = "umap", group.by = batch, pt.size = .1) + NoLegend() + ggtitle("After harmony")) p <- (p0 | p5) / (p1 | p3) / (p2 | p4) ggsave(plot = p, filename = "out/Harmony.png") } else { seurat <- seurat %>% RunUMAP(reduction = "pca", dims = 1:pca_dims, a = .5, b = 1.2, verbose = TRUE) %>% RunTSNE(reduction = "pca", dims = 1:pca_dims, check_duplicates = FALSE) %>% FindNeighbors(reduction = "pca", dims = 1:pca_dims, verbose = TRUE) %>% FindClusters(resolution = cluster_resolution, verbose = TRUE) %>% identity() } # Supervised annotation load(chetahClassifier_path) input <- SingleCellExperiment(assays = list(counts = seurat[["RNA"]]@data), reducedDims = SimpleList(TSNE = seurat@reductions$umap@cell.embeddings)) input <- CHETAHclassifier(input = input, ref_cells = reference, n_genes = 500, thresh = 0.05) p1 <- PlotCHETAH(input, return = TRUE) nodes <- c("Node1" = "Immune", "Node2" = "Immune", "Node3" = "Lymphoid", "Node4" = "Lymphoid", "Node5" = "NKT", "Node6" = "T", "Node7" = "T", "Node8" = "Myeloid", "Node9" = "Macro/DC", "Node10"= "Stromal", "Node11" = "Stromal") input$celltype_CHETAH <- plyr::revalue(input$celltype_CHETAH, replace = nodes[names(nodes) %in% input$celltype_CHETAH]) seurat@meta.data$annotation_CHETAH <- input$celltype_CHETAH ggsave(plot = p1, filename = "out/CHETAH_classification.pdf", height = 6, width = 12) # Split object #Tcells <- c("T", "CD4 T cell", "CD8 T cell", "NK", "NKT", "reg. 
T cell") #Myeloid <- c("Myeloid", "Macro/DC", "Macrophage", "Dendritic") #seurat_T <- seurat[, seurat$annotation_CHETAH %in% Tcells] #seurat_myeloid <- seurat[, seurat$annotation_CHETAH %in% Myeloid] #seurat_T <- seurat_T %>% # FindVariableFeatures(selection.method = "vst", nfeatures = 2000, verbose=TRUE) %>% # ScaleData(verbose = TRUE) %>% # RunPCA(npcs = 30, verbose = TRUE) %>% # RunUMAP(reduction = "harmony", dims = 1:10, a = .5, b = 1.2, verbose = TRUE) %>% # #RunTSNE(reduction = "harmony", dims = 1:pca_dims, check_duplicates = FALSE) #%>% # FindNeighbors(reduction = "harmony", dims = 1:10, verbose = TRUE) %>% # FindClusters(resolution = 0.8, verbose = TRUE) %>% # identity() # Plot cell markers cell.markers <- readxl::read_excel(cellMarker_path) markers <- list() for (i in as.character(na.omit(unique(cell.markers$cell_type)))) { temp <- rownames(seurat)[rownames(seurat) %in% na.omit(cell.markers[cell.markers$cell_type == i, "gene", drop = TRUE])] if (length(temp) > 0) { markers[[i]] <- temp } } temp <- AddModuleScore(seurat, features = markers) p <- DotPlot(temp, features = colnames(temp@meta.data)[grepl("Cluster[[:digit:]]", colnames(temp@meta.data))], cluster.idents = TRUE) + scale_x_discrete(labels = names(markers)) + RotatedAxis() ggsave(plot = p, filename = "temp/Dotplot_seuratClusters_geneModules.png", dpi = 100, height = 12, width = 12) p0 <- DotPlot(seurat, features = unique(cell.markers$gene), group.by = "seurat_clusters", cluster.idents = TRUE) + coord_flip() + NoLegend() WriteXLS(x = list("annotation" = tibble("seurat_clusters" = 0:(length(unique(seurat$seurat_clusters))-1), "abbreviation" = "Fill in")), ExcelFileName = "out/annotation.xls") ggsave(plot = p0, filename = "temp/Dotplot_seuratClusters_genes.png", dpi = 100, height = 12, width = 12) p1 <- AugmentPlot(DimPlot(seurat, group.by = "seurat_clusters", label = TRUE, label.size = 12)) cell.markers <- cell.markers[cell.markers$gene %in% rownames(seurat), ] for (type in 
unique(cell.markers$category)) { p2 <- FeaturePlot(seurat, features = unique(cell.markers[cell.markers$category == type, ]$gene), pt.size = .1) p3 <- DotPlot(seurat, features = unique(cell.markers[cell.markers$category == type, ]$gene), group.by = "seurat_clusters", cluster.idents = TRUE) + coord_flip() + NoLegend() layout <- " ACC BBB BBB " p <- p1 + p2 + p3 + plot_layout(design = layout) ggsave(plot = p, filename = paste0("temp/", type, ".png"), height = 30, width = 20, dpi = 100) } temp <- table(seurat$seurat_clusters, seurat$annotation_CHETAH) temp <- apply(temp, 1, function(x) x / sum(x)) pheatmap::pheatmap(temp, filename = "temp/cluster_comparison.pdf") # Summary statistics harmony_summary = data.frame( "Input_file" = object_path, "Batch" = batch, "QC_features_min" = QC_feature_min, "QC_mito_max" = QC_mt_max, "Variable_features" = features_var, "PCA_dimensions" = pca_dims, "Amount_genes" = nrow(seurat), "Genes_detected_per_cell" = median(seurat@meta.data$nFeature_RNA), "Cells_before_QC" = cells_before_QC, "Cells_after_QC" = ncol(seurat) ) seurat@misc <- list(harmony_summary) write.csv(x = harmony_summary, file = "out/harmony_summary.csv", row.names = FALSE) # Save RDS and convert to h5ad with seuratdisk saveRDS(seurat, paste0("temp/harmony.rds"))
#Q3 data_got=read.csv("character-deaths.csv") #Q4 colnames(data_got) #Q5 summary(data_got) #random comments on descriptive statistics: #All characters die between the years 297 and 299, #the median death year is 299 and the mean 299.2 #Out of 917 characters 612 are still alive, that's only about 2/3! #Considering there mediocre importance in the books (at least compared to #Starks and Lannisters) there are surprisingly many Greyjoys out there (51)! # The mean book of death is 2.928, so this seems quite nicely distributed. #The mean gender is 0.8288, so there are clearly too many men in GoT! #Q6 data_got[839,] data_got2[,"Death.Year"] #I assume that charactter who are still alive at the year 300 #live for 25 years more data_got2=read.csv("character-deaths.csv") colnames(data_got2) data_got2[is.na(data_got2)]=325 require(data.table) data_got3=data.table(data_got2,key = "Allegiances") data_got3[,Death.Year.by.Allegiances := mean(Death.Year), by=Allegiances] data_got3 ######### nchaptersb1 <- 73 nchaptersb2 <- sum(nchaptersb1 + 70) nchaptersb3 <- sum(nchaptersb2 + 82) nchaptersb4 <- sum(nchaptersb3 + 46) nchaptersb5 <- sum(nchaptersb4 + 73) nchaptersb5 data_got4=read.csv("character-deaths.csv") data_got4["Cumulative.Death.Chapter"] <- NA if(data_got4$Book.of.Death == 1){ data_got4$Cumulative.Death.Chapter <- data_got4$Death.Chapter }else if(data_got4$Book.of.Death == 2){ data_got4$Cumulative.Death.Chapter <- data_got4$Death.Chapter + nchaptersb1 }else if(data_got4$Book.of.Death == 3){ data_got4$Cumulative.Death.Chapter <- data_got4$Death.Chapter + nchaptersb2 }else if(data_got4$Book.of.Death == 4){ data_got4$Cumulative.Death.Chapter <- data_got4$Death.Chapter + nchaptersb3 }else if(data_got4$Book.of.Death == 5){ data_got4$Cumulative.Death.Chapter <- data_got4$Death.Chapter + nchaptersb4 }else if (data_got4[, "Book.of.Death"][is.na(data_got4[, "Book.of.Death"])]){ data_got4$Cumulative.Death.Chapter <- nchaptersb5 } data_got4[, "Book.of.Death"][is.na(data_got4[, 
"Book.of.Death"])] <- nchaptersb5 ######## cdc <- function(x){ ifelse (data_got4$Book.of.Death == 1) y <- data_got4$Death.Chapter if(data_got4$Book.of.Death == 2) y <- data_got4$Death.Chapter + nchaptersb1 if(data_got4$Book.of.Death == 3) y <- data_got4$Death.Chapter + nchaptersb2 if(data_got4$Book.of.Death == 4) y <- data_got4$Death.Chapter + nchaptersb3 if(data_got4$Book.of.Death == 5) y <- data_got4$Death.Chapter + nchaptersb4 if(data_got4[, "Book.of.Death"][is.na(data_got4[, "Book.of.Death"])]) y <- NA return(y) } data_got4$Cumulative.Death.Chapter <- sapply(data_got4$Book.of.Death, cdc) ########### THIS ONE WORKS: #Game of Codes #A Song of R and Python #Made by Daniel Barreto and Johannes Seebauer #First, we create a new variable to measure for how many chapters #each character has survived. To do so we define the following #variables as the accumulated number of chapters at the end of each book. nchaptersb1 <- 73 nchaptersb2 <- sum(nchaptersb1 + 70) nchaptersb3 <- sum(nchaptersb2 + 82) nchaptersb4 <- sum(nchaptersb3 + 46) nchaptersb5 <- sum(nchaptersb4 + 73) #One problem we have in our data sheet is that for some observations #it is specified the book in which the character dies, but not the chapter. #We fix this by assuming that in all of those cases the character died at the prologue. data_got4=read.csv("character-deaths.csv") data_got4$Death.Chapter <- with(data_got4, ifelse(!is.na(Book.of.Death) & is.na(Death.Chapter),0, Death.Chapter)) #Another problem we faced is that in the column "Allegiances" in our data there was both #entries for "House Tully" and simply "Tully", for example. We solve this by: data_got4$Allegiances <- lapply(data_got4$Allegiances,gsub,pattern="House ",replacement="") #Then we create a column in our data sheet named "Cumulative Death Chapter" #and fill it up. 
data_got4["Cumulative.Death.Chapter"] <- NA data_got4$Cumulative.Death.Chapter <- with(data_got4, ifelse(Book.of.Death == 1, Death.Chapter, ifelse(Book.of.Death == 2, Death.Chapter + nchaptersb1, ifelse(Book.of.Death == 3, Death.Chapter + nchaptersb2, ifelse(Book.of.Death == 4, Death.Chapter + nchaptersb3, ifelse(Book.of.Death == 5, Death.Chapter + nchaptersb4, NA)))))) #In order to be able to compute the life expectancy of the characters, #we'll now replace the NA's for nchapterb5 + 50. That is, we assume that #50 chapters into book 6 Cersei is going to kill everyone using the wildfire. data_got4$Cumulative.Death.Chapter<- with(data_got4, ifelse(is.na(Cumulative.Death.Chapter),nchaptersb5+50, Cumulative.Death.Chapter)) # Creating a function for life expectancy conditional on allegiance. #' Life expectancy by allegiance #' @param x String: The name of the house #' @seealso \code{\link{mean}} which this function wraps #' @export lexpectancy.allegiance <- function(x){ mean(data_got4[data_got4$Allegiances==x, "Cumulative.Death.Chapter"]) } lexpectancy.allegiance("Lannister") lexpectancy.allegiance("Stark") lexpectancy.allegiance("Night's Watch") lexpectancy.allegiance("Tully") lexpectancy.allegiance("Wildling") lexpectancy.allegiance("None") #Creating a function for life expectancy conditional on gender. #' Life expectancy by gender #' @param x Dummy: 0 for female and 1 for male #' @seealso \code{\link{mean}} which this function wraps #' @export lexpectancy.gender <- function(x){ mean(data_got4[data_got4$Gender==x, "Cumulative.Death.Chapter"]) } lexpectancy.gender(0) lexpectancy.gender(1) #Creating a function for life expectancy conditional on nobility (0 for not noble and 1 for noble). 
#' Life expectancy by nobility #' @param x Dummy: 0 for not noble and 1 for noble #' @seealso \code{\link{mean}} which this function wraps #' @export lexpectancy.nobility <- function(x){ mean(data_got4[data_got4$Nobility==x, "Cumulative.Death.Chapter"]) } lexpectancy.nobility(0) lexpectancy.nobility(1) library(devtools) library(roxygen2) devtools::create("Game.of.Codes")
/HW_7.R
no_license
daniel-barreto/Programming-SCPO
R
false
false
6,180
r
#Q3 data_got=read.csv("character-deaths.csv") #Q4 colnames(data_got) #Q5 summary(data_got) #random comments on descriptive statistics: #All characters die between the years 297 and 299, #the median death year is 299 and the mean 299.2 #Out of 917 characters 612 are still alive, that's only about 2/3! #Considering there mediocre importance in the books (at least compared to #Starks and Lannisters) there are surprisingly many Greyjoys out there (51)! # The mean book of death is 2.928, so this seems quite nicely distributed. #The mean gender is 0.8288, so there are clearly too many men in GoT! #Q6 data_got[839,] data_got2[,"Death.Year"] #I assume that charactter who are still alive at the year 300 #live for 25 years more data_got2=read.csv("character-deaths.csv") colnames(data_got2) data_got2[is.na(data_got2)]=325 require(data.table) data_got3=data.table(data_got2,key = "Allegiances") data_got3[,Death.Year.by.Allegiances := mean(Death.Year), by=Allegiances] data_got3 ######### nchaptersb1 <- 73 nchaptersb2 <- sum(nchaptersb1 + 70) nchaptersb3 <- sum(nchaptersb2 + 82) nchaptersb4 <- sum(nchaptersb3 + 46) nchaptersb5 <- sum(nchaptersb4 + 73) nchaptersb5 data_got4=read.csv("character-deaths.csv") data_got4["Cumulative.Death.Chapter"] <- NA if(data_got4$Book.of.Death == 1){ data_got4$Cumulative.Death.Chapter <- data_got4$Death.Chapter }else if(data_got4$Book.of.Death == 2){ data_got4$Cumulative.Death.Chapter <- data_got4$Death.Chapter + nchaptersb1 }else if(data_got4$Book.of.Death == 3){ data_got4$Cumulative.Death.Chapter <- data_got4$Death.Chapter + nchaptersb2 }else if(data_got4$Book.of.Death == 4){ data_got4$Cumulative.Death.Chapter <- data_got4$Death.Chapter + nchaptersb3 }else if(data_got4$Book.of.Death == 5){ data_got4$Cumulative.Death.Chapter <- data_got4$Death.Chapter + nchaptersb4 }else if (data_got4[, "Book.of.Death"][is.na(data_got4[, "Book.of.Death"])]){ data_got4$Cumulative.Death.Chapter <- nchaptersb5 } data_got4[, "Book.of.Death"][is.na(data_got4[, 
"Book.of.Death"])] <- nchaptersb5 ######## cdc <- function(x){ ifelse (data_got4$Book.of.Death == 1) y <- data_got4$Death.Chapter if(data_got4$Book.of.Death == 2) y <- data_got4$Death.Chapter + nchaptersb1 if(data_got4$Book.of.Death == 3) y <- data_got4$Death.Chapter + nchaptersb2 if(data_got4$Book.of.Death == 4) y <- data_got4$Death.Chapter + nchaptersb3 if(data_got4$Book.of.Death == 5) y <- data_got4$Death.Chapter + nchaptersb4 if(data_got4[, "Book.of.Death"][is.na(data_got4[, "Book.of.Death"])]) y <- NA return(y) } data_got4$Cumulative.Death.Chapter <- sapply(data_got4$Book.of.Death, cdc) ########### THIS ONE WORKS: #Game of Codes #A Song of R and Python #Made by Daniel Barreto and Johannes Seebauer #First, we create a new variable to measure for how many chapters #each character has survived. To do so we define the following #variables as the accumulated number of chapters at the end of each book. nchaptersb1 <- 73 nchaptersb2 <- sum(nchaptersb1 + 70) nchaptersb3 <- sum(nchaptersb2 + 82) nchaptersb4 <- sum(nchaptersb3 + 46) nchaptersb5 <- sum(nchaptersb4 + 73) #One problem we have in our data sheet is that for some observations #it is specified the book in which the character dies, but not the chapter. #We fix this by assuming that in all of those cases the character died at the prologue. data_got4=read.csv("character-deaths.csv") data_got4$Death.Chapter <- with(data_got4, ifelse(!is.na(Book.of.Death) & is.na(Death.Chapter),0, Death.Chapter)) #Another problem we faced is that in the column "Allegiances" in our data there was both #entries for "House Tully" and simply "Tully", for example. We solve this by: data_got4$Allegiances <- lapply(data_got4$Allegiances,gsub,pattern="House ",replacement="") #Then we create a column in our data sheet named "Cumulative Death Chapter" #and fill it up. 
data_got4["Cumulative.Death.Chapter"] <- NA data_got4$Cumulative.Death.Chapter <- with(data_got4, ifelse(Book.of.Death == 1, Death.Chapter, ifelse(Book.of.Death == 2, Death.Chapter + nchaptersb1, ifelse(Book.of.Death == 3, Death.Chapter + nchaptersb2, ifelse(Book.of.Death == 4, Death.Chapter + nchaptersb3, ifelse(Book.of.Death == 5, Death.Chapter + nchaptersb4, NA)))))) #In order to be able to compute the life expectancy of the characters, #we'll now replace the NA's for nchapterb5 + 50. That is, we assume that #50 chapters into book 6 Cersei is going to kill everyone using the wildfire. data_got4$Cumulative.Death.Chapter<- with(data_got4, ifelse(is.na(Cumulative.Death.Chapter),nchaptersb5+50, Cumulative.Death.Chapter)) # Creating a function for life expectancy conditional on allegiance. #' Life expectancy by allegiance #' @param x String: The name of the house #' @seealso \code{\link{mean}} which this function wraps #' @export lexpectancy.allegiance <- function(x){ mean(data_got4[data_got4$Allegiances==x, "Cumulative.Death.Chapter"]) } lexpectancy.allegiance("Lannister") lexpectancy.allegiance("Stark") lexpectancy.allegiance("Night's Watch") lexpectancy.allegiance("Tully") lexpectancy.allegiance("Wildling") lexpectancy.allegiance("None") #Creating a function for life expectancy conditional on gender. #' Life expectancy by gender #' @param x Dummy: 0 for female and 1 for male #' @seealso \code{\link{mean}} which this function wraps #' @export lexpectancy.gender <- function(x){ mean(data_got4[data_got4$Gender==x, "Cumulative.Death.Chapter"]) } lexpectancy.gender(0) lexpectancy.gender(1) #Creating a function for life expectancy conditional on nobility (0 for not noble and 1 for noble). 
#' Life expectancy by nobility #' @param x Dummy: 0 for not noble and 1 for noble #' @seealso \code{\link{mean}} which this function wraps #' @export lexpectancy.nobility <- function(x){ mean(data_got4[data_got4$Nobility==x, "Cumulative.Death.Chapter"]) } lexpectancy.nobility(0) lexpectancy.nobility(1) library(devtools) library(roxygen2) devtools::create("Game.of.Codes")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/interval2criterio.R \name{interval2criterio} \alias{interval2criterio} \title{Title} \usage{ interval2criterio(ts, intervalos) } \arguments{ \item{ts}{} \item{intervalos}{} } \description{ Title }
/man/interval2criterio.Rd
no_license
fjbaron/accelerator
R
false
true
276
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/interval2criterio.R \name{interval2criterio} \alias{interval2criterio} \title{Title} \usage{ interval2criterio(ts, intervalos) } \arguments{ \item{ts}{} \item{intervalos}{} } \description{ Title }
.libPaths() .libPaths("E:/R") getwd() setwd("E:/bungae") install.packages("tree") library(tree) library(ISLR) attach(Carseats)#자료 carseats사용->NAs introduced by coercion library(MASS) attach(Boston)# 대체 자료 Boston 사용 head(Boston) summary(Boston$medv) High=ifelse(medv<=22,"NO","Yes")#8을 초과하면 yes, 그렇지 않으면 no tree.Boston=tree(medv~.,Boston) #High->이진분류 사용시 NAs introduced by coercion 발생 summary(tree.Boston) plot(tree.Boston) text(tree.Boston,pretty=0)#lstat은 사회 경제적 지위가 낮은 사람들의 백분률을 측정 # -> tree는 tree.Boston#터미널 노드로 이어지는 가지에는 별표가 표시 #분류 트리의 성능을 평가하기 위해서는 훈련오차가 아니라 검정오차를 추정해야함 # train set, test set으로 분할 후 성능을 test set으로 평가 set.seed(2) train=sample(1:nrow(Boston),nrow(Boston)/2) tree.Boston2=tree(medv~.,Boston,subset=train) summary(tree.Boston2) plot(tree.Boston2) text(tree.Boston2,pretty=0) # lstat=사회경제적 지위가 늦은 사람들의 백분율을 측정 # lstat가 낮을수록 높은 주택가격에 대응됨 # 마지막 결과가 주택가격 cv.boston=cv.tree(tree.Boston2)# cv.tree함수로 트리 pruning 성능 개선 가능 plot(cv.boston$size,cv.boston$dev,type="b") # 위 경우 가장 복잡한 tree가 cv에 의해 선택됨 # tree pruning을 원할경우 prune.tree() 함수를 사용가능 prune.boston=prune.tree(tree.Boston2,best=5) plot(prune.boston) text(prune.boston,pretty=0) yhat=predict(tree.Boston2,newdata=Boston[-train,]) boston.test=Boston[-train,'medv'] plot(yhat,boston.test) abline(0,1) mean((yhat-boston.test)^2) sqrt(21.45) # test set의 mse=21.45, 제곱근은 4.6이다 # -> test set에 대한 모델의 예측값이 교외지역 실제 메디안 주택가격의 4600$안에 있다
/ISLR_tree_reg_classification.R
no_license
Jangsehawn/ISLR
R
false
false
1,938
r
.libPaths() .libPaths("E:/R") getwd() setwd("E:/bungae") install.packages("tree") library(tree) library(ISLR) attach(Carseats)#자료 carseats사용->NAs introduced by coercion library(MASS) attach(Boston)# 대체 자료 Boston 사용 head(Boston) summary(Boston$medv) High=ifelse(medv<=22,"NO","Yes")#8을 초과하면 yes, 그렇지 않으면 no tree.Boston=tree(medv~.,Boston) #High->이진분류 사용시 NAs introduced by coercion 발생 summary(tree.Boston) plot(tree.Boston) text(tree.Boston,pretty=0)#lstat은 사회 경제적 지위가 낮은 사람들의 백분률을 측정 # -> tree는 tree.Boston#터미널 노드로 이어지는 가지에는 별표가 표시 #분류 트리의 성능을 평가하기 위해서는 훈련오차가 아니라 검정오차를 추정해야함 # train set, test set으로 분할 후 성능을 test set으로 평가 set.seed(2) train=sample(1:nrow(Boston),nrow(Boston)/2) tree.Boston2=tree(medv~.,Boston,subset=train) summary(tree.Boston2) plot(tree.Boston2) text(tree.Boston2,pretty=0) # lstat=사회경제적 지위가 늦은 사람들의 백분율을 측정 # lstat가 낮을수록 높은 주택가격에 대응됨 # 마지막 결과가 주택가격 cv.boston=cv.tree(tree.Boston2)# cv.tree함수로 트리 pruning 성능 개선 가능 plot(cv.boston$size,cv.boston$dev,type="b") # 위 경우 가장 복잡한 tree가 cv에 의해 선택됨 # tree pruning을 원할경우 prune.tree() 함수를 사용가능 prune.boston=prune.tree(tree.Boston2,best=5) plot(prune.boston) text(prune.boston,pretty=0) yhat=predict(tree.Boston2,newdata=Boston[-train,]) boston.test=Boston[-train,'medv'] plot(yhat,boston.test) abline(0,1) mean((yhat-boston.test)^2) sqrt(21.45) # test set의 mse=21.45, 제곱근은 4.6이다 # -> test set에 대한 모델의 예측값이 교외지역 실제 메디안 주택가격의 4600$안에 있다
df1<-read.csv('winequality-red.csv',sep = ';') df2<-read.csv('winequality-white.csv',sep = ';') head(df1) head(df2) df1$label<-sapply(df1$pH,function(x){'red'}) df2$label<-sapply(df2$pH,function(x){'white'}) head(df1) head(df2) wine<-rbind(df1,df2) head(wine) str(wine) ##Exploring Data ##Histogram of residual sugar from the wine data. Color by red and white wines library(ggplot2) pl<-ggplot(wine,aes(residual.sugar))+geom_histogram(aes(fill=label),bins = 50,color='black') print(pl) ## Histogram of citric.acid from the wine data. Color by red and white wines pl2<-ggplot(wine,aes(citric.acid))+geom_histogram(aes(fill=label),color='black',bins = 50) print(pl2) ## Histogram of alcohol from the wine data. Color by red and white wines pl3<-ggplot(wine,aes(alcohol))+geom_histogram(aes(fill=label),color='black',bins = 50) print(pl3) ##scatterplot of residual.sugar versus citric.acid, color by red and white wine pl4<-ggplot(wine,aes(citric.acid,residual.sugar,))+geom_point(aes(color=label)) print(pl4) ##scatterplot of volatile.acidity versus residual.sugar, color by red and white wine pl5<-ggplot(wine,aes(volatile.acidity,residual.sugar,))+geom_point(aes(color=label)) print(pl5) clus.data<-wine[,1:12] head(clus.data) wine.cluster <- kmeans(wine[1:12],2) print(wine.cluster$centers) table(wine$label,wine.cluster$cluster)
/Script.R
no_license
imrvj/K-Clustering
R
false
false
1,346
r
df1<-read.csv('winequality-red.csv',sep = ';') df2<-read.csv('winequality-white.csv',sep = ';') head(df1) head(df2) df1$label<-sapply(df1$pH,function(x){'red'}) df2$label<-sapply(df2$pH,function(x){'white'}) head(df1) head(df2) wine<-rbind(df1,df2) head(wine) str(wine) ##Exploring Data ##Histogram of residual sugar from the wine data. Color by red and white wines library(ggplot2) pl<-ggplot(wine,aes(residual.sugar))+geom_histogram(aes(fill=label),bins = 50,color='black') print(pl) ## Histogram of citric.acid from the wine data. Color by red and white wines pl2<-ggplot(wine,aes(citric.acid))+geom_histogram(aes(fill=label),color='black',bins = 50) print(pl2) ## Histogram of alcohol from the wine data. Color by red and white wines pl3<-ggplot(wine,aes(alcohol))+geom_histogram(aes(fill=label),color='black',bins = 50) print(pl3) ##scatterplot of residual.sugar versus citric.acid, color by red and white wine pl4<-ggplot(wine,aes(citric.acid,residual.sugar,))+geom_point(aes(color=label)) print(pl4) ##scatterplot of volatile.acidity versus residual.sugar, color by red and white wine pl5<-ggplot(wine,aes(volatile.acidity,residual.sugar,))+geom_point(aes(color=label)) print(pl5) clus.data<-wine[,1:12] head(clus.data) wine.cluster <- kmeans(wine[1:12],2) print(wine.cluster$centers) table(wine$label,wine.cluster$cluster)
# saviosampaio - 20170215-0019 # - plot1.R do course-project-1 de exploratory-data-analysis / coursera # aqui fiz uma melhoria, para realizar o download dos dados direto da fonte # - salvei uma copia desse arquivo ZIP dentro do diretorio raiz desse course-project-1 url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip" download.file(url, "household_power_consumption.zip") # usei a funcao unz para tratar os dados compactados e facilitar o seu compartilhamento e a reproducao desse grafico # o comando abaixo vai extrair o arquivo e importa-lo para a tabela "dados", usando informacoes como o separador ";" dados <- read.table(unz("household_power_consumption.zip", "household_power_consumption.txt"), header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".") # podemos "filtrar" as linhas (rows) de varias maneiras #analisar <- dados[dados$Date %in% c("1/2/2007","2/2/2007") ,] analisar <- subset(dados, Date %in% c("1/2/2007","2/2/2007")) # definimos o device PNG, com o nome do arquivo e as dimensoes da imagem png("plot1.png", width=480, height=480) # desenhamos o grafico do tipo HIST, na cor vermelha, com um titulo e um X LABEL hist(as.numeric(analisar$Global_active_power), col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)") # encerramos o device PNG dev.off()
/plot1.R
no_license
saviosampaio/ExData_Plotting1
R
false
false
1,368
r
# saviosampaio - 20170215-0019 # - plot1.R do course-project-1 de exploratory-data-analysis / coursera # aqui fiz uma melhoria, para realizar o download dos dados direto da fonte # - salvei uma copia desse arquivo ZIP dentro do diretorio raiz desse course-project-1 url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip" download.file(url, "household_power_consumption.zip") # usei a funcao unz para tratar os dados compactados e facilitar o seu compartilhamento e a reproducao desse grafico # o comando abaixo vai extrair o arquivo e importa-lo para a tabela "dados", usando informacoes como o separador ";" dados <- read.table(unz("household_power_consumption.zip", "household_power_consumption.txt"), header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".") # podemos "filtrar" as linhas (rows) de varias maneiras #analisar <- dados[dados$Date %in% c("1/2/2007","2/2/2007") ,] analisar <- subset(dados, Date %in% c("1/2/2007","2/2/2007")) # definimos o device PNG, com o nome do arquivo e as dimensoes da imagem png("plot1.png", width=480, height=480) # desenhamos o grafico do tipo HIST, na cor vermelha, com um titulo e um X LABEL hist(as.numeric(analisar$Global_active_power), col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)") # encerramos o device PNG dev.off()
get_args_weighted <- function(weights, new_x, processed_lavaan) { b <- matrix(processed_lavaan$beta$est, ncol = 1) sigma_beta <- processed_lavaan$sigma_beta x_long <- create_x_matrix(new_x, processed_lavaan$beta) adjusted_weights <- adjust_weights(weights, processed_lavaan$rescale_df, processed_lavaan$beta) sigma_e_weighted <- weight_sigma_e(adjusted_weights$w, processed_lavaan$sigma_e) mean_adjustment <- weight_mean_adjustment(adjusted_weights$w, processed_lavaan$rescale_df) return(list(beta = b, sigma_e = sigma_e_weighted, sigma_beta = sigma_beta, weights = adjusted_weights$w_long, mean_adjustment = mean_adjustment, x = x_long)) } get_args_single_outcome <- function(outcome, new_x, processed_lavaan) { beta <- processed_lavaan$beta[processed_lavaan$beta$lhs == outcome,] b = matrix(beta$est, ncol = 1) sigma_beta <- processed_lavaan$sigma_beta[beta$eqname, beta$eqname, drop = FALSE] rescale_df <- processed_lavaan$rescale_df[processed_lavaan$rescale_df$outcome == outcome, ] rescale_factor <- rescale_df$scale rescale_mean <- rescale_df$mean*rescale_factor sigma_e <- processed_lavaan$sigma_e[outcome, outcome] adj_sigma_e <- as.vector(sigma_e)*rescale_factor^2 x <- create_x_matrix(new_x, beta) return(list(x = x, beta = b, sigma_beta = sigma_beta, sigma_e = adj_sigma_e, weights = rescale_factor, mean_adjustments = rescale_mean)) } create_x_matrix <- function(new_x, beta) { new_x <- c(new_x, intercept = 1) x_long <- matrix(new_x[beta$rhs], ncol = 1) return(x_long) } adjust_weights <- function(weights, rescale_df, beta) { rescale_factors <- rescale_df$scale names(rescale_factors) <- rescale_df$outcome adj_weights <- weights*rescale_factors[names(weights)] w_long <- matrix(adj_weights[beta$lhs], ncol = 1) return(list(w_long = w_long, w = adj_weights)) } weight_sigma_e <- function(adj_weights, sigma_e) { w <- matrix(adj_weights[rownames(sigma_e)], ncol = 1) sigma_e_weighted <- t(w) %*% sigma_e %*% w return(as.vector(sigma_e_weighted)) } weight_mean_adjustment <- 
function(adj_weights, rescale_df){ rescale_means <- matrix(rescale_df$mean, nrow = 1) adj_weights <- matrix(adj_weights[rescale_df$outcome], ncol = 1) weighted_mean_adjustment <- rescale_means %*% adj_weights return(as.vector(weighted_mean_adjustment)) }
/R/get_args_.R
permissive
GForb/mopi
R
false
false
2,463
r
get_args_weighted <- function(weights, new_x, processed_lavaan) { b <- matrix(processed_lavaan$beta$est, ncol = 1) sigma_beta <- processed_lavaan$sigma_beta x_long <- create_x_matrix(new_x, processed_lavaan$beta) adjusted_weights <- adjust_weights(weights, processed_lavaan$rescale_df, processed_lavaan$beta) sigma_e_weighted <- weight_sigma_e(adjusted_weights$w, processed_lavaan$sigma_e) mean_adjustment <- weight_mean_adjustment(adjusted_weights$w, processed_lavaan$rescale_df) return(list(beta = b, sigma_e = sigma_e_weighted, sigma_beta = sigma_beta, weights = adjusted_weights$w_long, mean_adjustment = mean_adjustment, x = x_long)) } get_args_single_outcome <- function(outcome, new_x, processed_lavaan) { beta <- processed_lavaan$beta[processed_lavaan$beta$lhs == outcome,] b = matrix(beta$est, ncol = 1) sigma_beta <- processed_lavaan$sigma_beta[beta$eqname, beta$eqname, drop = FALSE] rescale_df <- processed_lavaan$rescale_df[processed_lavaan$rescale_df$outcome == outcome, ] rescale_factor <- rescale_df$scale rescale_mean <- rescale_df$mean*rescale_factor sigma_e <- processed_lavaan$sigma_e[outcome, outcome] adj_sigma_e <- as.vector(sigma_e)*rescale_factor^2 x <- create_x_matrix(new_x, beta) return(list(x = x, beta = b, sigma_beta = sigma_beta, sigma_e = adj_sigma_e, weights = rescale_factor, mean_adjustments = rescale_mean)) } create_x_matrix <- function(new_x, beta) { new_x <- c(new_x, intercept = 1) x_long <- matrix(new_x[beta$rhs], ncol = 1) return(x_long) } adjust_weights <- function(weights, rescale_df, beta) { rescale_factors <- rescale_df$scale names(rescale_factors) <- rescale_df$outcome adj_weights <- weights*rescale_factors[names(weights)] w_long <- matrix(adj_weights[beta$lhs], ncol = 1) return(list(w_long = w_long, w = adj_weights)) } weight_sigma_e <- function(adj_weights, sigma_e) { w <- matrix(adj_weights[rownames(sigma_e)], ncol = 1) sigma_e_weighted <- t(w) %*% sigma_e %*% w return(as.vector(sigma_e_weighted)) } weight_mean_adjustment <- 
function(adj_weights, rescale_df){ rescale_means <- matrix(rescale_df$mean, nrow = 1) adj_weights <- matrix(adj_weights[rescale_df$outcome], ncol = 1) weighted_mean_adjustment <- rescale_means %*% adj_weights return(as.vector(weighted_mean_adjustment)) }
################################################################################ #### #### #### This script fits the MMPP model. #### #### #### #### Created by Jeff Eaton on 17 January 2016. #### #### #### ################################################################################ ##################### #### Fit model #### ##################### setwd("~/Documents/Research/hiv-subfertility/mmpp/") load("alpha-mmpp-data.RData") source("mmpp-model.R") par2 <- c(-4.81089224, -3.99204589, -3.89437682, -4.08546074, -4.36714715, -4.21024969, -7.29942232, -2.04898019, -1.32786472, -1.38946660, -1.51588339, -1.82244064, -2.45752807, -3.85530118, 1.00242190, 0.08917528, -0.04615499, -0.07043248, -0.06126176, -0.21694795, 0.20863540, -0.29060551, -2.01196198, -2.70513295) system.time(print(ll(par2, epis.hivp, dat.hivn, epis.entry))) options(width=500) fit.alpha <- optim(par2, ll, epis.hivp = epis.hivp, dat.hivn = dat.hivn, epis.entry = epis.entry, method="BFGS", control=list(fnscale=-1, trace=4, REPORT=1), hessian=TRUE) fit.alpha2 <- optim(rep(log(0.3), 24), ll, epis.hivp = epis.hivp, dat.hivn = dat.hivn, epis.entry = epis.entry, method="BFGS", control=list(fnscale=-1, trace=4, REPORT=1), hessian=TRUE) save(fit.alpha, fit.alpha2, file="alpha-mmpp-optim-fit_2015-01-17.RData") nll <- function(...) -ll(...) nlm.alpha <- nlm(nll, par2, epis.hivp = epis.hivp, dat.hivn = dat.hivn, epis.entry = epis.entry, fscale=-1, hessian=TRUE, print.level=2) save(nlm.alpha, nlm.alpha2, file="alpha-mmpp-nlm-fit_2015-01-17.RData") nlm.alpha2 <- nlm(nll, rep(log(0.3), 24), epis.hivp = epis.hivp, dat.hivn = dat.hivn, epis.entry = epis.entry, fscale=-1, hessian=TRUE, print.level=2)
/analysis-alpha-mmpp.R
no_license
jeffeaton/hivsubfertility-mmpp
R
false
false
2,049
r
################################################################################ #### #### #### This script fits the MMPP model. #### #### #### #### Created by Jeff Eaton on 17 January 2016. #### #### #### ################################################################################ ##################### #### Fit model #### ##################### setwd("~/Documents/Research/hiv-subfertility/mmpp/") load("alpha-mmpp-data.RData") source("mmpp-model.R") par2 <- c(-4.81089224, -3.99204589, -3.89437682, -4.08546074, -4.36714715, -4.21024969, -7.29942232, -2.04898019, -1.32786472, -1.38946660, -1.51588339, -1.82244064, -2.45752807, -3.85530118, 1.00242190, 0.08917528, -0.04615499, -0.07043248, -0.06126176, -0.21694795, 0.20863540, -0.29060551, -2.01196198, -2.70513295) system.time(print(ll(par2, epis.hivp, dat.hivn, epis.entry))) options(width=500) fit.alpha <- optim(par2, ll, epis.hivp = epis.hivp, dat.hivn = dat.hivn, epis.entry = epis.entry, method="BFGS", control=list(fnscale=-1, trace=4, REPORT=1), hessian=TRUE) fit.alpha2 <- optim(rep(log(0.3), 24), ll, epis.hivp = epis.hivp, dat.hivn = dat.hivn, epis.entry = epis.entry, method="BFGS", control=list(fnscale=-1, trace=4, REPORT=1), hessian=TRUE) save(fit.alpha, fit.alpha2, file="alpha-mmpp-optim-fit_2015-01-17.RData") nll <- function(...) -ll(...) nlm.alpha <- nlm(nll, par2, epis.hivp = epis.hivp, dat.hivn = dat.hivn, epis.entry = epis.entry, fscale=-1, hessian=TRUE, print.level=2) save(nlm.alpha, nlm.alpha2, file="alpha-mmpp-nlm-fit_2015-01-17.RData") nlm.alpha2 <- nlm(nll, rep(log(0.3), 24), epis.hivp = epis.hivp, dat.hivn = dat.hivn, epis.entry = epis.entry, fscale=-1, hessian=TRUE, print.level=2)
c DCNF-Autarky [version 0.0.1]. c Copyright (c) 2018-2019 Swansea University. c c Input Clause Count: 7693 c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 7693 c c Input Parameter (command line, file): c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query07_query45_1344.qdimacs c output filename /tmp/dcnfAutarky.dimacs c autarky level 1 c conformity level 0 c encoding type 2 c no.of var 1957 c no.of clauses 7693 c no.of taut cls 0 c c Output Parameters: c remaining no.of clauses 7693 c c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query07_query45_1344.qdimacs 1957 7693 E1 [] 0 16 1941 7693 NONE
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query07_query45_1344/query07_query45_1344.R
no_license
arey0pushpa/dcnf-autarky
R
false
false
711
r
c DCNF-Autarky [version 0.0.1]. c Copyright (c) 2018-2019 Swansea University. c c Input Clause Count: 7693 c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 7693 c c Input Parameter (command line, file): c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query07_query45_1344.qdimacs c output filename /tmp/dcnfAutarky.dimacs c autarky level 1 c conformity level 0 c encoding type 2 c no.of var 1957 c no.of clauses 7693 c no.of taut cls 0 c c Output Parameters: c remaining no.of clauses 7693 c c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query07_query45_1344.qdimacs 1957 7693 E1 [] 0 16 1941 7693 NONE
## version: 1.31 ## method: get ## path: /distribution/{name}/json ## code: 200 ## response: {"Descriptor":{"MediaType":"application/vnd.docker.distribution.manifest.v2+json","Digest":"sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96","Size":3987495,"URLs":""},"Platforms":[{"Architecture":"amd64","OS":"linux","OSVersion":"","OSFeatures":"","Variant":"","Features":""}]} list( descriptor = list( media_type = "application/vnd.docker.distribution.manifest.v2+json", size = 3987495L, digest = "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96", urls = ""), platforms = data.frame( architecture = "amd64", os = "linux", os_version = "", os_features = I(list("")), variant = "", features = I(list("")), stringsAsFactors = FALSE))
/tests/testthat/sample_responses/v1.31/system_distribution.R
no_license
cran/stevedore
R
false
false
822
r
## version: 1.31 ## method: get ## path: /distribution/{name}/json ## code: 200 ## response: {"Descriptor":{"MediaType":"application/vnd.docker.distribution.manifest.v2+json","Digest":"sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96","Size":3987495,"URLs":""},"Platforms":[{"Architecture":"amd64","OS":"linux","OSVersion":"","OSFeatures":"","Variant":"","Features":""}]} list( descriptor = list( media_type = "application/vnd.docker.distribution.manifest.v2+json", size = 3987495L, digest = "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96", urls = ""), platforms = data.frame( architecture = "amd64", os = "linux", os_version = "", os_features = I(list("")), variant = "", features = I(list("")), stringsAsFactors = FALSE))
#run_analysis.R # the main script to perform the analysis from Human Activity Recognition #Using Smartphones Dataset. #The analysis generates a tidy dataset from the original training and test datasets #Read the featuring label setwd("cleaningdata") featlabel <- read.csv("UCIHARDataset//features.txt", sep=" ",header=FALSE) names(featlabel) <- c("featid","flabel") #Step 1 merge the training e test dataset #read training dataset only with measurement of mean and standard deviation trainingds <- read.table("UCIHARDataset//train//X_train.txt", header=FALSE) colnames(trainingds)<-featlabel$flabel #read training subject trainingsub <- read.csv("UCIHARDataset//train//subject_train.txt", header=FALSE) colnames(trainingsub) <- c("subject_id") #read training y activity trainingact <- read.csv("UCIHARDataset//train//y_train.txt", header=FALSE) colnames(trainingact) <- c("act_id") #merge the training input traindataset <- cbind(trainingds,trainingsub,trainingact) #read test dataset testds <- read.table("UCIHARDataset//test//X_test.txt", header=FALSE) colnames(testds)<-featlabel$flabel #read tet subject testsub <- read.csv("UCIHARDataset//test//subject_test.txt", header=FALSE) colnames(testsub) <- c("subject_id") #read test y activity testact <- read.csv("UCIHARDataset//test//y_test.txt", header=FALSE) colnames(testact) <- c("act_id") #merge the test input testdataset <- cbind(testds,testsub,testact) #generate the mergeddataset mergedataset <- rbind(traindataset,testdataset) #2 - Extracts only the measurements on the mean and standard deviation for each measurement. #subset features the contains mean and std in its names. 
featlabelm <- featlabel[grep("mean",featlabel$flabel),] featlabels <- featlabel[grep("std",featlabel$flabel),] featlabelms <- rbind(featlabelm,featlabels) #select the column in mergedataset by column index inside featlabelms mergemeands <- mergedataset[,c(featlabelms$featid,562,563)] # 3 Uses descriptive activity names to name the activities in the data set # read file activity_labels actlabels <- read.table("UCIHARDataset//activity_labels.txt", header=FALSE) names(actlabels) <- c("act_id","act_label") #merge activity_labels.txt with mergemeands on act_id labels -> generate mergemeandslb mergemeandslb <- merge(mergemeands, actlabels,by="act_id" ) #4Appropriately labels the data set with descriptive variable names. #This point is already resolved in step 1 cause it associates column names to original datasets outdataset <- aggregate(mergemeandslb[,2:80], by=list(Act_id=mergemeandslb$act_id,Act_label=mergemeandslb$act_label,Subject_id=mergemeandslb$subject_id), FUN=mean) setwd("..") write.table(outdataset, 'outputdataset.txt', row.name=FALSE)
/run_analysis.R
no_license
mderose/cleaningdata
R
false
false
2,706
r
#run_analysis.R # the main script to perform the analysis from Human Activity Recognition #Using Smartphones Dataset. #The analysis generates a tidy dataset from the original training and test datasets #Read the featuring label setwd("cleaningdata") featlabel <- read.csv("UCIHARDataset//features.txt", sep=" ",header=FALSE) names(featlabel) <- c("featid","flabel") #Step 1 merge the training e test dataset #read training dataset only with measurement of mean and standard deviation trainingds <- read.table("UCIHARDataset//train//X_train.txt", header=FALSE) colnames(trainingds)<-featlabel$flabel #read training subject trainingsub <- read.csv("UCIHARDataset//train//subject_train.txt", header=FALSE) colnames(trainingsub) <- c("subject_id") #read training y activity trainingact <- read.csv("UCIHARDataset//train//y_train.txt", header=FALSE) colnames(trainingact) <- c("act_id") #merge the training input traindataset <- cbind(trainingds,trainingsub,trainingact) #read test dataset testds <- read.table("UCIHARDataset//test//X_test.txt", header=FALSE) colnames(testds)<-featlabel$flabel #read tet subject testsub <- read.csv("UCIHARDataset//test//subject_test.txt", header=FALSE) colnames(testsub) <- c("subject_id") #read test y activity testact <- read.csv("UCIHARDataset//test//y_test.txt", header=FALSE) colnames(testact) <- c("act_id") #merge the test input testdataset <- cbind(testds,testsub,testact) #generate the mergeddataset mergedataset <- rbind(traindataset,testdataset) #2 - Extracts only the measurements on the mean and standard deviation for each measurement. #subset features the contains mean and std in its names. 
featlabelm <- featlabel[grep("mean",featlabel$flabel),] featlabels <- featlabel[grep("std",featlabel$flabel),] featlabelms <- rbind(featlabelm,featlabels) #select the column in mergedataset by column index inside featlabelms mergemeands <- mergedataset[,c(featlabelms$featid,562,563)] # 3 Uses descriptive activity names to name the activities in the data set # read file activity_labels actlabels <- read.table("UCIHARDataset//activity_labels.txt", header=FALSE) names(actlabels) <- c("act_id","act_label") #merge activity_labels.txt with mergemeands on act_id labels -> generate mergemeandslb mergemeandslb <- merge(mergemeands, actlabels,by="act_id" ) #4Appropriately labels the data set with descriptive variable names. #This point is already resolved in step 1 cause it associates column names to original datasets outdataset <- aggregate(mergemeandslb[,2:80], by=list(Act_id=mergemeandslb$act_id,Act_label=mergemeandslb$act_label,Subject_id=mergemeandslb$subject_id), FUN=mean) setwd("..") write.table(outdataset, 'outputdataset.txt', row.name=FALSE)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merge-styles.R \name{css_merge} \alias{css_merge} \title{Merge multiple stylesheets given in priority order} \usage{ css_merge(...) } \arguments{ \item{...}{multiple CSS objects. The order in which these arguments are given reflect the priority of the stylesheets from lowest to highest priority. Later stylesheets (i.e. high priority) will override any styles declared earlier (lower priority)} } \value{ final cascaded stylesheet } \description{ Merge multiple stylesheets given in priority order } \examples{ \dontrun{ css1 <- read_css("chrome_builtin.css") css2 <- read_css("this_page.css") css_merge(css1, css2) } }
/man/css_merge.Rd
permissive
coolbutuseless/cssparser
R
false
true
701
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merge-styles.R \name{css_merge} \alias{css_merge} \title{Merge multiple stylesheets given in priority order} \usage{ css_merge(...) } \arguments{ \item{...}{multiple CSS objects. The order in which these arguments are given reflect the priority of the stylesheets from lowest to highest priority. Later stylesheets (i.e. high priority) will override any styles declared earlier (lower priority)} } \value{ final cascaded stylesheet } \description{ Merge multiple stylesheets given in priority order } \examples{ \dontrun{ css1 <- read_css("chrome_builtin.css") css2 <- read_css("this_page.css") css_merge(css1, css2) } }
#' Predict method for fitted 'bsamdpm' objects.
#'
#' Computes posterior mean fits and interval estimates for the parametric
#' component (wbeta), the nonparametric functions (fxobs) and the fitted
#' values (yhat), optionally at new covariate values.
#'
#' @param object fitted bsamdpm model object
#' @param newp   new parametric covariates (optional; defaults to object$w)
#' @param newnp  new nonparametric covariates (optional; defaults to object$x)
#' @param alpha  1 - confidence/credible level for the intervals
#' @param HPD    if TRUE, highest-posterior-density intervals; otherwise
#'               equal-tail quantile intervals
#' @return list of class "predict.bsamdpm" with mean/lower/upper summaries
"predict.bsamdpm" <- function(object, newp, newnp, alpha = 0.05, HPD = TRUE, ...) {
  smcmc  <- object$mcmc$smcmc   # number of saved MCMC draws
  nbasis <- object$nbasis
  nint   <- object$nint + 1
  nfun   <- object$nfun
  fmodel <- object$fmodel
  fpm    <- object$fpm
  xmin   <- object$xmin
  xmax   <- object$xmax

  if (missing(newp) && missing(newnp)) {
    # No new data: reuse the fitted draws stored in the object.
    n <- object$n
    newp <- object$w
    newnp <- object$x
    fxobsg <- object$fit.draws$fxobs
    wbg <- object$fit.draws$wbeta
    yhatg <- object$fit.draws$yhat
  } else if (missing(newp) && !missing(newnp)) {
    # New nonparametric covariates only: re-evaluate the fitted functions
    # at newnp via the compiled Fortran routine.
    newp <- object$w
    if (!is.matrix(newnp))
      newnp <- as.matrix(newnp)
    n <- object$n
    if (n != nrow(newnp))
      stop('The number of observations for both parametric and nonparametric components must be same.')
    wbg <- object$fit.draws$wbeta
    fxobsg <- .Fortran("predictbsam", as.matrix(newnp), as.double(xmin),
                       as.double(xmax), as.integer(n), as.integer(nfun),
                       as.integer(nbasis), as.integer(nint), as.integer(fmodel),
                       as.double(fpm), as.integer(smcmc),
                       as.array(object$mcmc.draws$theta),
                       as.matrix(object$mcmc.draws$alpha),
                       as.matrix(object$mcmc.draws$psi),
                       as.matrix(object$mcmc.draws$omega),
                       fxobsg = array(0, dim = c(n, nfun, smcmc)),
                       NAOK = TRUE, PACKAGE = "bsamGP")$fxobsg
    yhatg <- wbg + t(apply(fxobsg, c(1, 3), sum))
  } else if (!missing(newp) && missing(newnp)) {
    # New parametric covariates only.
    newnp <- object$x
    if (!is.matrix(newp))
      newp <- as.matrix(newp)
    newp <- cbind(1, newp)  # prepend intercept column
    n <- object$n
    if (n != nrow(newp))
      stop('The number of observations for both parametric and nonparametric components must be same.')
    fxobsg <- object$fit.draws$fxobs
    wbg <- object$mcmc.draws$beta %*% t(newp)
    yhatg <- wbg + t(apply(fxobsg, c(1, 3), sum))
  } else if (!missing(newp) && !missing(newnp)) {
    # Both components supplied as new data.
    if (!is.matrix(newp))
      newp <- as.matrix(newp)
    newp <- cbind(1, newp)  # prepend intercept column
    if (!is.matrix(newnp))
      newnp <- as.matrix(newnp)
    if (nrow(newp) != nrow(newnp))
      stop('The number of observations for both parametric and nonparametric components must be same.')
    n <- nrow(newp)
    wbg <- object$mcmc.draws$beta %*% t(newp)
    fxobsg <- .Fortran("predictbsam", as.matrix(newnp), as.double(xmin),
                       as.double(xmax), as.integer(n), as.integer(nfun),
                       as.integer(nbasis), as.integer(nint), as.integer(fmodel),
                       as.double(fpm), as.integer(smcmc),
                       as.array(object$mcmc.draws$theta),
                       as.matrix(object$mcmc.draws$alpha),
                       as.matrix(object$mcmc.draws$psi),
                       as.matrix(object$mcmc.draws$omega),
                       fxobsg = array(0, dim = c(n, nfun, smcmc)),
                       NAOK = TRUE, PACKAGE = "bsamGP")$fxobsg
    yhatg <- wbg + t(apply(fxobsg, c(1, 3), sum))
  }

  # Posterior means of the nonparametric functions, parametric fit, and yhat.
  fxobs <- list()
  fxobsm <- apply(fxobsg, c(1, 2), mean)
  fxobs$mean <- fxobsm
  wbeta <- list()
  wbm <- apply(wbg, 2, mean)
  wbeta$mean <- wbm
  yhat <- list()
  ym <- apply(yhatg, 2, mean)
  yhat$mean <- ym

  if (HPD) {
    # Highest-posterior-density intervals: for each observation, slide a
    # window of length round(smcmc * prob) over the sorted draws and take
    # the narrowest one.
    prob <- 1 - alpha
    fx.l <- fx.u <- matrix(0, n, nfun)
    for (i in 1:nfun) {
      fxobsg.o <- apply(fxobsg[, i, ], 1, sort)
      gap <- max(1, min(smcmc - 1, round(smcmc * prob)))
      init <- 1:(smcmc - gap)
      inds <- apply(fxobsg.o[init + gap, , drop = FALSE] -
                      fxobsg.o[init, , drop = FALSE], 2, which.min)
      fx.l[, i] <- fxobsg.o[cbind(inds, 1:n)]
      fx.u[, i] <- fxobsg.o[cbind(inds + gap, 1:n)]
    }
    fxobs$lower <- fx.l
    fxobs$upper <- fx.u

    wbg.o <- apply(wbg, 2, sort)
    gap <- max(1, min(smcmc - 1, round(smcmc * prob)))
    init <- 1:(smcmc - gap)
    inds <- apply(wbg.o[init + gap, , drop = FALSE] -
                    wbg.o[init, , drop = FALSE], 2, which.min)
    wbeta$lower <- wbg.o[cbind(inds, 1:n)]
    wbeta$upper <- wbg.o[cbind(inds + gap, 1:n)]

    yhatg.o <- apply(yhatg, 2, sort)
    gap <- max(1, min(smcmc - 1, round(smcmc * prob)))
    init <- 1:(smcmc - gap)
    inds <- apply(yhatg.o[init + gap, , drop = FALSE] -
                    yhatg.o[init, , drop = FALSE], 2, which.min)
    yhat$lower <- yhatg.o[cbind(inds, 1:n)]
    yhat$upper <- yhatg.o[cbind(inds + gap, 1:n)]
  } else {
    # Equal-tail intervals from the alpha/2 and 1 - alpha/2 quantiles.
    fxobs$lower <- apply(fxobsg, c(1, 2), function(x) quantile(x, prob = alpha/2))
    fxobs$upper <- apply(fxobsg, c(1, 2), function(x) quantile(x, prob = 1 - alpha/2))
    wbeta$lower <- apply(wbg, 2, function(x) quantile(x, prob = alpha/2))
    wbeta$upper <- apply(wbg, 2, function(x) quantile(x, prob = 1 - alpha/2))
    yhat$lower <- apply(yhatg, 2, function(x) quantile(x, prob = alpha/2))
    yhat$upper <- apply(yhatg, 2, function(x) quantile(x, prob = 1 - alpha/2))
  }

  out <- list()
  out$n <- n
  out$nbasis <- nbasis
  out$newp <- newp
  out$newnp <- newnp
  out$alpha <- alpha
  out$HPD <- HPD
  out$yhat <- yhat
  out$wbeta <- wbeta
  out$fxobs <- fxobs
  if (object$model == 'bsaqdpm')
    out$p <- object$p  # bug fix: original `out$p <- out$p` was a no-op and
                       # never copied the quantile level from the fit
  class(out) <- "predict.bsamdpm"
  out
}
/R/predict.bsamdpm.R
no_license
jourdy345/bsamGP
R
false
false
4,949
r
"predict.bsamdpm" <- function(object, newp, newnp, alpha = 0.05, HPD = TRUE, ...) { smcmc <- object$mcmc$smcmc nbasis <- object$nbasis nint <- object$nint + 1 nfun <- object$nfun fmodel <- object$fmodel fpm <- object$fpm xmin <- object$xmin xmax <- object$xmax if (missing(newp) && missing(newnp)) { n <- object$n newp <- object$w newnp <- object$x fxobsg <- object$fit.draws$fxobs wbg <- object$fit.draws$wbeta yhatg <- object$fit.draws$yhat } else if (missing(newp) && !missing(newnp)) { newp <- object$w if (!is.matrix(newnp)) newnp <- as.matrix(newnp) n <- object$n if (n != nrow(newnp)) stop('The number of observations for both parametric and nonparametric components must be same.') wbg <- object$fit.draws$wbeta fxobsg <- .Fortran("predictbsam", as.matrix(newnp), as.double(xmin), as.double(xmax),as.integer(n), as.integer(nfun), as.integer(nbasis), as.integer(nint), as.integer(fmodel), as.double(fpm), as.integer(smcmc), as.array(object$mcmc.draws$theta), as.matrix(object$mcmc.draws$alpha), as.matrix(object$mcmc.draws$psi), as.matrix(object$mcmc.draws$omega), fxobsg = array(0, dim = c(n, nfun, smcmc)), NAOK = TRUE, PACKAGE = "bsamGP")$fxobsg yhatg <- wbg + t(apply(fxobsg, c(1,3), sum)) } else if (!missing(newp) && missing(newnp)) { newnp <- object$x if (!is.matrix(newp)) newp <- as.matrix(newp) newp <- cbind(1, newp) n <- object$n if (n != nrow(newp)) stop('The number of observations for both parametric and nonparametric components must be same.') fxobsg <- object$fit.draws$fxobs wbg <- object$mcmc.draws$beta %*% t(newp) yhatg <- wbg + t(apply(fxobsg, c(1,3), sum)) } else if (!missing(newp) && !missing(newnp)) { if (!is.matrix(newp)) newp <- as.matrix(newp) newp <- cbind(1, newp) if (!is.matrix(newnp)) newnp <- as.matrix(newnp) if (nrow(newp) != nrow(newnp)) stop('The number of observations for both parametric and nonparametric components must be same.') n <- nrow(newp) wbg <- object$mcmc.draws$beta %*% t(newp) fxobsg <- .Fortran("predictbsam", as.matrix(newnp), 
as.double(xmin), as.double(xmax),as.integer(n), as.integer(nfun), as.integer(nbasis), as.integer(nint), as.integer(fmodel), as.double(fpm), as.integer(smcmc), as.array(object$mcmc.draws$theta), as.matrix(object$mcmc.draws$alpha), as.matrix(object$mcmc.draws$psi), as.matrix(object$mcmc.draws$omega), fxobsg = array(0, dim = c(n, nfun, smcmc)), NAOK = TRUE, PACKAGE = "bsamGP")$fxobsg yhatg <- wbg + t(apply(fxobsg, c(1,3), sum)) } fxobs <- list() fxobsm <- apply(fxobsg, c(1, 2), mean) fxobs$mean <- fxobsm wbeta <- list() wbm <- apply(wbg, 2, mean) wbeta$mean <- wbm yhat <- list() ym <- apply(yhatg, 2, mean) yhat$mean <- ym if (HPD) { prob <- 1 - alpha fx.l <- fx.u <- matrix(0, n, nfun) for (i in 1:nfun) { fxobsg.o <- apply(fxobsg[, i, ], 1, sort) gap <- max(1, min(smcmc - 1, round(smcmc * prob))) init <- 1:(smcmc - gap) inds <- apply(fxobsg.o[init + gap, , drop = FALSE] - fxobsg.o[init, , drop = FALSE], 2, which.min) fx.l[, i] <- fxobsg.o[cbind(inds, 1:n)] fx.u[, i] <- fxobsg.o[cbind(inds + gap, 1:n)] } fxobs$lower <- fx.l fxobs$upper <- fx.u wbg.o <- apply(wbg, 2, sort) gap <- max(1, min(smcmc - 1, round(smcmc * prob))) init <- 1:(smcmc - gap) inds <- apply(wbg.o[init + gap, , drop = FALSE] - wbg.o[init, , drop = FALSE], 2, which.min) wbeta$lower <- wbg.o[cbind(inds, 1:n)] wbeta$upper <- wbg.o[cbind(inds + gap, 1:n)] yhatg.o <- apply(yhatg, 2, sort) gap <- max(1, min(smcmc - 1, round(smcmc * prob))) init <- 1:(smcmc - gap) inds <- apply(yhatg.o[init + gap, , drop = FALSE] - yhatg.o[init, , drop = FALSE], 2, which.min) yhat$lower <- yhatg.o[cbind(inds, 1:n)] yhat$upper <- yhatg.o[cbind(inds + gap, 1:n)] } else { fxobs$lower <- apply(fxobsg, c(1, 2), function(x) quantile(x, prob = alpha/2)) fxobs$upper <- apply(fxobsg, c(1, 2), function(x) quantile(x, prob = 1 - alpha/2)) wbeta$lower <- apply(wbg, 2, function(x) quantile(x, prob = alpha/2)) wbeta$upper <- apply(wbg, 2, function(x) quantile(x, prob = 1 - alpha/2)) yhat$lower <- apply(yhatg, 2, function(x) quantile(x, 
prob = alpha/2)) yhat$upper <- apply(yhatg, 2, function(x) quantile(x, prob = 1 - alpha/2)) } out <- list() out$n <- n out$nbasis <- nbasis out$newp <- newp out$newnp <- newnp out$alpha <- alpha out$HPD <- HPD out$yhat <- yhat out$wbeta <- wbeta out$fxobs <- fxobs if (object$model == 'bsaqdpm') out$p <- out$p class(out) <- "predict.bsamdpm" out }
# Exercise 2: advanced ggplot2 practice

# Install and load the `ggplot2` package
#install.packages('ggplot2')
library("ggplot2")

# For this exercise you will again be working with the `diamonds` data set.
# Use `?diamonds` to review details about this data set
?diamonds

# The exercises below reference a sample of the diamonds data (as created in
# the previous exercise); recreate it here so this script is self-contained.
set.seed(1234)
diamonds_sample <- diamonds[sample(nrow(diamonds), 3000), ]

## Position Adjustments

# Draw a column (bar) chart of diamonds cuts by price, with each bar filled by
# clarity. You should see a _stacked_ bar chart.
ggplot(diamonds) +
  geom_col(mapping = aes(cut, price, fill = clarity))

# Draw the same chart again, but with each element positioned to "fill" the y axis
ggplot(diamonds) +
  geom_col(mapping = aes(cut, price, fill = clarity), position = "fill")

# Draw the same chart again, but with each element positioned to "dodge" each other
ggplot(diamonds) +
  geom_col(mapping = aes(cut, price, fill = clarity), position = "dodge")

# Draw a plot with point geometry with the x-position mapped to `cut` and the
# y-position mapped to `clarity`
# This creates a "grid" grouping the points
ggplot(diamonds) +
  geom_point(mapping = aes(cut, clarity))

# Use the "jitter" position adjustment to keep the points from all overlapping!
# (This works a little better with a sample of diamond data, such as from the
# previous exercise).
ggplot(diamonds_sample) +
  geom_point(mapping = aes(cut, clarity), position = "jitter")

## Scales

# Draw a "boxplot" (with `geom_boxplot`) for the diamond's price (y) by color (x)
ggplot(diamonds) +
  geom_boxplot(mapping = aes(color, price))

# This has a lot of outliers, making it harder to read. To fix this, draw the
# same plot but with a _logarithmic_ scale for the y axis.
ggplot(diamonds) +
  geom_boxplot(mapping = aes(color, price)) +
  scale_y_log10()

# For another version, draw the same plot but with `violin` geometry instead of
# `boxplot` geometry!
# How does the logarithmic scale change the data presentation?
ggplot(diamonds) +
  geom_violin(mapping = aes(color, price)) +
  scale_y_log10()

# Another interesting plot: draw a plot of the diamonds price (y) by carat (x),
# using a heatmap of 2d bins (geom_bin2d)
# What happens when you make the x and y channels scale logarithmically?
ggplot(diamonds) +
  geom_bin2d(mapping = aes(carat, price)) +
  scale_y_log10() +
  scale_x_log10()

# Draw a scatter plot for the diamonds price (y) by carat (x). Color each point
# by the clarity (Remember, this will take a while. Use a sample of the diamonds
# for faster results)
ggplot(diamonds_sample) +
  geom_point(mapping = aes(carat, price, color = clarity))

# Change the color of the previous plot using a ColorBrewer scale of your choice.
# What looks nice?
ggplot(diamonds_sample) +
  geom_point(mapping = aes(carat, price, color = clarity)) +
  scale_color_brewer(palette = "BuPu")  # I think BuPu looks nice :)

## Coordinate Systems

# Draw a bar chart with x-position and fill color BOTH mapped to cut
# For best results, SET the `width` of the geometry to be 1 (fill plot, no space
# between)
# TIP: You can save the plot to a variable for easier modifications
ggplot(diamonds) +
  geom_bar(mapping = aes(cut, fill = cut), width = 1)

# Draw the same chart, but with the coordinate system flipped
ggplot(diamonds) +
  geom_bar(mapping = aes(cut, fill = cut), width = 1) +
  coord_flip()

# Draw the same chart, but in a polar coordinate system. It's a Coxcomb chart!
ggplot(diamonds) +
  geom_bar(mapping = aes(cut, fill = cut), width = 1) +
  coord_polar()

## Facets

# Take the scatter plot of price by carat data (colored by clarity) and add
# _facets_ based on the diamond's `color`
# Bug fix: `facet` is not an aesthetic, so aes(..., facet = color) silently
# produced an unfaceted plot; facet_wrap() is the correct way to facet.
ggplot(diamonds_sample) +
  geom_point(mapping = aes(carat, price, color = clarity)) +
  scale_color_brewer(palette = "BuPu") +
  facet_wrap(~color)

## Saving Plots

# Use the `ggsave()` function to save the current (recent) plot to disk.
# Name the output file "my-plot.png".
# Make sure you've set the working directory!!
ggsave("my-plot.png")
/chapter-16-exercises/exercise-2/exercise.R
permissive
samuel-lindsay/book-exercises
R
false
false
3,810
r
# Exercise 2: advanced ggplot2 practice # Install and load the `ggplot2` package #install.packages('ggplot2') library("ggplot2") # For this exercise you will again be working with the `diamonds` data set. # Use `?diamonds` to review details about this data set ?diamonds ## Position Adjustments # Draw a column (bar) chart of diamonds cuts by price, with each bar filled by # clarity. You should see a _stacked_ bar chart. ggplot(diamonds) + geom_col(mapping = aes(cut, price, fill=clarity)) # Draw the same chart again, but with each element positioned to "fill" the y axis ggplot(diamonds) + geom_col(mapping = aes(cut, price, fill=clarity), position = "fill") # Draw the same chart again, but with each element positioned to "dodge" each other ggplot(diamonds) + geom_col(mapping = aes(cut, price, fill=clarity), position = "dodge") # Draw a plot with point geometry with the x-position mapped to `cut` and the # y-position mapped to `clarity` # This creates a "grid" grouping the points ggplot(diamonds) + geom_point(mapping = aes(cut, clarity)) # Use the "jitter" position adjustment to keep the points from all overlapping! # (This works a little better with a sample of diamond data, such as from the # previous exercise). ggplot(diamonds_sample) + geom_point(mapping = aes(cut, clarity), position="jitter") ## Scales # Draw a "boxplot" (with `geom_boxplot`) for the diamond's price (y) by color (x) ggplot(diamonds) + geom_boxplot(mapping = aes(color, price)) # This has a lot of outliers, making it harder to read. To fix this, draw the # same plot but with a _logarithmic_ scale for the y axis. ggplot(diamonds) + geom_boxplot(mapping = aes(color, price)) + scale_y_log10() # For another version, draw the same plot but with `violin` geometry instead of # `boxplot` geometry! # How does the logarithmic scale change the data presentation? 
ggplot(diamonds) + geom_violin(mapping = aes(color, price)) + scale_y_log10() # Another interesting plot: draw a plot of the diamonds price (y) by carat (x), # using a heatmap of 2d bins (geom_bin2d) # What happens when you make the x and y channels scale logarithmically? ggplot(diamonds) + geom_bin2d(mapping = aes(carat, price)) + scale_y_log10() + scale_x_log10() # Draw a scatter plot for the diamonds price (y) by carat (x). Color each point # by the clarity (Remember, this will take a while. Use a sample of the diamonds # for faster results) ggplot(diamonds_sample) + geom_point(mapping = aes(carat, price, color=clarity)) # Change the color of the previous plot using a ColorBrewer scale of your choice. # What looks nice? ggplot(diamonds_sample) + geom_point(mapping = aes(carat, price, color=clarity)) + scale_color_brewer(palette="BuPu") #I think BuPu looks nice :) ## Coordinate Systems # Draw a bar chart with x-position and fill color BOTH mapped to cut # For best results, SET the `width` of the geometry to be 1 (fill plot, no space # between) # TIP: You can save the plot to a variable for easier modifications ggplot(diamonds) + geom_bar(mapping = aes(cut, fill=cut), width=1) # Draw the same chart, but with the coordinate system flipped ggplot(diamonds) + geom_bar(mapping = aes(cut, fill=cut), width=1) + coord_flip() # Draw the same chart, but in a polar coordinate system. It's a Coxcomb chart! ggplot(diamonds) + geom_bar(mapping = aes(cut, fill=cut), width=1) + coord_polar() ## Facets # Take the scatter plot of price by carat data (colored by clarity) and add # _facets_ based on the diamond's `color` ggplot(diamonds) + geom_point(mapping = aes(carat, price, facet=color)) + scale_color_brewer(palette="BuPu") ## Saving Plots # Use the `ggsave()` function to save the current (recent) plot to disk. # Name the output file "my-plot.png". # Make sure you've set the working directory!!
#' Optimal significance level for a two-sample proportion test with
#' unequal sample sizes.
#'
#' @param ncp noncentrality parameter (used when \code{h} is NULL)
#' @param h   effect size for pwr.2p2n.test (used when \code{ncp} is NULL)
#' @param n1,n2 group sample sizes (only used with \code{h})
#' @param p   prior probability of H0 being true
#' @param k   relative cost of Type II vs Type I errors
#' @param alternative "two.sided", "less" or "greater"
#' @param Figure whether E.loss should draw its diagnostic plot
#' @return list with the optimal alpha (alpha.opt) and beta (beta.opt)
OptSig.2p2n <- function(ncp = NULL, h = NULL, n1 = NULL, n2 = NULL, p = 0.5,
                        k = 1, alternative = "two.sided", Figure = TRUE) {
  # Grid of candidate significance levels to search over.
  alphavec <- seq(1e-05, 1, 1e-05)

  # Type II error rates computed directly from the normal distribution
  # when a noncentrality parameter is given ...
  if (!is.null(ncp) && is.null(h)) {
    if (alternative == "less")
      betavec <- 1 - pnorm(qnorm(alphavec), mean = ncp)
    if (alternative == "greater")
      betavec <- pnorm(-qnorm(alphavec), mean = ncp)
    if (alternative == "two.sided")
      betavec <- (1 - pnorm(qnorm(0.5 * alphavec), mean = -abs(ncp))) +
        pnorm(-qnorm(0.5 * alphavec), mean = abs(ncp))
  }
  # ... or from pwr's power calculation when an effect size h is given.
  if (!is.null(h) && is.null(ncp))
    betavec <- 1 - pwr.2p2n.test(h = h, n1 = n1, n2 = n2,
                                 sig.level = alphavec, power = NULL,
                                 alternative)$power

  # Bug fix: honour the Figure argument; the original hard-coded
  # Figure = TRUE, so Figure = FALSE was silently ignored.
  M <- E.loss(alphavec, betavec, p, k, Figure = Figure)
  return(list(alpha.opt = M$alpha.opt, beta.opt = M$beta.opt))
}
/R/OptSig.2p2n.R
no_license
cran/OptSig
R
false
false
729
r
OptSig.2p2n <- function(ncp=NULL,h=NULL,n1=NULL,n2=NULL,p=0.5,k=1,alternative="two.sided",Figure=TRUE){ alphavec = seq(1e-05, 1, 1e-05) if (!is.null(ncp) & is.null(h)){ if (alternative=="less") betavec=1-pnorm(qnorm(alphavec),mean=ncp) if (alternative=="greater") betavec=pnorm(-qnorm(alphavec),mean=ncp) if (alternative=="two.sided") betavec=(1-pnorm(qnorm(0.5*alphavec),mean=-abs(ncp))) + pnorm(-qnorm(0.5*alphavec),mean=abs(ncp)) } if (!is.null(h) & is.null(ncp) ) betavec = 1-pwr.2p2n.test(h = h, n1 = n1, n2 = n2, sig.level = alphavec, power = NULL, alternative)$power M=E.loss(alphavec,betavec,p,k,Figure=TRUE) return(list(alpha.opt=M$alpha.opt,beta.opt=M$beta.opt))}
dupInfo <- function(x, useNA = "ifany") {
  # Tabulate the values of x and keep only those occurring more than once,
  # recording the positions of each duplicated value in a list column.
  dupdat <- data.frame(table(x, useNA = useNA))
  dupdat <- dupdat[dupdat$Freq > 1, ]
  dupdat$position <- lapply(dupdat$x, function(y) which(x %in% y))
  dupdat
}

construct.all <- function(..., cycle.order = NULL) {
  # construct.all() forms string elements (often, variable names) of all
  # combinations of a set of characteristics. For instance,
  # construct.all(c("algebra.", "calculus.", "geometry"), 1:4, "_", letters[1:4])
  # makes a vector 3x4x1x4 of format "[mathtype].[1-4]_[a-d]". Useful when
  # constructing a list of various categories of similarly named variables.
  # cycle.order: optional permutation of the argument indices controlling
  # which argument cycles fastest.
  if (!is.null(cycle.order)) {
    # Bug fix: all.equal() returns a character vector (not FALSE) when the
    # values differ, so the original `!all.equal(...)` errored instead of
    # signalling "bad order"; wrap in isTRUE() and short-circuit with ||.
    if (length(list(...)) != length(cycle.order) ||
        !isTRUE(all.equal(sort(cycle.order), seq_along(cycle.order))))
      stop("bad order specification")
    # expand.grid cycles starting with the first column, so reorder the
    # arguments beforehand and restore the requested column order afterwards.
    elems <- list(...)[cycle.order]
    elemgrid <- expand.grid(elems, stringsAsFactors = FALSE)[order(cycle.order)]
    vals <- apply(elemgrid, 1, paste0, collapse = "")
  } else {
    vals <- apply(
      expand.grid(..., stringsAsFactors = FALSE),
      1, paste, collapse = ""
    )
  }
  vals
}

nameslike <- function(data, regexp, case = FALSE) {
  # Shortcut for pulling out similarly named variables -- returns all names
  # of `data` matching the string or regular expression (saves typing).
  grep(regexp, names(data), perl = TRUE, value = TRUE, ignore.case = !case)
}

# %!in% - a convenience operator for "not in"
`%!in%` <- Negate("%in%")

# Simultaneously extract and rename variables from a data frame using quoted
# strings. drop = TRUE keeps only the renamed columns (dplyr::select);
# drop = FALSE keeps all columns (dplyr::rename).
select.rename <- function(df, old, new, drop = TRUE) {
  sel = as.list(setNames(old, new))
  if (drop == TRUE) {
    out <- df %>% select(!!!rlang::syms(sel))
  }
  if (drop == FALSE) {
    out <- df %>% rename(!!!rlang::syms(sel))
  }
  out
}
/R/utility.functions.R
no_license
jmobrien/PsychMisc
R
false
false
2,033
r
dupInfo <- function(x, useNA="ifany"){ # this looks for duplicates dupdat <- data.frame(table(x, useNA = useNA)) dupdat <- dupdat[dupdat$Freq > 1,] dupdat$position <- lapply(dupdat$x, function(y){which(x %in% y)}) dupdat } construct.all <- function(..., cycle.order = NULL){ # construct.all() Forms string elements (often, variable names), of all # combinations of a set of characteristics. For instance, # construct.all(c("algebra.", "calculus.", "geometry"), 1:4, "_", letters[1:4]) # would make a vector 3x4x1x4 of format "[mathtype].[1-4]_[a-d]." Useful when # constructing a list of various categories of similarly named variables. if(!is.null(cycle.order)){ if(!length(list(...)) == length(cycle.order) | !all.equal(sort(cycle.order), 1:length(cycle.order))) stop("bad order specification") #expand.grid cycles starting with the first column, so we change the order beforehand and use that. elems <- list(...)[cycle.order] elemgrid <- expand.grid(elems, stringsAsFactors = FALSE)[order(cycle.order)] vals <- apply(elemgrid, 1, paste0, collapse = "") } else { vals <- apply( expand.grid(..., stringsAsFactors = FALSE), 1, paste, collapse = "" ) } vals } nameslike <- function(data, regexp, case = FALSE){ # shortcut for pulling out similarly named variables--returns all vars that have # a particular string or regular expression (really just a shortcut to save # typing) grep(regexp, names(data), perl = TRUE, value=TRUE, ignore.case = !case) } # %!in% - a convenience function for "not in" `%!in%` <- Negate("%in%") # Allows you to simultaneously extract and rename variables from a data frame using quoted strings # Uses a select.rename <- function(df, old, new, drop = TRUE){ sel = as.list(setNames(old, new)) if (drop == TRUE){ out <- df %>% select(!!!rlang::syms(sel)) } if (drop == FALSE){ out <- df %>% rename(!!!rlang::syms(sel)) } out }
\name{plotbeta} \alias{plotbeta} \title{ Plot a functional parameter object with confidence limits } \description{ Plot a functional parameter object with confidence limits } \usage{ plotbeta(betaestlist, betastderrlist, argvals=NULL, xlab="", ...) } \arguments{ \item{betaestlist}{ a list containing one or more functional parameter objects (class = fdPar) or functional data objects (class = fd). } \item{betastderrlist}{ a list containing functional data objects for the standard errors of the objects in \code{betaestlist}. } \item{argvals}{ a sequence of values at which to evaluate \code{betaestlist} and \code{betastderrlist}. } \item{xlab}{ x axis label } \item{\dots }{ additional plotting parameters passed to \code{plot}. } } \value{ none } \section{Side Effects}{ a plot of the basis functions } \seealso{ \code{\link{plot.fd}} } \keyword{smooth}
/man/plotbeta.Rd
no_license
cran/fda
R
false
false
913
rd
\name{plotbeta} \alias{plotbeta} \title{ Plot a functional parameter object with confidence limits } \description{ Plot a functional parameter object with confidence limits } \usage{ plotbeta(betaestlist, betastderrlist, argvals=NULL, xlab="", ...) } \arguments{ \item{betaestlist}{ a list containing one or more functional parameter objects (class = fdPar) or functional data objects (class = fd). } \item{betastderrlist}{ a list containing functional data objects for the standard errors of the objects in \code{betaestlist}. } \item{argvals}{ a sequence of values at which to evaluate \code{betaestlist} and \code{betastderrlist}. } \item{xlab}{ x axis label } \item{\dots }{ additional plotting parameters passed to \code{plot}. } } \value{ none } \section{Side Effects}{ a plot of the basis functions } \seealso{ \code{\link{plot.fd}} } \keyword{smooth}
# Within subjects ANOVA (general case) #' Necessary sample size to reach desired power for a within-subjects ANOVA with #' any number of factors using an uncertainty and publication bias correction #' procedure #' #' @description \code{ss.power.wa.general} returns the necessary per-group #' sample size to achieve a desired level of statistical power for a planned #' study testing any type of effect (omnibus, contrast) using a fully #' within-subjects ANOVA with any number of factors, based on information #' obtained from a previous study. The effect from the previous study can be #' corrected for publication bias and/or uncertainty to provide a sample size #' that will achieve more accurate statistical power for a planned study, when #' compared to approaches that use a sample effect size at face value or rely #' on sample size only. The bias and uncertainty adjusted previous study #' noncentrality parameter is also returned, which can be transformed to #' various effect size metrics. #' #' @details Researchers often use the sample effect size from a prior study as #' an estimate of the likely size of an expected future effect in sample size #' planning. However, sample effect size estimates should not usually be used #' at face value to plan sample size, due to both publication bias and #' uncertainty. #' #' The approach implemented in \code{ss.power.wa.general} uses the observed #' \eqn{F}-value and sample size from a previous study to correct the #' noncentrality parameter associated with the effect of interest for #' publication bias and/or uncertainty. This new estimated noncentrality #' parameter is then used to calculate the necessary per-group sample size to #' achieve the desired level of power in the planned study. #' #' The approach uses a likelihood function of a truncated non-central F #' distribution, where the truncation occurs due to small effect sizes being #' unobserved due to publication bias. 
The numerator of the likelihood #' function is simply the density of a noncentral F distribution. The #' denominator is the power of the test, which serves to truncate the #' distribution. Thus, the ratio of the numerator and the denominator is a #' truncated noncentral F distribution. (See Taylor & Muller, 1996, Equation #' 2.1. and Anderson & Maxwell, 2017 for more details.) #' #' Assurance is the proportion of times that power will be at or above the #' desired level, if the experiment were to be reproduced many times. For #' example, assurance = .5 means that power will be above the desired level #' half of the time, but below the desired level the other half of the time. #' Selecting assurance = .5 (selecting the noncentrality parameter at the 50th #' percentile of the likelihood distribution) results in a median-unbiased #' estimate of the population noncentrality parameter and does not correct for #' uncertainty. In order to correct for uncertainty, assurance > .5 #' can be selected, which corresponds to selecting the noncentrality parameter #' associated with the (1 - assurance) quantile of the likelihood #' distribution. #' #' If the previous study of interest has not been subjected to publication #' bias (e.g., a pilot study), \code{alpha.prior} can be set to 1 to indicate #' no publication bias. Alternative \eqn{\alpha}-levels can also be #' accommodated to represent differing amounts of publication bias. For #' example, setting \code{alpha.prior}=.20 would reflect less severe #' publication bias than the default of .05. In essence, setting #' \code{alpha.prior} at .20 assumes that studies with \eqn{p}-values less #' than .20 are published, whereas those with larger \eqn{p}-values are not. #' #' In some cases, the corrected noncentrality parameter for a given level of #' assurance will be estimated to be zero. 
This is an indication that, at the #' desired level of assurance, the previous study's effect cannot be #' accurately estimated due to high levels of uncertainty and bias. When this #' happens, subsequent sample size planning is not possible with the chosen #' specifications. Two alternatives are recommended. First, users can select a #' lower value of assurance (e.g. .8 instead of .95). Second, users can reduce #' the influence of publication bias by setting \code{alpha.prior} at a value #' greater than .05. It is possible to correct for uncertainty only by setting #' \code{alpha.prior}=1 and choosing the desired level of assurance. We #' encourage users to make the adjustments as minimal as possible. #' #' \code{ss.power.wa.general} assumes sphericity for the within-subjects #' effects. #' #' @param F.observed Observed F-value from a previous study used to plan sample #' size for a planned study #' @param N Total sample size of the previous study #' @param df.numerator Numerator degrees of freedom for the effect of interest #' @param alpha.prior Alpha-level \eqn{\alpha} for the previous study or the #' assumed statistical significance necessary for publishing in the field; to #' assume no publication bias, a value of 1 can be entered #' @param alpha.planned Alpha-level (\eqn{\alpha}) assumed for the planned study #' @param assurance Desired level of assurance, or the long run proportion of #' times that the planned study power will reach or surpass desired level #' (assurance > .5 corrects for uncertainty; assurance < .5 not recommended) #' @param power Desired level of statistical power for the planned study #' @param step Value used in the iterative scheme to determine the noncentrality #' parameter necessary for sample size planning (0 < step < .01) (users should #' not generally need to change this value; smaller values lead to more #' accurate sample size planning results, but unnecessarily small values will #' add unnecessary computational time) #' #'
@return Suggested per-group sample size for planned study #' Publication bias and uncertainty- adjusted prior study noncentrality parameter #' #' @export #' @import stats #' #' @examples ss.power.wa.general(F.observed=6.5, N=80, df.numerator=1, #' alpha.prior=.05, alpha.planned=.05, assurance=.50, power=.80, step=.001) #' #' @author Samantha F. Anderson \email{samantha.f.anderson@asu.edu}, #' Ken Kelley \email{kkelley@@nd.edu} #' #' @references Anderson, S. F., & Maxwell, S. E. (2017). #' Addressing the 'replication crisis': Using original studies to design #' replication studies with appropriate statistical power. \emph{Multivariate #' Behavioral Research, 52,} 305-322. #' #' Anderson, S. F., Kelley, K., & Maxwell, S. E. (2017). Sample size #' planning for more accurate statistical power: A method correcting sample #' effect sizes for uncertainty and publication bias. \emph{Psychological #' Science, 28,} 1547-1562. #' #' Taylor, D. J., & Muller, K. E. (1996). Bias in linear model power and #' sample size calculation due to estimating noncentrality. #' \emph{Communications in Statistics: Theory and Methods, 25,} 1595-1610. 
ss.power.wa.general <- function(F.observed, N, df.numerator, alpha.prior = .05,
                                alpha.planned = .05, assurance = .80,
                                power = .80, step = .001)
{
  # ---- Input validation -----------------------------------------------------
  # Fail fast if the required total sample size of the prior study is absent.
  if (missing(N))
    stop("You must specify 'N', which is the total sample size.")

  if (alpha.prior > 1 || alpha.prior <= 0)
    stop("There is a problem with 'alpha' of the prior study (i.e., the Type I error rate), please specify as a value between 0 and 1 (the default is .05).")

  # alpha.prior = 1 signals "no publication bias"; .999 is substituted
  # internally so the prior study's critical F (the truncation point of the
  # truncated noncentral F likelihood) stays finite.
  if (alpha.prior == 1) alpha.prior <- .999

  if (alpha.planned >= 1 || alpha.planned <= 0)
    stop("There is a problem with 'alpha' of the planned study (i.e., the Type I error rate), please specify as a value between 0 and 1 (the default is .05).")

  # Accept assurance/power entered as percentages (e.g., 80 instead of .80).
  if (assurance >= 1) assurance <- assurance / 100
  if (assurance < 0 || assurance > 1)
    stop("There is a problem with 'assurance' (i.e., the proportion of times statistical power is at or above the desired value), please specify as a value between 0 and 1 (the default is .80).")
  if (assurance < .5)
    warning("The assurance you have entered is < .5, which implies you will have under a 50% chance at achieving your desired level of power")

  if (power >= 1) power <- power / 100
  if (power < 0 || power > 1)
    stop("There is a problem with 'power' (i.e., desired statistical power), please specify as a value between 0 and 1 (the default is .80).")

  # ---- Bias/uncertainty correction of the noncentrality parameter ----------
  # Grid of candidate noncentrality parameter (NCP) values to search over.
  NCP <- seq(from = 0, to = 100, by = step)

  n <- N
  df.denominator <- df.numerator * (n - 1)

  # Critical F of the prior study; effects below it are assumed unpublished.
  critF <- qf(1 - alpha.prior, df1 = df.numerator, df2 = df.denominator)
  if (F.observed <= critF)
    stop("Your observed F statistic is nonsignificant based on your specified alpha of the prior study. Please increase 'alpha.prior' so 'F.observed' exceeds the critical value")

  # For each candidate NCP: the prior test's power (area above critF) and the
  # area above the observed F. Their ratio TM is the truncated noncentral F
  # CDF evaluated at F.observed (Taylor & Muller, 1996, Eq. 2.1).
  power.values <- 1 - pf(critF, df1 = df.numerator, df2 = df.denominator, ncp = NCP)
  area.above.F <- 1 - pf(F.observed, df1 = df.numerator, df2 = df.denominator, ncp = NCP)
  TM <- (power.values - area.above.F) / power.values

  # Corrected NCP: smallest grid value whose truncated-CDF value is closest
  # to the requested assurance level.
  TM.Percentile <- min(NCP[which(abs(TM - assurance) == min(abs(TM - assurance)))])

  if (TM.Percentile == 0)
    stop("The corrected noncentrality parameter is zero. Please either choose a lower value of assurance and/or a higher value of alpha for the prior study (e.g. accounting for less publication bias)")

  # ---- Sample size for the planned study -----------------------------------
  # Increase the per-group sample size until planned power reaches the target,
  # rescaling the corrected NCP in proportion to the candidate sample size.
  nrep <- 2
  denom.df <- df.numerator * (nrep - 1)
  diff <- -1
  while (diff < 0) {
    criticalF <- qf(1 - alpha.planned, df1 = df.numerator, df2 = denom.df)
    powers <- 1 - pf(criticalF, df1 = df.numerator, df2 = denom.df,
                     ncp = (nrep / n) * TM.Percentile)
    diff <- powers - power
    nrep <- nrep + 1
    denom.df <- df.numerator * (nrep - 1)
  }

  # nrep was incremented once past the first sufficient size; undo that.
  repn <- nrep - 1
  # Suggested sample size plus the bias/uncertainty-adjusted prior-study NCP.
  return(list(repn, TM.Percentile))
}
/R/ss.power.wa.general.R
no_license
cran/BUCSS
R
false
false
10,244
r
# Within subjects ANOVA (general case) #' Necessary sample size to reach desired power for a within-subjects ANOVA with #' any number of factors using an uncertainty and publication bias correction #' procedure #' #' @description \code{ss.power.wa.general} returns the necessary per-group #' sample size to achieve a desired level of statistical power for a planned #' study testing any type of effect (omnibus, contrast) using a fully #' within-subjects ANOVA with any number of factors, based on information #' obtained from a previous study. The effect from the previous study can be #' corrected for publication bias and/or uncertainty to provide a sample size #' that will achieve more accurate statistical power for a planned study, when #' compared to approaches that use a sample effect size at face value or rely #' on sample size only. The bias and uncertainty adjusted previous study #' noncentrality parameter is also returned, which can be transformed to #' various effect size metrics. #' #' @details Researchers often use the sample effect size from a prior study as #' an estimate of the likely size of an expected future effect in sample size #' planning. However, sample effect size estimates should not usually be used #' at face value to plan sample size, due to both publication bias and #' uncertainty. #' #' The approach implemented in \code{ss.power.wa.general} uses the observed #' \eqn{F}-value and sample size from a previous study to correct the #' noncentrality parameter associated with the effect of interest for #' publication bias and/or uncertainty. This new estimated noncentrality #' parameter is then used to calculate the necessary per-group sample size to #' achieve the desired level of power in the planned study. #' #' The approach uses a likelihood function of a truncated non-central F #' distribution, where the truncation occurs due to small effect sizes being #' unobserved due to publication bias. 
The numerator of the likelihood #' function is simply the density of a noncentral F distribution. The #' denominator is the power of the test, which serves to truncate the #' distribution. Thus, the ratio of the numerator and the denominator is a #' truncated noncentral F distribution. (See Taylor & Muller, 1996, Equation #' 2.1. and Anderson & Maxwell, 2017 for more details.) #' #' Assurance is the proportion of times that power will be at or above the #' desired level, if the experiment were to be reproduced many times. For #' example, assurance = .5 means that power will be above the desired level #' half of the time, but below the desired level the other half of the time. #' Selecting assurance = .5 (selecting the noncentrality parameter at the 50th #' percentile of the likelihood distribution) results in a median-unbiased #' estimate of the population noncentrality parameter and does not correct for #' uncertainty. In order to correct for uncertainty, assurance > .5 #' can be selected, which corresponds to selecting the noncentrality parameter #' associated with the (1 - assurance) quantile of the likelihood #' distribution. #' #' If the previous study of interest has not been subjected to publication #' bias (e.g., a pilot study), \code{alpha.prior} can be set to 1 to indicate #' no publication bias. Alternative \eqn{\alpha}-levels can also be #' accommodated to represent differing amounts of publication bias. For #' example, setting \code{alpha.prior}=.20 would reflect less severe #' publication bias than the default of .05. In essence, setting #' \code{alpha.prior} at .20 assumes that studies with \eqn{p}-values less #' than .20 are published, whereas those with larger \eqn{p}-values are not. #' #' In some cases, the corrected noncentrality parameter for a given level of #' assurance will be estimated to be zero. 
This is an indication that, at the #' desired level of assurance, the previous study's effect cannot be #' accurately estimated due to high levels of uncertainty and bias. When this #' happens, subsequent sample size planning is not possible with the chosen #' specifications. Two alternatives are recommended. First, users can select a #' lower value of assurance (e.g. .8 instead of .95). Second, users can reduce #' the influence of publication bias by setting \code{alpha.prior} at a value #' greater than .05. It is possible to correct for uncertainty only by setting #' \code{alpha.prior}=1 and choosing the desired level of assurance. We #' encourage users to make the adjustments as minimal as possible. #' #' \code{ss.power.wa.general} assumes sphericity for the within-subjects #' effects. #' #' @param F.observed Observed F-value from a previous study used to plan sample #' size for a planned study #' @param N Total sample size of the previous study #' @param df.numerator Numerator degrees of freedom for the effect of interest #' @param alpha.prior Alpha-level \eqn{\alpha} for the previous study or the #' assumed statistical significance necessary for publishing in the field; to #' assume no publication bias, a value of 1 can be entered #' @param alpha.planned Alpha-level (\eqn{\alpha}) assumed for the planned study #' @param assurance Desired level of assurance, or the long run proportion of #' times that the planned study power will reach or surpass desired level #' (assurance > .5 corrects for uncertainty; assurance < .5 not recommended) #' @param power Desired level of statistical power for the planned study #' @param step Value used in the iterative scheme to determine the noncentrality #' parameter necessary for sample size planning (0 < step < .01) (users should #' not generally need to change this value; smaller values lead to more #' accurate sample size planning results, but unnecessarily small values will #' add unnecessary computational time) #' #' 
@return Suggested per-group sample size for planned study #' Publication bias and uncertainty- adjusted prior study noncentrality parameter #' #' @export #' @import stats #' #' @examples ss.power.wa.general(F.observed=6.5, N=80, df.numerator=1, #' alpha.prior=.05, alpha.planned=.05, assurance=.50, power=.80, step=.001) #' #' @author Samantha F. Anderson \email{samantha.f.anderson@asu.edu}, #' Ken Kelley \email{kkelley@@nd.edu} #' #' @references Anderson, S. F., & Maxwell, S. E. (2017). #' Addressing the 'replication crisis': Using original studies to design #' replication studies with appropriate statistical power. \emph{Multivariate #' Behavioral Research, 52,} 305-322. #' #' Anderson, S. F., Kelley, K., & Maxwell, S. E. (2017). Sample size #' planning for more accurate statistical power: A method correcting sample #' effect sizes for uncertainty and publication bias. \emph{Psychological #' Science, 28,} 1547-1562. #' #' Taylor, D. J., & Muller, K. E. (1996). Bias in linear model power and #' sample size calculation due to estimating noncentrality. #' \emph{Communications in Statistics: Theory and Methods, 25,} 1595-1610. 
ss.power.wa.general <- function(F.observed, N, df.numerator, alpha.prior = .05,
                                alpha.planned = .05, assurance = .80,
                                power = .80, step = .001)
{
  # ---- Input validation -----------------------------------------------------
  # Fail fast if the required total sample size of the prior study is absent.
  if (missing(N))
    stop("You must specify 'N', which is the total sample size.")

  if (alpha.prior > 1 || alpha.prior <= 0)
    stop("There is a problem with 'alpha' of the prior study (i.e., the Type I error rate), please specify as a value between 0 and 1 (the default is .05).")

  # alpha.prior = 1 signals "no publication bias"; .999 is substituted
  # internally so the prior study's critical F (the truncation point of the
  # truncated noncentral F likelihood) stays finite.
  if (alpha.prior == 1) alpha.prior <- .999

  if (alpha.planned >= 1 || alpha.planned <= 0)
    stop("There is a problem with 'alpha' of the planned study (i.e., the Type I error rate), please specify as a value between 0 and 1 (the default is .05).")

  # Accept assurance/power entered as percentages (e.g., 80 instead of .80).
  if (assurance >= 1) assurance <- assurance / 100
  if (assurance < 0 || assurance > 1)
    stop("There is a problem with 'assurance' (i.e., the proportion of times statistical power is at or above the desired value), please specify as a value between 0 and 1 (the default is .80).")
  if (assurance < .5)
    warning("The assurance you have entered is < .5, which implies you will have under a 50% chance at achieving your desired level of power")

  if (power >= 1) power <- power / 100
  if (power < 0 || power > 1)
    stop("There is a problem with 'power' (i.e., desired statistical power), please specify as a value between 0 and 1 (the default is .80).")

  # ---- Bias/uncertainty correction of the noncentrality parameter ----------
  # Grid of candidate noncentrality parameter (NCP) values to search over.
  NCP <- seq(from = 0, to = 100, by = step)

  n <- N
  df.denominator <- df.numerator * (n - 1)

  # Critical F of the prior study; effects below it are assumed unpublished.
  critF <- qf(1 - alpha.prior, df1 = df.numerator, df2 = df.denominator)
  if (F.observed <= critF)
    stop("Your observed F statistic is nonsignificant based on your specified alpha of the prior study. Please increase 'alpha.prior' so 'F.observed' exceeds the critical value")

  # For each candidate NCP: the prior test's power (area above critF) and the
  # area above the observed F. Their ratio TM is the truncated noncentral F
  # CDF evaluated at F.observed (Taylor & Muller, 1996, Eq. 2.1).
  power.values <- 1 - pf(critF, df1 = df.numerator, df2 = df.denominator, ncp = NCP)
  area.above.F <- 1 - pf(F.observed, df1 = df.numerator, df2 = df.denominator, ncp = NCP)
  TM <- (power.values - area.above.F) / power.values

  # Corrected NCP: smallest grid value whose truncated-CDF value is closest
  # to the requested assurance level.
  TM.Percentile <- min(NCP[which(abs(TM - assurance) == min(abs(TM - assurance)))])

  if (TM.Percentile == 0)
    stop("The corrected noncentrality parameter is zero. Please either choose a lower value of assurance and/or a higher value of alpha for the prior study (e.g. accounting for less publication bias)")

  # ---- Sample size for the planned study -----------------------------------
  # Increase the per-group sample size until planned power reaches the target,
  # rescaling the corrected NCP in proportion to the candidate sample size.
  nrep <- 2
  denom.df <- df.numerator * (nrep - 1)
  diff <- -1
  while (diff < 0) {
    criticalF <- qf(1 - alpha.planned, df1 = df.numerator, df2 = denom.df)
    powers <- 1 - pf(criticalF, df1 = df.numerator, df2 = denom.df,
                     ncp = (nrep / n) * TM.Percentile)
    diff <- powers - power
    nrep <- nrep + 1
    denom.df <- df.numerator * (nrep - 1)
  }

  # nrep was incremented once past the first sufficient size; undo that.
  repn <- nrep - 1
  # Suggested sample size plus the bias/uncertainty-adjusted prior-study NCP.
  return(list(repn, TM.Percentile))
}
# Fuzzer-generated regression input for esreg::G1_fun.
fuzz_input <- list(type = 1869636974L, z = 1.32515051110006e-105)
# Call G1_fun with the named arguments (equivalent to do.call on the list)
# and display the structure of whatever it returns.
g1_result <- esreg::G1_fun(type = fuzz_input$type, z = fuzz_input$z)
str(g1_result)
/esreg/inst/testfiles/G1_fun/libFuzzer_G1_fun/G1_fun_valgrind_files/1609891926-test.R
no_license
akhikolla/updated-only-Issues
R
false
false
117
r
# Auto-generated fuzzer regression case for esreg::G1_fun.
input_args <- list(type = 1869636974L, z = 1.32515051110006e-105)
# Dispatch the saved argument list to G1_fun and print the result structure.
output <- do.call(esreg::G1_fun, input_args)
str(output)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/white-center-place.R \name{prepare_white_center_place} \alias{prepare_white_center_place} \alias{make_white_center_place} \title{Make White Center Place} \usage{ prepare_white_center_place(path) make_white_center_place(path) } \value{ a MULTIPOLYGON simple feature (class = \code{sf}) } \description{ This is a temporary function. }
/man/white-center-place.Rd
permissive
tiernanmartin/NeighborhoodChangeTypology
R
false
true
413
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/white-center-place.R \name{prepare_white_center_place} \alias{prepare_white_center_place} \alias{make_white_center_place} \title{Make White Center Place} \usage{ prepare_white_center_place(path) make_white_center_place(path) } \value{ a MULTIPOLYGON simple feature (class = \code{sf}) } \description{ This is a temporary function. }
# Script to get the distribution of FST values under a neutral model using
# MSMS-simulated genotypes.
# 10/30/2019

# NOTE(review): script assumes this directory exists on the host running it.
setwd("~/ms_sims/extra_sims/Quart_Founder/")

# Load required packages.
for (pkg in c("adegenet", "hierfstat")) {
  library(pkg, character.only = TRUE)
}

# Paste consecutive allele entries (1,2), (3,4), ... together as "a1|a2"
# diploid genotype strings.
f <- function(x) {
  s <- seq(2, length(x), 2)
  paste(x[s - 1], x[s], sep = "|")
}

# Use the 5,000 most recent simulation output files.
file.names2 <- tail(dir("./", pattern = "output.txt"), n = 5000)

# Preallocate the result vector. (The original grew a matrix seeded with "",
# which is O(n^2), coerced every FST value to character, and wrote a spurious
# quoted blank first row to the output file.)
fst_values <- numeric(length(file.names2))

for (i in seq_along(file.names2)) {
  df <- read.csv(file.names2[i], header = FALSE)
  # Collapse pairs of haplotype columns into diploid genotype strings.
  df2 <- as.data.frame(lapply(df, f), stringsAsFactors = FALSE)
  # Five individuals per population: GA vs GAR.
  pop <- c(rep("GA", 5), rep("GAR", 5))
  df2_genind <- df2genind(df2, pop = pop, sep = "\\|", ploidy = 2)
  df2_hier <- genind2hierfstat(df2_genind, pop = pop)
  # Pairwise Weir & Cockerham FST between the two populations.
  FST_mat <- pairwise.WCfst(df2_hier, diploid = TRUE)
  print(FST_mat[1, 2])
  fst_values[i] <- FST_mat[1, 2]
}

write.table(fst_values, "FST_msSims10.out", row.names = FALSE, col.names = FALSE)
/msms_sim_analysis_FSTvalsPart10.R
no_license
mcadamme/GA_and_GAR
R
false
false
1,092
r
#Script to get distribution of FST values under neutral model using MSMS simulated genos #103012019 setwd("~/ms_sims/extra_sims/Quart_Founder/") #Loading packages x <- c("adegenet", "hierfstat") lapply(x, FUN = function(X) { do.call("library", list(X)) }) #function to paste alleles 1 and 2 together on same line. f <- function(x) { s <- seq(2, length(x), 2) paste(x[s-1], x[s], sep="|") } out.file <- "" file.names2 <- tail(dir("./", pattern ="output.txt"), n=5000) for(i in 1:length(file.names2)){ df <- read.csv(file.names2[i], header = F) # run algorithm for each column df2 <- as.data.frame(lapply(df, f), stringsAsFactors=FALSE) pop <- c("GA", "GA", "GA", "GA", "GA", "GAR", "GAR", "GAR", "GAR", "GAR") df2_genind <- df2genind(df2, pop=pop, sep = "\\|", ploidy = 2) df2_genind_sum <- summary(df2_genind) df2_hier <- genind2hierfstat(df2_genind, pop = pop) FST_mat <- pairwise.WCfst(df2_hier, diploid = T) print(FST_mat[1,2]) out.file <- rbind(out.file, FST_mat[1,2]) } write.table(out.file, "FST_msSims10.out", row.names = F, col.names = F)
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # This is a bundled copy of # https://github.com/satijalab/seurat/blob/master/R/differential_expression.R # only to over-write the behaviour of FindMarkers and derivatives ... # this addition is intended to preserve compatibility in the output given # by our function and #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% #' @importFrom Seurat FindMarkers DefaultAssay GetAssayData WhichCells NULL #' Gene expression markers of identity classes #' #' Finds markers (differentially expressed genes) for identity classes #' #' @param object An object #' @param ... Arguments passed to other methods and to specific DE methods #' @return data.frame with a ranked list of putative markers as rows, and associated #' statistics as columns (p-values, ROC score, etc., depending on the test used (\code{test.use})). The following columns are always present: #' \itemize{ #' \item \code{avg_logFC}: log fold-chage of the average expression between the two groups. Positive values indicate that the gene is more highly expressed in the first group #' \item \code{pct.1}: The percentage of cells where the gene is detected in the first group #' \item \code{pct.2}: The percentage of cells where the gene is detected in the second group #' \item \code{p_val_adj}: Adjusted p-value, based on bonferroni correction using all genes in the dataset #' } #' #' @details p-value adjustment is performed using bonferroni correction based on #' the total number of genes in the dataset. Other correction methods are not #' recommended, as Seurat pre-filters genes using the arguments above, reducing #' the number of tests performed. Lastly, as Aaron Lun has pointed out, p-values #' should be interpreted cautiously, as the genes used for clustering are the #' same genes tested for differential expression. #' #' @references McDavid A, Finak G, Chattopadyay PK, et al. 
Data exploration, #' quality control and testing in single-cell qPCR-based gene expression experiments. #' Bioinformatics. 2013;29(4):461-467. doi:10.1093/bioinformatics/bts714 #' @references Trapnell C, et al. The dynamics and regulators of cell fate #' decisions are revealed by pseudotemporal ordering of single cells. Nature #' Biotechnology volume 32, pages 381-386 (2014) #' @references Andrew McDavid, Greg Finak and Masanao Yajima (2017). MAST: Model-based #' Analysis of Single Cell Transcriptomics. R package version 1.2.1. #' https://github.com/RGLab/MAST/ #' @references Love MI, Huber W and Anders S (2014). "Moderated estimation of #' fold change and dispersion for RNA-seq data with DESeq2." Genome Biology. #' https://bioconductor.org/packages/release/bioc/html/DESeq2.html #' #' @export #' #' @examples #' # Find markers for cluster 2 #' markers <- FindMarkers(object = pbmc_small, ident.1 = 2) #' head(x = markers) #' #' # Take all cells in cluster 2, and find markers that separate cells in the 'g1' group (metadata #' # variable 'group') #' suppressWarnings(markers <- FindMarkers(pbmc_small, ident.1 = "g1", group.by = 'groups', subset.ident = "2")) #' head(x = markers) #' #' # Pass 'clustertree' or an object of class phylo to ident.1 and #' # a node to ident.2 as a replacement for FindMarkersNode #' # pbmc_small <- BuildClusterTree(object = pbmc_small) #' # markers <- FindMarkers(object = pbmc_small, ident.1 = 'clustertree', ident.2 = 5) #' # head(x = markers) #' #' @rdname FindMarkers #' @export FindMarkers #' #' @aliases FindMarkersNode #' FindMarkers <- function(object, ...) 
{ UseMethod(generic = 'FindMarkers', object = object) } #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # Functions #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # globalVariables( # names = c('myAUC', 'p_val', 'avg_logFC'), # package = 'Seurat', # add = TRUE # ) #' Gene expression markers for all identity classes #' #' Finds markers (differentially expressed genes) for each of the identity classes in a dataset #' #' @inheritParams FindMarkers #' @param node A node to find markers for and all its children; requires #' \code{\link{BuildClusterTree}} to have been run previously; replaces \code{FindAllMarkersNode} #' @param return.thresh Only return markers that have a p-value < return.thresh, or a power > return.thresh (if the test is ROC) #' #' @return Matrix containing a ranked list of putative markers, and associated #' statistics (p-values, ROC score, etc.) #' #' @importFrom ape drop.tip #' @importFrom stats setNames #' #' @export #' #' @aliases FindAllMarkersNode #' #' @examples #' # Find markers for all clusters #' suppressWarnings(all.markers <- FindAllMarkers(object = pbmc_small)) #' head(x = all.markers) #' \dontrun{ #' # Pass a value to node as a replacement for FindAllMarkersNode #' pbmc_small <- BuildClusterTree(object = pbmc_small) #' all.markers <- FindAllMarkers(object = pbmc_small, node = 4) #' head(x = all.markers) #' } #' FindAllMarkers <- function( object, assay = NULL, features = NULL, logfc.threshold = 0.25, test.use = 'wilcox', slot = 'data', min.pct = 0.1, min.diff.pct = -Inf, node = NULL, verbose = TRUE, only.pos = FALSE, max.cells.per.ident = Inf, random.seed = 1, latent.vars = NULL, min.cells.feature = 3, min.cells.group = 3, pseudocount.use = 1, return.thresh = 1e-2, ... 
) { MapVals <- function(vec, from, to) { vec2 <- setNames(object = to, nm = from)[as.character(x = vec)] vec2[is.na(x = vec2)] <- vec[is.na(x = vec2)] return(unname(obj = vec2)) } if ((test.use == "roc") && (return.thresh == 1e-2)) { return.thresh <- 0.7 } if (is.null(x = node)) { idents.all <- sort(x = unique(x = Idents(object = object))) } else { tree <- Seurat:::Tool(object = object, slot = 'BuildClusterTree') if (is.null(x = tree)) { stop("Please run 'BuildClusterTree' before finding markers on nodes") } descendants <- Seurat:::DFT(tree = tree, node = node, include.children = TRUE) all.children <- sort(x = tree$edge[, 2][!tree$edge[, 2] %in% tree$edge[, 1]]) descendants <- MapVals( vec = descendants, from = all.children, to = tree$tip.label ) drop.children <- setdiff(x = tree$tip.label, y = descendants) keep.children <- setdiff(x = tree$tip.label, y = drop.children) orig.nodes <- c( node, as.numeric(x = setdiff(x = descendants, y = keep.children)) ) tree <- drop.tip(phy = tree, tip = drop.children) new.nodes <- unique(x = tree$edge[, 1, drop = TRUE]) idents.all <- (tree$Nnode + 2):max(tree$edge) } genes.de <- list() messages <- list() for (i in 1:length(x = idents.all)) { if (verbose) { message("Calculating cluster ", idents.all[i]) } genes.de[[i]] <- tryCatch( expr = { FindMarkers( object = object, assay = assay, ident.1 = if (is.null(x = node)) { idents.all[i] } else { tree }, ident.2 = if (is.null(x = node)) { NULL } else { idents.all[i] }, features = features, logfc.threshold = logfc.threshold, test.use = test.use, slot = slot, min.pct = min.pct, min.diff.pct = min.diff.pct, verbose = verbose, only.pos = only.pos, max.cells.per.ident = max.cells.per.ident, random.seed = random.seed, latent.vars = latent.vars, min.cells.feature = min.cells.feature, min.cells.group = min.cells.group, pseudocount.use = pseudocount.use, ... 
) }, error = function(cond) { return(cond$message) } ) if (class(x = genes.de[[i]]) == "character") { messages[[i]] <- genes.de[[i]] genes.de[[i]] <- NULL } } gde.all <- data.frame() for (i in 1:length(x = idents.all)) { if (is.null(x = unlist(x = genes.de[i]))) { next } gde <- genes.de[[i]] if (nrow(x = gde) > 0) { if (test.use == "roc") { gde <- subset( x = gde, subset = (myAUC > return.thresh | myAUC < (1 - return.thresh)) ) } else if (is.null(x = node) || test.use %in% c('bimod', 't')) { gde <- gde[order(gde$p_val, -gde[, 2]), ] gde <- subset(x = gde, subset = p_val < return.thresh) } if (nrow(x = gde) > 0) { gde$cluster <- idents.all[i] gde$gene <- rownames(x = gde) } if (nrow(x = gde) > 0) { gde.all <- rbind(gde.all, gde) } } } if ((only.pos) && nrow(x = gde.all) > 0) { diff.col <- ifelse(test = slot == "scale.data" || test.use == "roc", yes = "avg_diff", no = "avg_logFC") return(subset(x = gde.all, subset = gde.all[, diff.col] > 0)) } rownames(x = gde.all) <- make.unique(names = as.character(x = gde.all$gene)) if (nrow(x = gde.all) == 0) { warning("No DE genes identified", call. = FALSE, immediate. = TRUE) } if (length(x = messages) > 0) { warning("The following tests were not performed: ", call. = FALSE, immediate. = TRUE) for (i in 1:length(x = messages)) { if (!is.null(x = messages[[i]])) { warning("When testing ", idents.all[i], " versus all:\n\t", messages[[i]], call. = FALSE, immediate. = TRUE) } } } if (!is.null(x = node)) { gde.all$cluster <- MapVals( vec = gde.all$cluster, from = new.nodes, to = orig.nodes ) } return(gde.all) } #' Finds markers that are conserved between the groups #' #' @inheritParams FindMarkers #' @param ident.1 Identity class to define markers for #' @param ident.2 A second identity class for comparison. If NULL (default) - #' use all other cells for comparison. #' @param grouping.var grouping variable #' @param assay of assay to fetch data for (default is RNA) #' @param meta.method method for combining p-values. 
Should be a function from #' the metap package (NOTE: pass the function, not a string) #' @param \dots parameters to pass to FindMarkers #' #' @return data.frame containing a ranked list of putative conserved markers, and #' associated statistics (p-values within each group and a combined p-value #' (such as Fishers combined p-value or others from the metap package), #' percentage of cells expressing the marker, average differences). Name of group is appended to each #' associated output column (e.g. CTRL_p_val). If only one group is tested in the grouping.var, max #' and combined p-values are not returned. #' #' @importFrom metap minimump #' #' @export #' #' @examples #' \dontrun{ #' pbmc_small #' # Create a simulated grouping variable #' pbmc_small[['groups']] <- sample(x = c('g1', 'g2'), size = ncol(x = pbmc_small), replace = TRUE) #' FindConservedMarkers(pbmc_small, ident.1 = 0, ident.2 = 1, grouping.var = "groups") #' } #' FindConservedMarkers <- function( object, ident.1, ident.2 = NULL, grouping.var, assay = 'RNA', slot = 'data', meta.method = minimump, verbose = TRUE, ... ) { if (class(x = meta.method) != "function") { stop("meta.method should be a function from the metap package. 
Please see https://cran.r-project.org/web/packages/metap/metap.pdf for a detailed description of the available functions.") } object.var <- FetchData(object = object, vars = grouping.var) object <- Seurat::SetIdent( object = object, cells = colnames(x = object), value = paste(Idents(object = object), object.var[, 1], sep = "_") ) levels.split <- names(x = sort(x = table(object.var[, 1]))) num.groups <- length(levels.split) cells <- list() for (i in 1:num.groups) { cells[[i]] <- rownames( x = object.var[object.var[, 1] == levels.split[i], , drop = FALSE] ) } marker.test <- list() # do marker tests for (i in 1:num.groups) { level.use <- levels.split[i] ident.use.1 <- paste(ident.1, level.use, sep = "_") ident.use.1.exists <- ident.use.1 %in% Idents(object = object) if (!all(ident.use.1.exists)) { bad.ids <- ident.1[!ident.use.1.exists] warning( "Identity: ", paste(bad.ids, collapse = ", "), " not present in group ", level.use, ". Skipping ", level.use, call. = FALSE, immediate. = TRUE ) next } cells.1 <- Seurat::WhichCells(object = object, idents = ident.use.1) if (is.null(x = ident.2)) { cells.2 <- setdiff(x = cells[[i]], y = cells.1) ident.use.2 <- names(x = which(x = table(Idents(object = object)[cells.2]) > 0)) ident.2 <- gsub(pattern = paste0("_", level.use), replacement = "", x = ident.use.2) if (length(x = ident.use.2) == 0) { stop(paste("Only one identity class present:", ident.1)) } } else { ident.use.2 <- paste(ident.2, level.use, sep = "_") } if (verbose) { message( "Testing group ", level.use, ": (", paste(ident.1, collapse = ", "), ") vs (", paste(ident.2, collapse = ", "), ")" ) } ident.use.2.exists <- ident.use.2 %in% Idents(object = object) if (!all(ident.use.2.exists)) { bad.ids <- ident.2[!ident.use.2.exists] warning( "Identity: ", paste(bad.ids, collapse = ", "), " not present in group ", level.use, ". Skipping ", level.use, call. = FALSE, immediate. 
= TRUE ) next } marker.test[[i]] <- FindMarkers( object = object, assay = assay, slot = slot, ident.1 = ident.use.1, ident.2 = ident.use.2, verbose = verbose, ... ) } names(x = marker.test) <- levels.split marker.test <- Filter(f = Negate(f = is.null), x = marker.test) genes.conserved <- Reduce( f = intersect, x = lapply( X = marker.test, FUN = function(x) { return(rownames(x = x)) } ) ) markers.conserved <- list() for (i in 1:length(x = marker.test)) { markers.conserved[[i]] <- marker.test[[i]][genes.conserved, ] colnames(x = markers.conserved[[i]]) <- paste( names(x = marker.test)[i], colnames(x = markers.conserved[[i]]), sep = "_" ) } markers.combined <- Reduce(cbind, markers.conserved) pval.codes <- colnames(x = markers.combined)[grepl(pattern = "*_p_val$", x = colnames(x = markers.combined))] if (length(x = pval.codes) > 1) { markers.combined$max_pval <- apply( X = markers.combined[, pval.codes, drop = FALSE], MARGIN = 1, FUN = max ) combined.pval <- data.frame(cp = apply( X = markers.combined[, pval.codes, drop = FALSE], MARGIN = 1, FUN = function(x) { return(meta.method(x)$p) } )) colnames(x = combined.pval) <- paste0( as.character(x = formals()$meta.method), "_p_val" ) markers.combined <- cbind(markers.combined, combined.pval) markers.combined <- markers.combined[order(markers.combined[, paste0(as.character(x = formals()$meta.method), "_p_val")]), ] } else { warning("Only a single group was tested", call. = FALSE, immediate. = TRUE) } return(markers.combined) } #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # Methods for Seurat-defined generics #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% #' @param cells.1 Vector of cell names belonging to group 1 #' @param cells.2 Vector of cell names belonging to group 2 #' @param counts Count matrix if using scale.data for DE tests. 
This is used for #' computing pct.1 and pct.2 and for filtering features based on fraction #' expressing #' @param features Genes to test. Default is to use all genes #' @param logfc.threshold Limit testing to genes which show, on average, at least #' X-fold difference (log-scale) between the two groups of cells. Default is 0.25 #' Increasing logfc.threshold speeds up the function, but can miss weaker signals. #' @param test.use Denotes which test to use. Available options are: #' \itemize{ #' \item{"wilcox"} : Identifies differentially expressed genes between two #' groups of cells using a Wilcoxon Rank Sum test (default) #' \item{"bimod"} : Likelihood-ratio test for single cell gene expression, #' (McDavid et al., Bioinformatics, 2013) #' \item{"roc"} : Identifies 'markers' of gene expression using ROC analysis. #' For each gene, evaluates (using AUC) a classifier built on that gene alone, #' to classify between two groups of cells. An AUC value of 1 means that #' expression values for this gene alone can perfectly classify the two #' groupings (i.e. Each of the cells in cells.1 exhibit a higher level than #' each of the cells in cells.2). An AUC value of 0 also means there is perfect #' classification, but in the other direction. A value of 0.5 implies that #' the gene has no predictive power to classify the two groups. Returns a #' 'predictive power' (abs(AUC-0.5) * 2) ranked matrix of putative differentially #' expressed genes. #' \item{"t"} : Identify differentially expressed genes between two groups of #' cells using the Student's t-test. #' \item{"negbinom"} : Identifies differentially expressed genes between two #' groups of cells using a negative binomial generalized linear model. #' Use only for UMI-based datasets #' \item{"poisson"} : Identifies differentially expressed genes between two #' groups of cells using a poisson generalized linear model. 
#' Use only for UMI-based datasets #' \item{"LR"} : Uses a logistic regression framework to determine differentially #' expressed genes. Constructs a logistic regression model predicting group #' membership based on each feature individually and compares this to a null #' model with a likelihood ratio test. #' \item{"MAST"} : Identifies differentially expressed genes between two groups #' of cells using a hurdle model tailored to scRNA-seq data. Utilizes the MAST #' package to run the DE testing. #' \item{"DESeq2"} : Identifies differentially expressed genes between two groups #' of cells based on a model using DESeq2 which uses a negative binomial #' distribution (Love et al, Genome Biology, 2014).This test does not support #' pre-filtering of genes based on average difference (or percent detection rate) #' between cell groups. However, genes may be pre-filtered based on their #' minimum detection rate (min.pct) across both cell groups. To use this method, #' please install DESeq2, using the instructions at #' https://bioconductor.org/packages/release/bioc/html/DESeq2.html #' } #' @param min.pct only test genes that are detected in a minimum fraction of #' min.pct cells in either of the two populations. Meant to speed up the function #' by not testing genes that are very infrequently expressed. Default is 0.1 #' @param min.diff.pct only test genes that show a minimum difference in the #' fraction of detection between the two groups. Set to -Inf by default #' @param only.pos Only return positive markers (FALSE by default) #' @param verbose Print a progress bar once expression testing begins #' @param max.cells.per.ident Down sample each identity class to a max number. #' Default is no downsampling. 
Not activated by default (set to Inf) #' @param random.seed Random seed for downsampling #' @param latent.vars Variables to test, used only when \code{test.use} is one of #' 'LR', 'negbinom', 'poisson', or 'MAST' #' @param min.cells.feature Minimum number of cells expressing the feature in at least one #' of the two groups, currently only used for poisson and negative binomial tests #' @param min.cells.group Minimum number of cells in one of the groups #' @param pseudocount.use Pseudocount to add to averaged expression values when #' calculating logFC. 1 by default. #' #' @importFrom Matrix rowSums #' @importFrom rlang %||% #' @importFrom stats p.adjust #' #' @rdname FindMarkers #' @export #' @method FindMarkers default #' FindMarkers.default <- function( object, slot = "data", counts = numeric(), cells.1 = NULL, cells.2 = NULL, features = NULL, reduction = NULL, logfc.threshold = 0.25, test.use = 'wilcox', min.pct = 0.1, min.diff.pct = -Inf, verbose = TRUE, only.pos = FALSE, max.cells.per.ident = Inf, random.seed = 1, latent.vars = NULL, min.cells.feature = 3, min.cells.group = 3, pseudocount.use = 1, ... 
) { features <- features %||% rownames(x = object) methods.noprefiliter <- c("DESeq2") if (test.use %in% methods.noprefiliter) { features <- rownames(x = object) min.diff.pct <- -Inf logfc.threshold <- 0 } # error checking if (length(x = cells.1) == 0) { stop("Cell group 1 is empty - no cells with identity class ", cells.1) } else if (length(x = cells.2) == 0) { stop("Cell group 2 is empty - no cells with identity class ", cells.2) return(NULL) } else if (length(x = cells.1) < min.cells.group) { stop("Cell group 1 has fewer than ", min.cells.group, " cells") } else if (length(x = cells.2) < min.cells.group) { stop("Cell group 2 has fewer than ", min.cells.group, " cells") } else if (any(!cells.1 %in% colnames(x = object))) { bad.cells <- colnames(x = object)[which(x = !as.character(x = cells.1) %in% colnames(x = object))] stop( "The following cell names provided to cells.1 are not present: ", paste(bad.cells, collapse = ", ") ) } else if (any(!cells.2 %in% colnames(x = object))) { bad.cells <- colnames(x = object)[which(x = !as.character(x = cells.2) %in% colnames(x = object))] stop( "The following cell names provided to cells.2 are not present: ", paste(bad.cells, collapse = ", ") ) } # feature selection (based on percentages) data <- switch( EXPR = slot, 'scale.data' = counts, object ) if (is.null(x = reduction)) { thresh.min <- 0 pct.1 <- round( x = rowSums(x = data[features, cells.1, drop = FALSE] > thresh.min) / length(x = cells.1), digits = 3 ) pct.2 <- round( x = rowSums(x = data[features, cells.2, drop = FALSE] > thresh.min) / length(x = cells.2), digits = 3 ) data.alpha <- cbind(pct.1, pct.2) colnames(x = data.alpha) <- c("pct.1", "pct.2") alpha.min <- apply(X = data.alpha, MARGIN = 1, FUN = max) names(x = alpha.min) <- rownames(x = data.alpha) features <- names(x = which(x = alpha.min > min.pct)) if (length(x = features) == 0) { stop("No features pass min.pct threshold") } alpha.diff <- alpha.min - apply(X = data.alpha, MARGIN = 1, FUN = min) features <- 
names( x = which(x = alpha.min > min.pct & alpha.diff > min.diff.pct) ) if (length(x = features) == 0) { stop("No features pass min.diff.pct threshold") } } else { data.alpha <- data.frame( pct.1 = rep(x = NA, times = length(x = features)), pct.2 = rep(x = NA, times = length(x = features)) ) } # feature selection (based on average difference) mean.fxn <- if (is.null(x = reduction) && slot != "scale.data") { switch( EXPR = slot, 'data' = function(x) { return(log(x = mean(x = expm1(x = x)) + pseudocount.use)) }, function(x) { return(log(x = mean(x = x) + pseudocount.use)) } ) } else { mean } data.1 <- apply( X = data[features, cells.1, drop = FALSE], MARGIN = 1, FUN = mean.fxn ) data.2 <- apply( X = data[features, cells.2, drop = FALSE], MARGIN = 1, FUN = mean.fxn ) total.diff <- (data.1 - data.2) if (is.null(x = reduction) && slot != "scale.data") { features.diff <- if (only.pos) { names(x = which(x = total.diff > logfc.threshold)) } else { names(x = which(x = abs(x = total.diff) > logfc.threshold)) } features <- intersect(x = features, y = features.diff) if (length(x = features) == 0) { stop("No features pass logfc.threshold threshold") } } if (max.cells.per.ident < Inf) { set.seed(seed = random.seed) # Should be cells.1 and cells.2? 
if (length(x = cells.1) > max.cells.per.ident) { cells.1 <- sample(x = cells.1, size = max.cells.per.ident) } if (length(x = cells.2) > max.cells.per.ident) { cells.2 <- sample(x = cells.2, size = max.cells.per.ident) } if (!is.null(x = latent.vars)) { latent.vars <- latent.vars[c(cells.1, cells.2), , drop = FALSE] } } # perform DE if (!(test.use %in% c('negbinom', 'poisson', 'MAST', "LR")) && !is.null(x = latent.vars)) { warning("'latent.vars' is only used for 'negbinom', 'poisson', 'LR', and 'MAST' tests") } std.arguments <- list( data.use = object[features, c(cells.1, cells.2), drop = FALSE], cells.1 = cells.1, cells.2 = cells.2, verbose = verbose) de.results <- switch( EXPR = test.use, 'wilcox' = do.call("WilcoxDETest", c(std.arguments, ...)), 'bimod' = do.call("DiffExpTest", c(std.arguments, ...)), 'roc' = do.call("MarkerTest", std.arguments), 't' = do.call("DiffTTest", std.arguments), 'negbinom' = do.call("GLMDETest", c(std.arguments, min.cells = min.cells.feature, latent.vars = latent.vars, test.use = test.use)), 'poisson' = do.call("GLMDETest", c(std.arguments, min.cells = min.cells.feature, latent.vars = latent.vars, test.use = test.use)), 'MAST' = do.call("MASTDETest", c(std.arguments, min.cells = min.cells.feature, latent.vars = latent.vars, test.use = test.use)), "DESeq2" = do.call("DESeq2DETest", std.arguments), "LR" = do.call("LRDETest", c(std.arguments, latent.vars = latent.vars)), { # If no test is matched, tries to call a function with the name # of the provided test found_functions <- findFunction(test.use) if (length(found_functions) == 0) { stop("Unknown test: ", test.use, "\n", "This might be casued because the function that provides ", "that test hast not been loaded. 
Make sure to load the ", "package that would contain it.") } de.results <- do.call(test.use, c(std.arguments, ...), envir = found_functions[[1]]) de.results } ) if (is.null(x = reduction)) { diff.col <- ifelse( test = slot == "scale.data" || test.use == 'roc', yes = "avg_diff", no = "avg_logFC" ) de.results[, diff.col] <- total.diff[rownames(x = de.results)] de.results <- cbind(de.results, data.alpha[rownames(x = de.results), , drop = FALSE]) } else { diff.col <- "avg_diff" de.results[, diff.col] <- total.diff[rownames(x = de.results)] } if (only.pos) { de.results <- de.results[de.results[, diff.col] > 0, , drop = FALSE] } if (test.use == "roc") { de.results <- de.results[order(-de.results$power, -de.results[, diff.col]), ] } else { de.results <- de.results[order(de.results$p_val, -de.results[, diff.col]), ] de.results$p_val_adj = p.adjust( p = de.results$p_val, method = "bonferroni", n = nrow(x = object) ) } return(de.results) } #' @param ident.1 Identity class to define markers for; pass an object of class #' \code{phylo} or 'clustertree' to find markers for a node in a cluster tree; #' passing 'clustertree' requires \code{\link{BuildClusterTree}} to have been run #' @param ident.2 A second identity class for comparison; if \code{NULL}, #' use all other cells for comparison; if an object of class \code{phylo} or #' 'clustertree' is passed to \code{ident.1}, must pass a node to find markers for #' @param reduction Reduction to use in differential expression testing - will test for DE on cell embeddings #' @param group.by Regroup cells into a different identity class prior to performing differential expression (see example) #' @param subset.ident Subset a particular identity class prior to regrouping. 
Only relevant if group.by is set (see example) #' @param assay Assay to use in differential expression testing #' @param slot Slot to pull data from; note that if \code{test.use} is "negbinom", "poisson", or "DESeq2", #' \code{slot} will be set to "counts" #' #' @importFrom methods is #' #' @rdname FindMarkers #' @export #' @method FindMarkers Seurat #' FindMarkers.Seurat <- function( object, ident.1 = NULL, ident.2 = NULL, group.by = NULL, subset.ident = NULL, assay = NULL, slot = 'data', reduction = NULL, features = NULL, logfc.threshold = 0.25, test.use = "wilcox", min.pct = 0.1, min.diff.pct = -Inf, verbose = TRUE, only.pos = FALSE, max.cells.per.ident = Inf, random.seed = 1, latent.vars = NULL, min.cells.feature = 3, min.cells.group = 3, pseudocount.use = 1, ... ) { if (!is.null(x = group.by)) { if (!is.null(x = subset.ident)) { object <- subset(x = object, idents = subset.ident) } Idents(object = object) <- group.by } if (!is.null(x = assay) && !is.null(x = reduction)) { stop("Please only specify either assay or reduction.") } data.slot <- ifelse( test = test.use %in% c("negbinom", "poisson", "DESeq2"), yes = 'counts', no = slot ) if (is.null(x = reduction)) { assay <- assay %||% DefaultAssay(object = object) data.use <- GetAssayData(object = object[[assay]], slot = data.slot) } else { if (data.slot == "counts") { stop("The following tests cannot be used when specifying a reduction as they assume a count model: negbinom, poisson, DESeq2") } data.use <- t(x = Embeddings(object = object, reduction = reduction)) } if (is.null(x = ident.1)) { stop("Please provide ident.1") } else if ((length(x = ident.1) == 1 && ident.1[1] == 'clustertree') || is(object = ident.1, class2 = 'phylo')) { if (is.null(x = ident.2)) { stop("Please pass a node to 'ident.2' to run FindMarkers on a tree") } tree <- if (is(object = ident.1, class2 = 'phylo')) { ident.1 } else { Tool(object = object, slot = 'BuildClusterTree') } if (is.null(x = tree)) { stop("Please run 'BuildClusterTree' 
or pass an object of class 'phylo' as 'ident.1'") } ident.1 <- tree$tip.label[GetLeftDescendants(tree = tree, node = ident.2)] ident.2 <- tree$tip.label[GetRightDescendants(tree = tree, node = ident.2)] } if (length(x = as.vector(x = ident.1)) > 1 && any(as.character(x = ident.1) %in% colnames(x = data.use))) { bad.cells <- colnames(x = data.use)[which(x = !as.character(x = ident.1) %in% colnames(x = data.use))] if (length(x = bad.cells) > 0) { stop(paste0("The following cell names provided to ident.1 are not present in the object: ", paste(bad.cells, collapse = ", "))) } } else { ident.1 <- Seurat::WhichCells(object = object, idents = ident.1) } # if NULL for ident.2, use all other cells if (length(x = as.vector(x = ident.2)) > 1 && any(as.character(x = ident.2) %in% colnames(x = data.use))) { bad.cells <- colnames(x = data.use)[which(!as.character(x = ident.2) %in% colnames(x = data.use))] if (length(x = bad.cells) > 0) { stop(paste0("The following cell names provided to ident.2 are not present in the object: ", paste(bad.cells, collapse = ", "))) } } else { if (is.null(x = ident.2)) { ident.2 <- setdiff(x = colnames(x = data.use), y = ident.1) } else { ident.2 <- Seurat::WhichCells(object = object, idents = ident.2) } } if (!is.null(x = latent.vars)) { latent.vars <- FetchData( object = object, vars = latent.vars, cells = c(ident.1, ident.2) ) } counts <- switch( EXPR = data.slot, 'scale.data' = GetAssayData(object = object[[assay]], slot = "counts"), numeric() ) de.results <- FindMarkers( object = data.use, slot = data.slot, counts = counts, cells.1 = ident.1, cells.2 = ident.2, features = features, reduction = reduction, logfc.threshold = logfc.threshold, test.use = test.use, min.pct = min.pct, min.diff.pct = min.diff.pct, verbose = verbose, only.pos = only.pos, max.cells.per.ident = max.cells.per.ident, random.seed = random.seed, latent.vars = latent.vars, min.cells.feature = min.cells.feature, min.cells.group = min.cells.group, pseudocount.use = 
pseudocount.use, ... ) return(de.results) } #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # Internal #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # internal function to calculate AUC values #' @importFrom pbapply pblapply # AUCMarkerTest <- function(data1, data2, mygenes, print.bar = TRUE) { myAUC <- unlist(x = lapply( X = mygenes, FUN = function(x) { return(DifferentialAUC( x = as.numeric(x = data1[x, ]), y = as.numeric(x = data2[x, ]) )) } )) myAUC[is.na(x = myAUC)] <- 0 iterate.fxn <- ifelse(test = print.bar, yes = pblapply, no = lapply) avg_diff <- unlist(x = iterate.fxn( X = mygenes, FUN = function(x) { return( ExpMean( x = as.numeric(x = data1[x, ]) ) - ExpMean( x = as.numeric(x = data2[x, ]) ) ) } )) toRet <- data.frame(cbind(myAUC, avg_diff), row.names = mygenes) toRet <- toRet[rev(x = order(toRet$myAUC)), ] return(toRet) } #internal function to run mcdavid et al. DE test # #' @importFrom stats sd dnorm # bimodLikData <- function(x, xmin = 0) { x1 <- x[x <= xmin] x2 <- x[x > xmin] xal <- MinMax( data = length(x = x2) / length(x = x), min = 1e-5, max = (1 - 1e-5) ) likA <- length(x = x1) * log(x = 1 - xal) if (length(x = x2) < 2) { mysd <- 1 } else { mysd <- sd(x = x2) } likB <- length(x = x2) * log(x = xal) + sum(dnorm(x = x2, mean = mean(x = x2), sd = mysd, log = TRUE)) return(likA + likB) } # Differential expression using DESeq2 # # Identifies differentially expressed genes between two groups of cells using # DESeq2 # # @references Love MI, Huber W and Anders S (2014). "Moderated estimation of # fold change and dispersion for RNA-seq data with DESeq2." Genome Biology. # https://bioconductor.org/packages/release/bioc/html/DESeq2.html # @param data.use Data matrix to test # @param cells.1 Group 1 cells # @param cells.2 Group 2 cells # @param verbose Print a progress bar # @param ... 
# Extra parameters to pass to DESeq2::results
# @return Returns a p-value ranked matrix of putative differentially expressed
# genes.
#
# @details
# This test does not support pre-filtering of genes based on average difference
# (or percent detection rate) between cell groups. However, genes may be
# pre-filtered based on their minimum detection rate (min.pct) across both cell
# groups. To use this method, please install DESeq2, using the instructions at
# https://bioconductor.org/packages/release/bioc/html/DESeq2.html
#
# @export
#
# @examples
# \dontrun{
# pbmc_small
# DESeq2DETest(pbmc_small, cells.1 = WhichCells(object = pbmc_small, idents = 1),
#             cells.2 = WhichCells(object = pbmc_small, idents = 2))
# }
#
DESeq2DETest <- function(
  data.use,
  cells.1,
  cells.2,
  verbose = TRUE,
  ...
) {
  # DESeq2 is an optional Bioconductor dependency; fail early with guidance.
  if (!Seurat:::PackageCheck('DESeq2', error = FALSE)) {
    stop("Please install DESeq2 - learn more at https://bioconductor.org/packages/release/bioc/html/DESeq2.html")
  }
  # Build a two-level group factor over the union of the two cell sets.
  group.info <- data.frame(row.names = c(cells.1, cells.2))
  group.info[cells.1, "group"] <- "Group1"
  group.info[cells.2, "group"] <- "Group2"
  group.info[, "group"] <- factor(x = group.info[, "group"])
  group.info$wellKey <- rownames(x = group.info)
  # Standard DESeq2 pipeline: size factors -> dispersions -> Wald test.
  dds1 <- DESeq2::DESeqDataSetFromMatrix(
    countData = data.use,
    colData = group.info,
    design = ~ group
  )
  dds1 <- DESeq2::estimateSizeFactors(object = dds1)
  dds1 <- DESeq2::estimateDispersions(object = dds1, fitType = "local")
  dds1 <- DESeq2::nbinomWaldTest(object = dds1)
  res <- DESeq2::results(
    object = dds1,
    contrast = c("group", "Group1", "Group2"),
    alpha = 0.05,
    ...
  )
  to.return <- data.frame(p_val = res$pvalue, row.names = rownames(res))
  return(to.return)
}

# internal function to calculate AUC values
#
# Classification AUC (rounded to 3 digits) for separating x (labelled 1) from
# y (labelled 0), using the raw expression values as scores.
#' @importFrom ROCR prediction performance
#'
DifferentialAUC <- function(x, y) {
  prediction.use <- prediction(
    predictions = c(x, y),
    labels = c(rep(x = 1, length(x = x)), rep(x = 0, length(x = y))),
    label.ordering = 0:1
  )
  perf.use <- performance(prediction.obj = prediction.use, measure = "auc")
  auc.use <- round(x = perf.use@y.values[[1]], digits = 3)
  return(auc.use)
}

# internal function to run mcdavid et al. DE test
#
# Likelihood-ratio test: separate bimodal fits for x and y versus a pooled
# fit; the statistic is referred to a chi-squared distribution with 3 df.
# NOTE(review): the xmin argument is accepted but not forwarded to
# bimodLikData (which defaults to xmin = 0) -- confirm intent.
#
#' @importFrom stats pchisq
#
DifferentialLRT <- function(x, y, xmin = 0) {
  lrtX <- bimodLikData(x = x)
  lrtY <- bimodLikData(x = y)
  lrtZ <- bimodLikData(x = c(x, y))
  lrt_diff <- 2 * (lrtX + lrtY - lrtZ)
  # FIX: spell out FALSE instead of the re-assignable shorthand F
  return(pchisq(q = lrt_diff, df = 3, lower.tail = FALSE))
}

# Likelihood ratio test for zero-inflated data
#
# Identifies differentially expressed genes between two groups of cells using
# the LRT model proposed in McDavid et al, Bioinformatics, 2013
#
# @inheritParams FindMarkers
# @param object Seurat object
# @param cells.1 Group 1 cells
# @param cells.2 Group 2 cells
# @param assay.type Type of assay to fetch data for (default is RNA)
# @return Returns a p-value ranked matrix of putative differentially expressed
# genes.
#
#' @importFrom pbapply pbsapply
#' @importFrom future.apply future_sapply
#' @importFrom future nbrOfWorkers
#
# @export
# @examples
# pbmc_small
# DiffExpTest(pbmc_small, cells.1 = WhichCells(object = pbmc_small, idents = 1),
#            cells.2 = WhichCells(object = pbmc_small, idents = 2))
#
DiffExpTest <- function(
  data.use,
  cells.1,
  cells.2,
  verbose = TRUE
) {
  # Progress-bar sapply only when verbose and single-worker; a progress bar is
  # meaningless across parallel workers. (FIX: plain if/else instead of
  # ifelse() on closures, which only works via a subtle scalar fast path.)
  my.sapply <- if (verbose && nbrOfWorkers() == 1) {
    pbsapply
  } else {
    future_sapply
  }
  # One bimodal LRT per gene (row). FIX: seq_len avoids the 1:0 trap on
  # zero-row input.
  p_val <- unlist(
    x = my.sapply(
      X = seq_len(nrow(x = data.use)),
      FUN = function(x) {
        return(DifferentialLRT(
          x = as.numeric(x = data.use[x, cells.1]),
          y = as.numeric(x = data.use[x, cells.2])
        ))
      }
    )
  )
  to.return <- data.frame(p_val, row.names = rownames(x = data.use))
  return(to.return)
}

# Differential expression testing using Student's t-test
#
# Identify differentially expressed genes between two groups of cells using
# the Student's t-test
#
# @return Returns a p-value ranked matrix of putative differentially expressed
# genes.
#
#' @importFrom stats t.test
#' @importFrom pbapply pbsapply
#' @importFrom future.apply future_sapply
#' @importFrom future nbrOfWorkers
#
# @export
#
# @examples
# pbmc_small
# DiffTTest(pbmc_small, cells.1 = WhichCells(object = pbmc_small, idents = 1),
#          cells.2 = WhichCells(object = pbmc_small, idents = 2))
DiffTTest <- function(
  data.use,
  cells.1,
  cells.2,
  verbose = TRUE
) {
  # see DiffExpTest for the rationale behind this dispatch
  my.sapply <- if (verbose && nbrOfWorkers() == 1) {
    pbsapply
  } else {
    future_sapply
  }
  # Welch two-sample t-test per gene; seq_len avoids the 1:0 trap.
  p_val <- unlist(
    x = my.sapply(
      X = seq_len(nrow(data.use)),
      FUN = function(x) {
        t.test(x = data.use[x, cells.1], y = data.use[x, cells.2])$p.value
      }
    )
  )
  to.return <- data.frame(p_val, row.names = rownames(x = data.use))
  return(to.return)
}

# Tests for UMI-count based data
#
# Identifies differentially expressed genes between two groups of cells using
# either a negative binomial or poisson generalized linear model
#
# @param data.use Data to test
# @param cells.1 Group 1 cells
# @param cells.2 Group 2 cells
# @param min.cells Minimum
# number of cells threshold
# @param latent.vars Latent variables to test
# @param test.use parameterizes the glm
# @param verbose Print progress bar
#
# @return Returns a p-value ranked matrix of putative differentially expressed
# genes.
#
#' @importFrom MASS glm.nb
#' @importFrom pbapply pbsapply
#' @importFrom stats var as.formula
#' @importFrom future.apply future_sapply
#' @importFrom future nbrOfWorkers
#'
# @export
#
# @examples
# pbmc_small
# # Note, not recommended for particularly small datasets - expect warnings
# NegBinomDETest(pbmc_small, cells.1 = WhichCells(object = pbmc_small, idents = 1),
#               cells.2 = WhichCells(object = pbmc_small, idents = 2))
#
GLMDETest <- function(
  data.use,
  cells.1,
  cells.2,
  min.cells = 3,
  latent.vars = NULL,
  test.use = NULL,
  verbose = TRUE
) {
  # Group labels in the same order as c(cells.1, cells.2).
  group.info <- data.frame(
    group = rep(
      x = c('Group1', 'Group2'),
      times = c(length(x = cells.1), length(x = cells.2))
    )
  )
  rownames(group.info) <- c(cells.1, cells.2)
  group.info[, "group"] <- factor(x = group.info[, "group"])
  # The model frame always contains the group factor; user latent variables
  # are appended when supplied.
  latent.vars <- if (is.null(x = latent.vars)) {
    group.info
  } else {
    cbind(x = group.info, latent.vars)
  }
  latent.var.names <- colnames(x = latent.vars)
  # FIX: plain if/else instead of ifelse() on closures.
  my.sapply <- if (verbose && nbrOfWorkers() == 1) {
    pbsapply
  } else {
    future_sapply
  }
  # A returned p-value of 2 is a sentinel meaning "gene skipped"; such genes
  # are filtered out of the result below.
  p_val <- unlist(
    x = my.sapply(
      # FIX: seq_len avoids the 1:0 trap on zero-row input
      X = seq_len(nrow(data.use)),
      FUN = function(x) {
        latent.vars[, "GENE"] <- as.numeric(x = data.use[x, ])
        # check that gene is expressed in specified number of cells in one group
        if (sum(latent.vars$GENE[latent.vars$group == "Group1"] > 0) < min.cells &&
            sum(latent.vars$GENE[latent.vars$group == "Group2"] > 0) < min.cells) {
          warning(paste0(
            "Skipping gene --- ",
            x,
            ". Fewer than ",
            min.cells,
            " cells in both clusters."
          ))
          return(2)
        }
        # check that variance between groups is not 0
        if (var(x = latent.vars$GENE) == 0) {
          warning(paste0(
            "Skipping gene -- ",
            x,
            ". No variance in expression between the two clusters."
          ))
          return(2)
        }
        fmla <- as.formula(object = paste(
          "GENE ~",
          paste(latent.var.names, collapse = "+")
        ))
        p.estimate <- 2
        if (test.use == "negbinom") {
          # glm.nb may fail to converge; keep the sentinel in that case
          try(
            expr = p.estimate <- summary(
              object = glm.nb(formula = fmla, data = latent.vars)
            )$coef[2, 4],
            silent = TRUE
          )
          return(p.estimate)
        } else if (test.use == "poisson") {
          return(summary(object = glm(
            formula = fmla,
            data = latent.vars,
            family = "poisson"
          ))$coef[2, 4])
        }
      }
    )
  )
  # drop skipped genes (sentinel p-value of 2)
  features.keep <- rownames(data.use)
  if (length(x = which(x = p_val == 2)) > 0) {
    features.keep <- features.keep[-which(x = p_val == 2)]
    p_val <- p_val[!p_val == 2]
  }
  to.return <- data.frame(p_val, row.names = features.keep)
  return(to.return)
}

# Perform differential expression testing using a logistic regression framework
#
# Constructs a logistic regression model predicting group membership based on a
# given feature and compares this to a null model with a likelihood ratio test.
#
# @param data.use expression matrix
# @param cells.1 Vector of cells in group 1
# @param cells2. Vector of cells in group 2
# @param latent.vars Latent variables to include in model
# @param verbose Print messages
#
#' @importFrom lmtest lrtest
#' @importFrom pbapply pbsapply
#' @importFrom stats as.formula glm
#' @importFrom future.apply future_sapply
#' @importFrom future nbrOfWorkers
#
LRDETest <- function(
  data.use,
  cells.1,
  cells.2,
  latent.vars = NULL,
  verbose = TRUE,
  ...
) {
  group.info <- data.frame(row.names = c(cells.1, cells.2))
  group.info[cells.1, "group"] <- "Group1"
  group.info[cells.2, "group"] <- "Group2"
  group.info[, "group"] <- factor(x = group.info[, "group"])
  # align columns/rows of the data and latent variables with the cell order
  data.use <- data.use[, rownames(group.info), drop = FALSE]
  latent.vars <- latent.vars[rownames(group.info), , drop = FALSE]
  my.sapply <- if (verbose && nbrOfWorkers() == 1) {
    pbsapply
  } else {
    future_sapply
  }
  p_val <- my.sapply(
    # FIX: seq_len instead of 1:nrow
    X = seq_len(nrow(x = data.use)),
    FUN = function(x) {
      # full model: group ~ GENE (+ latent vars); null model drops GENE
      if (is.null(x = latent.vars)) {
        model.data <- cbind(GENE = data.use[x, ], group.info)
        fmla <- as.formula(object = "group ~ GENE")
        fmla2 <- as.formula(object = "group ~ 1")
      } else {
        model.data <- cbind(GENE = data.use[x, ], group.info, latent.vars)
        fmla <- as.formula(object = paste(
          "group ~ GENE +",
          paste(colnames(x = latent.vars), collapse = "+")
        ))
        fmla2 <- as.formula(object = paste(
          "group ~",
          paste(colnames(x = latent.vars), collapse = "+")
        ))
      }
      model1 <- glm(formula = fmla, data = model.data, family = "binomial")
      model2 <- glm(formula = fmla2, data = model.data, family = "binomial")
      lrtest <- lrtest(model1, model2)
      return(lrtest$Pr[2])
    }
  )
  to.return <- data.frame(p_val, row.names = rownames(data.use))
  return(to.return)
}

# ROC-based marker discovery
#
# Identifies 'markers' of gene expression using ROC analysis. For each gene,
# evaluates (using AUC) a classifier built on that gene alone, to classify
# between two groups of cells.
#
# An AUC value of 1 means that expression values for this gene alone can
# perfectly classify the two groupings (i.e. Each of the cells in cells.1
# exhibit a higher level than each of the cells in cells.2). An AUC value of 0
# also means there is perfect classification, but in the other direction. A
# value of 0.5 implies that the gene has no predictive power to classify the
# two groups.
#
# @return Returns a 'predictive power' (abs(AUC-0.5) * 2) ranked matrix of
# putative differentially expressed genes.
#
# @export
#
# @examples
# pbmc_small
# MarkerTest(pbmc_small, cells.1 = WhichCells(object = pbmc_small, idents = 1),
#           cells.2 = WhichCells(object = pbmc_small, idents = 2))
#
MarkerTest <- function(
  data.use,
  cells.1,
  cells.2,
  verbose = TRUE
) {
  # per-gene AUC + avg_diff, then derive 'power' = abs(AUC - 0.5) * 2
  to.return <- AUCMarkerTest(
    data1 = data.use[, cells.1, drop = FALSE],
    data2 = data.use[, cells.2, drop = FALSE],
    mygenes = rownames(x = data.use),
    print.bar = verbose
  )
  to.return$power <- abs(x = to.return$myAUC - 0.5) * 2
  return(to.return)
}

# Differential expression using MAST
#
# Identifies differentially expressed genes between two groups of cells using
# a hurdle model tailored to scRNA-seq data. Utilizes the MAST package to run
# the DE testing.
#
# @references Andrew McDavid, Greg Finak and Masanao Yajima (2017). MAST: Model-based
# Analysis of Single Cell Transcriptomics. R package version 1.2.1.
# https://github.com/RGLab/MAST/
#
# @param data.use Data to test
# @param cells.1 Group 1 cells
# @param cells.2 Group 2 cells
# @param latent.vars Confounding variables to adjust for in DE test. Default is
# "nUMI", which adjusts for cellular depth (i.e. cellular detection rate). For
# non-UMI based data, set to nGene instead.
# @param verbose print output
# @param \dots Additional parameters to zero-inflated regression (zlm) function
# in MAST
# @details
# To use this method, please install MAST, using instructions at https://github.com/RGLab/MAST/
#
# @return Returns a p-value ranked matrix of putative differentially expressed
# genes.
#
#' @importFrom stats relevel
#
# @export
#
# @examples
# \dontrun{
# pbmc_small
# MASTDETest(pbmc_small, cells.1 = WhichCells(object = pbmc_small, idents = 1),
#           cells.2 = WhichCells(object = pbmc_small, idents = 2))
# }
#
MASTDETest <- function(
  data.use,
  cells.1,
  cells.2,
  latent.vars = NULL,
  verbose = TRUE,
  ...
) {
  # Check for MAST (optional Bioconductor dependency)
  if (!Seurat:::PackageCheck('MAST', error = FALSE)) {
    stop("Please install MAST - learn more at https://github.com/RGLab/MAST")
  }
  # standardize latent variables so coefficients are comparable
  if (length(x = latent.vars) > 0) {
    latent.vars <- scale(x = latent.vars)
  }
  group.info <- data.frame(row.names = c(cells.1, cells.2))
  latent.vars <- latent.vars %||% group.info
  group.info[cells.1, "group"] <- "Group1"
  group.info[cells.2, "group"] <- "Group2"
  group.info[, "group"] <- factor(x = group.info[, "group"])
  latent.vars.names <- c("condition", colnames(x = latent.vars))
  latent.vars <- cbind(latent.vars, group.info)
  latent.vars$wellKey <- rownames(x = latent.vars)
  # feature-level metadata required by MAST::FromMatrix
  fdat <- data.frame(rownames(x = data.use))
  colnames(x = fdat)[1] <- "primerid"
  rownames(x = fdat) <- fdat[, 1]
  sca <- MAST::FromMatrix(
    exprsArray = as.matrix(x = data.use),
    cData = latent.vars,
    fData = fdat
  )
  # Group1 is the reference level, so the LRT contrast is 'conditionGroup2'
  cond <- factor(x = SummarizedExperiment::colData(sca)$group)
  cond <- relevel(x = cond, ref = "Group1")
  SummarizedExperiment::colData(sca)$condition <- cond
  fmla <- as.formula(
    object = paste0(" ~ ", paste(latent.vars.names, collapse = "+"))
  )
  zlmCond <- MAST::zlm(formula = fmla, sca = sca, ...)
  summaryCond <- summary(object = zlmCond, doLRT = 'conditionGroup2')
  summaryDt <- summaryCond$datatable
  # fcHurdle <- merge(
  #   summaryDt[contrast=='conditionGroup2' & component=='H', .(primerid, `Pr(>Chisq)`)], #hurdle P values
  #   summaryDt[contrast=='conditionGroup2' & component=='logFC', .(primerid, coef, ci.hi, ci.lo)], by='primerid'
  # ) #logFC coefficients
  # fcHurdle[,fdr:=p.adjust(`Pr(>Chisq)`, 'fdr')]
  # 'H' rows carry the hurdle-model LRT p-values (column 4) per gene (column 1)
  p_val <- summaryDt[summaryDt[, "component"] == "H", 4]
  genes.return <- summaryDt[summaryDt[, "component"] == "H", 1]
  # p_val <- subset(summaryDt, component == "H")[, 4]
  # genes.return <- subset(summaryDt, component == "H")[, 1]
  to.return <- data.frame(p_val, row.names = genes.return)
  return(to.return)
}

# compare two negative binomial regression models
# model one uses only common factors (com.fac)
# model two additionally uses group factor (grp.fac)
#
# Returns c(dev1, dev2, pval, coef, log2.fc, freq1, freq2); all-NA stats when
# either fit fails to converge.
#
#' @importFrom stats glm anova coef
#
NBModelComparison <- function(y, theta, latent.data, com.fac, grp.fac) {
  tab <- as.matrix(x = table(y > 0, latent.data[, grp.fac]))
  # per-group fraction of cells with nonzero counts
  freqs <- tab['TRUE', ] / apply(X = tab, MARGIN = 2, FUN = sum)
  # sentinels: numeric 0 means "fit failed"
  fit2 <- 0
  fit4 <- 0
  try(
    expr = fit2 <- glm(
      formula = y ~ .,
      data = latent.data[, com.fac, drop = FALSE],
      family = MASS::negative.binomial(theta = theta)
    ),
    silent = TRUE
  )
  try(
    fit4 <- glm(
      formula = y ~ .,
      data = latent.data[, c(com.fac, grp.fac)],
      family = MASS::negative.binomial(theta = theta)
    ),
    silent = TRUE
  )
  # FIX: scalar short-circuit || and inherits() instead of vectorized | with
  # class(x)[1] == 'numeric' string comparison
  if (!inherits(x = fit2, what = 'glm') || !inherits(x = fit4, what = 'glm')) {
    message('One of the glm.nb calls failed')
    return(c(rep(x = NA, 5), freqs))
  }
  pval <- anova(fit2, fit4, test = 'Chisq')$'Pr(>Chi)'[2]
  # index of the group-factor coefficient in the larger model
  foi <- 2 + length(x = com.fac)
  log2.fc <- log2(x = 1 / exp(x = coef(object = fit4)[foi]))
  ret <- c(
    fit2$deviance,
    fit4$deviance,
    pval,
    coef(object = fit4)[foi],
    log2.fc,
    freqs
  )
  names(x = ret) <- c(
    'dev1', 'dev2', 'pval', 'coef', 'log2.fc', 'freq1', 'freq2'
  )
  return(ret)
}

# given a UMI count matrix, estimate NB theta parameter for each gene
# and use fit of relationship
# with mean to assign regularized theta to each gene
#
#' @importFrom stats glm loess poisson
#' @importFrom utils txtProgressBar setTxtProgressBar
#
RegularizedTheta <- function(cm, latent.data, min.theta = 0.01, bin.size = 128) {
  genes.regress <- rownames(x = cm)
  # FIX: seq_along instead of 1:length (safe when there are zero genes)
  bin.ind <- ceiling(x = seq_along(genes.regress) / bin.size)
  max.bin <- max(bin.ind)
  message('Running Poisson regression (to get initial mean), and theta estimation per gene')
  pb <- txtProgressBar(min = 0, max = max.bin, style = 3, file = stderr())
  theta.estimate <- c()
  # per-bin ML theta estimation, parallelized over genes within each bin
  for (i in seq_len(max.bin)) {
    genes.bin.regress <- genes.regress[bin.ind == i]
    bin.theta.estimate <- unlist(
      x = parallel::mclapply(
        X = genes.bin.regress,
        FUN = function(j) {
          # Poisson fit supplies the per-cell mean for theta.ml
          return(as.numeric(x = MASS::theta.ml(
            y = cm[j, ],
            mu = glm(
              formula = cm[j, ] ~ .,
              data = latent.data,
              family = poisson
            )$fitted
          )))
        }
      ),
      use.names = FALSE
    )
    theta.estimate <- c(theta.estimate, bin.theta.estimate)
    setTxtProgressBar(pb = pb, value = i)
  }
  close(con = pb)
  # NB variance: mean + mean^2 / theta; regularize theta via a loess fit of
  # log-variance on log-mean, widening the span until the fit has no NAs
  UMI.mean <- apply(X = cm, MARGIN = 1, FUN = mean)
  var.estimate <- UMI.mean + (UMI.mean ^ 2) / theta.estimate
  for (span in c(1/3, 1/2, 3/4, 1)) {
    fit <- loess(
      formula = log10(x = var.estimate) ~ log10(x = UMI.mean),
      span = span
    )
    if (!any(is.na(x = fit$fitted))) {
      message(sprintf(
        'Used loess with span %1.2f to fit mean-variance relationship\n',
        span
      ))
      break
    }
  }
  if (any(is.na(x = fit$fitted))) {
    stop('Problem when fitting NB gene variance in RegularizedTheta - NA values were fitted.')
  }
  # invert the variance relation to recover a regularized theta per gene
  theta.fit <- (UMI.mean ^ 2) / ((10 ^ fit$fitted) - UMI.mean)
  names(x = theta.fit) <- genes.regress
  # floor tiny/infinite thetas at min.theta
  to.fix <- theta.fit <= min.theta | is.infinite(x = theta.fit)
  if (any(to.fix)) {
    message(
      'Fitted theta below ', min.theta, ' for ', sum(to.fix),
      ' genes, setting them to ', min.theta
    )
    theta.fit[to.fix] <- min.theta
  }
  return(theta.fit)
}

# Differential expression using Wilcoxon Rank Sum
#
# Identifies differentially expressed genes between two groups of cells using
# a Wilcoxon Rank Sum test
#
# @param data.use Data matrix to test
# @param cells.1 Group 1 cells
# @param cells.2 Group 2 cells
# @param verbose Print a progress bar
# @param ... Extra parameters passed to wilcox.test
#
# @return Returns a p-value ranked matrix of putative differentially expressed
# features
#
#' @importFrom pbapply pbsapply
#' @importFrom stats wilcox.test
#' @importFrom future.apply future_sapply
#' @importFrom future nbrOfWorkers
#
# @export
#
# @examples
# pbmc_small
# WilcoxDETest(pbmc_small, cells.1 = WhichCells(object = pbmc_small, idents = 1),
#             cells.2 = WhichCells(object = pbmc_small, idents = 2))
#
WilcoxDETest <- function(
  data.use,
  cells.1,
  cells.2,
  verbose = TRUE,
  ...
) {
  group.info <- data.frame(row.names = c(cells.1, cells.2))
  group.info[cells.1, "group"] <- "Group1"
  group.info[cells.2, "group"] <- "Group2"
  group.info[, "group"] <- factor(x = group.info[, "group"])
  # align the expression columns with the cell ordering of group.info
  data.use <- data.use[, rownames(x = group.info), drop = FALSE]
  # FIX: plain if/else instead of ifelse() on closures
  my.sapply <- if (verbose && nbrOfWorkers() == 1) {
    pbsapply
  } else {
    future_sapply
  }
  p_val <- my.sapply(
    # FIX: seq_len instead of 1:nrow
    X = seq_len(nrow(x = data.use)),
    FUN = function(x) {
      return(wilcox.test(data.use[x, ] ~ group.info[, "group"], ...)$p.value)
    }
  )
  return(data.frame(p_val, row.names = rownames(x = data.use)))
}
/R/seurat_utils.R
permissive
jspaezp/sctree
R
false
false
59,332
r
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# This is a bundled copy of
# https://github.com/satijalab/seurat/blob/master/R/differential_expression.R
# only to over-write the behaviour of FindMarkers and derivatives ...
# this addition is intended to preserve compatibility in the output given
# by our function and
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

#' @importFrom Seurat FindMarkers DefaultAssay GetAssayData WhichCells
NULL

#' Gene expression markers of identity classes
#'
#' Finds markers (differentially expressed genes) for identity classes
#'
#' @param object An object
#' @param ... Arguments passed to other methods and to specific DE methods
#' @return data.frame with a ranked list of putative markers as rows, and associated
#' statistics as columns (p-values, ROC score, etc., depending on the test used (\code{test.use})). The following columns are always present:
#' \itemize{
#'   \item \code{avg_logFC}: log fold-change of the average expression between the two groups. Positive values indicate that the gene is more highly expressed in the first group
#'   \item \code{pct.1}: The percentage of cells where the gene is detected in the first group
#'   \item \code{pct.2}: The percentage of cells where the gene is detected in the second group
#'   \item \code{p_val_adj}: Adjusted p-value, based on bonferroni correction using all genes in the dataset
#' }
#'
#' @details p-value adjustment is performed using bonferroni correction based on
#' the total number of genes in the dataset. Other correction methods are not
#' recommended, as Seurat pre-filters genes using the arguments above, reducing
#' the number of tests performed. Lastly, as Aaron Lun has pointed out, p-values
#' should be interpreted cautiously, as the genes used for clustering are the
#' same genes tested for differential expression.
#'
#' @references McDavid A, Finak G, Chattopadyay PK, et al. Data exploration,
#' quality control and testing in single-cell qPCR-based gene expression experiments.
#' Bioinformatics. 2013;29(4):461-467. doi:10.1093/bioinformatics/bts714
#' @references Trapnell C, et al. The dynamics and regulators of cell fate
#' decisions are revealed by pseudotemporal ordering of single cells. Nature
#' Biotechnology volume 32, pages 381-386 (2014)
#' @references Andrew McDavid, Greg Finak and Masanao Yajima (2017). MAST: Model-based
#' Analysis of Single Cell Transcriptomics. R package version 1.2.1.
#' https://github.com/RGLab/MAST/
#' @references Love MI, Huber W and Anders S (2014). "Moderated estimation of
#' fold change and dispersion for RNA-seq data with DESeq2." Genome Biology.
#' https://bioconductor.org/packages/release/bioc/html/DESeq2.html
#'
#' @export
#'
#' @examples
#' # Find markers for cluster 2
#' markers <- FindMarkers(object = pbmc_small, ident.1 = 2)
#' head(x = markers)
#'
#' # Take all cells in cluster 2, and find markers that separate cells in the 'g1' group (metadata
#' # variable 'group')
#' suppressWarnings(markers <- FindMarkers(pbmc_small, ident.1 = "g1", group.by = 'groups', subset.ident = "2"))
#' head(x = markers)
#'
#' # Pass 'clustertree' or an object of class phylo to ident.1 and
#' # a node to ident.2 as a replacement for FindMarkersNode
#' # pbmc_small <- BuildClusterTree(object = pbmc_small)
#' # markers <- FindMarkers(object = pbmc_small, ident.1 = 'clustertree', ident.2 = 5)
#' # head(x = markers)
#'
#' @rdname FindMarkers
#' @export FindMarkers
#'
#' @aliases FindMarkersNode
#'
FindMarkers <- function(object, ...) {
  UseMethod(generic = 'FindMarkers', object = object)
}

#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Functions
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

# globalVariables(
#   names = c('myAUC', 'p_val', 'avg_logFC'),
#   package = 'Seurat',
#   add = TRUE
# )

#' Gene expression markers for all identity classes
#'
#' Finds markers (differentially expressed genes) for each of the identity classes in a dataset
#'
#' @inheritParams FindMarkers
#' @param node A node to find markers for and all its children; requires
#' \code{\link{BuildClusterTree}} to have been run previously; replaces \code{FindAllMarkersNode}
#' @param return.thresh Only return markers that have a p-value < return.thresh, or a power > return.thresh (if the test is ROC)
#'
#' @return Matrix containing a ranked list of putative markers, and associated
#' statistics (p-values, ROC score, etc.)
#'
#' @importFrom ape drop.tip
#' @importFrom stats setNames
#'
#' @export
#'
#' @aliases FindAllMarkersNode
#'
#' @examples
#' # Find markers for all clusters
#' suppressWarnings(all.markers <- FindAllMarkers(object = pbmc_small))
#' head(x = all.markers)
#' \dontrun{
#' # Pass a value to node as a replacement for FindAllMarkersNode
#' pbmc_small <- BuildClusterTree(object = pbmc_small)
#' all.markers <- FindAllMarkers(object = pbmc_small, node = 4)
#' head(x = all.markers)
#' }
#'
FindAllMarkers <- function(
  object,
  assay = NULL,
  features = NULL,
  logfc.threshold = 0.25,
  test.use = 'wilcox',
  slot = 'data',
  min.pct = 0.1,
  min.diff.pct = -Inf,
  node = NULL,
  verbose = TRUE,
  only.pos = FALSE,
  max.cells.per.ident = Inf,
  random.seed = 1,
  latent.vars = NULL,
  min.cells.feature = 3,
  min.cells.group = 3,
  pseudocount.use = 1,
  return.thresh = 1e-2,
  ...
) {
  # map tree-node ids back to their original labels; unmapped values pass through
  MapVals <- function(vec, from, to) {
    vec2 <- setNames(object = to, nm = from)[as.character(x = vec)]
    vec2[is.na(x = vec2)] <- vec[is.na(x = vec2)]
    return(unname(obj = vec2))
  }
  # the roc test reports power, not p-values, so the default threshold differs
  if ((test.use == "roc") && (return.thresh == 1e-2)) {
    return.thresh <- 0.7
  }
  if (is.null(x = node)) {
    idents.all <- sort(x = unique(x = Idents(object = object)))
  } else {
    # restrict testing to the subtree rooted at 'node' of the cluster tree
    tree <- Seurat:::Tool(object = object, slot = 'BuildClusterTree')
    if (is.null(x = tree)) {
      stop("Please run 'BuildClusterTree' before finding markers on nodes")
    }
    descendants <- Seurat:::DFT(tree = tree, node = node, include.children = TRUE)
    all.children <- sort(x = tree$edge[, 2][!tree$edge[, 2] %in% tree$edge[, 1]])
    descendants <- MapVals(
      vec = descendants,
      from = all.children,
      to = tree$tip.label
    )
    drop.children <- setdiff(x = tree$tip.label, y = descendants)
    keep.children <- setdiff(x = tree$tip.label, y = drop.children)
    orig.nodes <- c(
      node,
      as.numeric(x = setdiff(x = descendants, y = keep.children))
    )
    tree <- drop.tip(phy = tree, tip = drop.children)
    new.nodes <- unique(x = tree$edge[, 1, drop = TRUE])
    idents.all <- (tree$Nnode + 2):max(tree$edge)
  }
  genes.de <- list()
  messages <- list()
  # FIX: seq_along instead of 1:length
  for (i in seq_along(idents.all)) {
    if (verbose) {
      message("Calculating cluster ", idents.all[i])
    }
    genes.de[[i]] <- tryCatch(
      expr = {
        FindMarkers(
          object = object,
          assay = assay,
          ident.1 = if (is.null(x = node)) {
            idents.all[i]
          } else {
            tree
          },
          ident.2 = if (is.null(x = node)) {
            NULL
          } else {
            idents.all[i]
          },
          features = features,
          logfc.threshold = logfc.threshold,
          test.use = test.use,
          slot = slot,
          min.pct = min.pct,
          min.diff.pct = min.diff.pct,
          verbose = verbose,
          only.pos = only.pos,
          max.cells.per.ident = max.cells.per.ident,
          random.seed = random.seed,
          latent.vars = latent.vars,
          min.cells.feature = min.cells.feature,
          min.cells.group = min.cells.group,
          pseudocount.use = pseudocount.use,
          ...
        )
      },
      # on error, keep the message so it can be reported at the end
      error = function(cond) {
        return(cond$message)
      }
    )
    # FIX: is.character instead of class(x) == "character"
    if (is.character(x = genes.de[[i]])) {
      messages[[i]] <- genes.de[[i]]
      genes.de[[i]] <- NULL
    }
  }
  # assemble per-cluster results, applying the return threshold
  gde.all <- data.frame()
  for (i in seq_along(idents.all)) {
    if (is.null(x = unlist(x = genes.de[i]))) {
      next
    }
    gde <- genes.de[[i]]
    if (nrow(x = gde) > 0) {
      if (test.use == "roc") {
        gde <- subset(
          x = gde,
          subset = (myAUC > return.thresh | myAUC < (1 - return.thresh))
        )
      } else if (is.null(x = node) || test.use %in% c('bimod', 't')) {
        gde <- gde[order(gde$p_val, -gde[, 2]), ]
        gde <- subset(x = gde, subset = p_val < return.thresh)
      }
      if (nrow(x = gde) > 0) {
        gde$cluster <- idents.all[i]
        gde$gene <- rownames(x = gde)
      }
      if (nrow(x = gde) > 0) {
        gde.all <- rbind(gde.all, gde)
      }
    }
  }
  if ((only.pos) && nrow(x = gde.all) > 0) {
    # scale.data and roc report avg_diff rather than avg_logFC
    diff.col <- if (slot == "scale.data" || test.use == "roc") {
      "avg_diff"
    } else {
      "avg_logFC"
    }
    return(subset(x = gde.all, subset = gde.all[, diff.col] > 0))
  }
  rownames(x = gde.all) <- make.unique(names = as.character(x = gde.all$gene))
  if (nrow(x = gde.all) == 0) {
    warning("No DE genes identified", call. = FALSE, immediate. = TRUE)
  }
  if (length(x = messages) > 0) {
    warning("The following tests were not performed: ", call. = FALSE, immediate. = TRUE)
    for (i in seq_along(messages)) {
      if (!is.null(x = messages[[i]])) {
        warning("When testing ", idents.all[i], " versus all:\n\t", messages[[i]], call. = FALSE, immediate. = TRUE)
      }
    }
  }
  if (!is.null(x = node)) {
    # translate pruned-tree node ids back to the original node numbering
    gde.all$cluster <- MapVals(
      vec = gde.all$cluster,
      from = new.nodes,
      to = orig.nodes
    )
  }
  return(gde.all)
}

#' Finds markers that are conserved between the groups
#'
#' @inheritParams FindMarkers
#' @param ident.1 Identity class to define markers for
#' @param ident.2 A second identity class for comparison. If NULL (default) -
#' use all other cells for comparison.
#' @param grouping.var grouping variable
#' @param assay of assay to fetch data for (default is RNA)
#' @param meta.method method for combining p-values.
#' Should be a function from
#' the metap package (NOTE: pass the function, not a string)
#' @param \dots parameters to pass to FindMarkers
#'
#' @return data.frame containing a ranked list of putative conserved markers, and
#' associated statistics (p-values within each group and a combined p-value
#' (such as Fishers combined p-value or others from the metap package),
#' percentage of cells expressing the marker, average differences). Name of group is appended to each
#' associated output column (e.g. CTRL_p_val). If only one group is tested in the grouping.var, max
#' and combined p-values are not returned.
#'
#' @importFrom metap minimump
#'
#' @export
#'
#' @examples
#' \dontrun{
#' pbmc_small
#' # Create a simulated grouping variable
#' pbmc_small[['groups']] <- sample(x = c('g1', 'g2'), size = ncol(x = pbmc_small), replace = TRUE)
#' FindConservedMarkers(pbmc_small, ident.1 = 0, ident.2 = 1, grouping.var = "groups")
#' }
#'
FindConservedMarkers <- function(
  object,
  ident.1,
  ident.2 = NULL,
  grouping.var,
  assay = 'RNA',
  slot = 'data',
  meta.method = minimump,
  verbose = TRUE,
  ...
) {
  # FIX: is.function instead of class(x) != "function"
  if (!is.function(x = meta.method)) {
    stop("meta.method should be a function from the metap package. Please see https://cran.r-project.org/web/packages/metap/metap.pdf for a detailed description of the available functions.")
  }
  # re-label identities as "<ident>_<group>" so each (cluster, group) pair is
  # testable on its own
  object.var <- FetchData(object = object, vars = grouping.var)
  object <- Seurat::SetIdent(
    object = object,
    cells = colnames(x = object),
    value = paste(Idents(object = object), object.var[, 1], sep = "_")
  )
  levels.split <- names(x = sort(x = table(object.var[, 1])))
  num.groups <- length(levels.split)
  cells <- list()
  # FIX: seq_len instead of 1:num.groups
  for (i in seq_len(num.groups)) {
    cells[[i]] <- rownames(
      x = object.var[object.var[, 1] == levels.split[i], , drop = FALSE]
    )
  }
  marker.test <- list()
  # do marker tests: one DE run per grouping-variable level
  for (i in seq_len(num.groups)) {
    level.use <- levels.split[i]
    ident.use.1 <- paste(ident.1, level.use, sep = "_")
    ident.use.1.exists <- ident.use.1 %in% Idents(object = object)
    if (!all(ident.use.1.exists)) {
      bad.ids <- ident.1[!ident.use.1.exists]
      warning(
        "Identity: ",
        paste(bad.ids, collapse = ", "),
        " not present in group ",
        level.use,
        ". Skipping ",
        level.use,
        call. = FALSE,
        immediate. = TRUE
      )
      next
    }
    cells.1 <- Seurat::WhichCells(object = object, idents = ident.use.1)
    # if NULL for ident.2, compare against everything else within this group
    if (is.null(x = ident.2)) {
      cells.2 <- setdiff(x = cells[[i]], y = cells.1)
      ident.use.2 <- names(x = which(x = table(Idents(object = object)[cells.2]) > 0))
      ident.2 <- gsub(pattern = paste0("_", level.use), replacement = "", x = ident.use.2)
      if (length(x = ident.use.2) == 0) {
        stop(paste("Only one identity class present:", ident.1))
      }
    } else {
      ident.use.2 <- paste(ident.2, level.use, sep = "_")
    }
    if (verbose) {
      message(
        "Testing group ",
        level.use,
        ": (",
        paste(ident.1, collapse = ", "),
        ") vs (",
        paste(ident.2, collapse = ", "),
        ")"
      )
    }
    ident.use.2.exists <- ident.use.2 %in% Idents(object = object)
    if (!all(ident.use.2.exists)) {
      bad.ids <- ident.2[!ident.use.2.exists]
      warning(
        "Identity: ",
        paste(bad.ids, collapse = ", "),
        " not present in group ",
        level.use,
        ". Skipping ",
        level.use,
        call. = FALSE,
        immediate. = TRUE
      )
      next
    }
    marker.test[[i]] <- FindMarkers(
      object = object,
      assay = assay,
      slot = slot,
      ident.1 = ident.use.1,
      ident.2 = ident.use.2,
      verbose = verbose,
      ...
    )
  }
  names(x = marker.test) <- levels.split
  marker.test <- Filter(f = Negate(f = is.null), x = marker.test)
  # only genes recovered as markers in every tested group are 'conserved'
  genes.conserved <- Reduce(
    f = intersect,
    x = lapply(
      X = marker.test,
      FUN = function(x) {
        return(rownames(x = x))
      }
    )
  )
  markers.conserved <- list()
  # FIX: seq_along instead of 1:length
  for (i in seq_along(marker.test)) {
    markers.conserved[[i]] <- marker.test[[i]][genes.conserved, ]
    # prefix each stat column with its group name (e.g. CTRL_p_val)
    colnames(x = markers.conserved[[i]]) <- paste(
      names(x = marker.test)[i],
      colnames(x = markers.conserved[[i]]),
      sep = "_"
    )
  }
  markers.combined <- Reduce(cbind, markers.conserved)
  # NOTE(review): pattern kept verbatim; the leading '*' is treated literally
  # by the regex engine here -- confirm before changing
  pval.codes <- colnames(x = markers.combined)[grepl(pattern = "*_p_val$", x = colnames(x = markers.combined))]
  if (length(x = pval.codes) > 1) {
    markers.combined$max_pval <- apply(
      X = markers.combined[, pval.codes, drop = FALSE],
      MARGIN = 1,
      FUN = max
    )
    combined.pval <- data.frame(cp = apply(
      X = markers.combined[, pval.codes, drop = FALSE],
      MARGIN = 1,
      FUN = function(x) {
        return(meta.method(x)$p)
      }
    ))
    # NOTE(review): the combined-p column is named after the DEFAULT of
    # meta.method (formals()), not the function actually passed -- kept for
    # output compatibility
    colnames(x = combined.pval) <- paste0(
      as.character(x = formals()$meta.method),
      "_p_val"
    )
    markers.combined <- cbind(markers.combined, combined.pval)
    markers.combined <- markers.combined[order(markers.combined[, paste0(as.character(x = formals()$meta.method), "_p_val")]), ]
  } else {
    warning("Only a single group was tested", call. = FALSE, immediate. = TRUE)
  }
  return(markers.combined)
}

#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Methods for Seurat-defined generics
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

#' @param cells.1 Vector of cell names belonging to group 1
#' @param cells.2 Vector of cell names belonging to group 2
#' @param counts Count matrix if using scale.data for DE tests. This is used for
This is used for #' computing pct.1 and pct.2 and for filtering features based on fraction #' expressing #' @param features Genes to test. Default is to use all genes #' @param logfc.threshold Limit testing to genes which show, on average, at least #' X-fold difference (log-scale) between the two groups of cells. Default is 0.25 #' Increasing logfc.threshold speeds up the function, but can miss weaker signals. #' @param test.use Denotes which test to use. Available options are: #' \itemize{ #' \item{"wilcox"} : Identifies differentially expressed genes between two #' groups of cells using a Wilcoxon Rank Sum test (default) #' \item{"bimod"} : Likelihood-ratio test for single cell gene expression, #' (McDavid et al., Bioinformatics, 2013) #' \item{"roc"} : Identifies 'markers' of gene expression using ROC analysis. #' For each gene, evaluates (using AUC) a classifier built on that gene alone, #' to classify between two groups of cells. An AUC value of 1 means that #' expression values for this gene alone can perfectly classify the two #' groupings (i.e. Each of the cells in cells.1 exhibit a higher level than #' each of the cells in cells.2). An AUC value of 0 also means there is perfect #' classification, but in the other direction. A value of 0.5 implies that #' the gene has no predictive power to classify the two groups. Returns a #' 'predictive power' (abs(AUC-0.5) * 2) ranked matrix of putative differentially #' expressed genes. #' \item{"t"} : Identify differentially expressed genes between two groups of #' cells using the Student's t-test. #' \item{"negbinom"} : Identifies differentially expressed genes between two #' groups of cells using a negative binomial generalized linear model. #' Use only for UMI-based datasets #' \item{"poisson"} : Identifies differentially expressed genes between two #' groups of cells using a poisson generalized linear model. 
#' Use only for UMI-based datasets #' \item{"LR"} : Uses a logistic regression framework to determine differentially #' expressed genes. Constructs a logistic regression model predicting group #' membership based on each feature individually and compares this to a null #' model with a likelihood ratio test. #' \item{"MAST"} : Identifies differentially expressed genes between two groups #' of cells using a hurdle model tailored to scRNA-seq data. Utilizes the MAST #' package to run the DE testing. #' \item{"DESeq2"} : Identifies differentially expressed genes between two groups #' of cells based on a model using DESeq2 which uses a negative binomial #' distribution (Love et al, Genome Biology, 2014).This test does not support #' pre-filtering of genes based on average difference (or percent detection rate) #' between cell groups. However, genes may be pre-filtered based on their #' minimum detection rate (min.pct) across both cell groups. To use this method, #' please install DESeq2, using the instructions at #' https://bioconductor.org/packages/release/bioc/html/DESeq2.html #' } #' @param min.pct only test genes that are detected in a minimum fraction of #' min.pct cells in either of the two populations. Meant to speed up the function #' by not testing genes that are very infrequently expressed. Default is 0.1 #' @param min.diff.pct only test genes that show a minimum difference in the #' fraction of detection between the two groups. Set to -Inf by default #' @param only.pos Only return positive markers (FALSE by default) #' @param verbose Print a progress bar once expression testing begins #' @param max.cells.per.ident Down sample each identity class to a max number. #' Default is no downsampling. 
Not activated by default (set to Inf)
#' @param random.seed Random seed for downsampling
#' @param latent.vars Variables to test, used only when \code{test.use} is one of
#' 'LR', 'negbinom', 'poisson', or 'MAST'
#' @param min.cells.feature Minimum number of cells expressing the feature in at least one
#' of the two groups, currently only used for poisson and negative binomial tests
#' @param min.cells.group Minimum number of cells in one of the groups
#' @param pseudocount.use Pseudocount to add to averaged expression values when
#' calculating logFC. 1 by default.
#'
#' @importFrom Matrix rowSums
#' @importFrom rlang %||%
#' @importFrom stats p.adjust
#'
#' @rdname FindMarkers
#' @export
#' @method FindMarkers default
#'
FindMarkers.default <- function(
  object,
  slot = "data",
  counts = numeric(),
  cells.1 = NULL,
  cells.2 = NULL,
  features = NULL,
  reduction = NULL,
  logfc.threshold = 0.25,
  test.use = 'wilcox',
  min.pct = 0.1,
  min.diff.pct = -Inf,
  verbose = TRUE,
  only.pos = FALSE,
  max.cells.per.ident = Inf,
  random.seed = 1,
  latent.vars = NULL,
  min.cells.feature = 3,
  min.cells.group = 3,
  pseudocount.use = 1,
  ...
) {
  features <- features %||% rownames(x = object)
  # DESeq2 performs its own independent filtering, so disable all
  # pre-filtering (min.diff.pct / logfc.threshold) for that test
  methods.noprefiliter <- c("DESeq2")
  if (test.use %in% methods.noprefiliter) {
    features <- rownames(x = object)
    min.diff.pct <- -Inf
    logfc.threshold <- 0
  }
  # error checking
  if (length(x = cells.1) == 0) {
    stop("Cell group 1 is empty - no cells with identity class ", cells.1)
  } else if (length(x = cells.2) == 0) {
    stop("Cell group 2 is empty - no cells with identity class ", cells.2)
  } else if (length(x = cells.1) < min.cells.group) {
    stop("Cell group 1 has fewer than ", min.cells.group, " cells")
  } else if (length(x = cells.2) < min.cells.group) {
    stop("Cell group 2 has fewer than ", min.cells.group, " cells")
  } else if (any(!cells.1 %in% colnames(x = object))) {
    # report the offending cell names themselves; indexing colnames(object)
    # with positions taken from cells.1 would list the wrong cells
    bad.cells <- cells.1[!as.character(x = cells.1) %in% colnames(x = object)]
    stop(
      "The following cell names provided to cells.1 are not present: ",
      paste(bad.cells, collapse = ", ")
    )
  } else if (any(!cells.2 %in% colnames(x = object))) {
    bad.cells <- cells.2[!as.character(x = cells.2) %in% colnames(x = object)]
    stop(
      "The following cell names provided to cells.2 are not present: ",
      paste(bad.cells, collapse = ", ")
    )
  }
  # feature selection (based on percentages)
  # for the scale.data slot, percentages are computed on the raw counts
  data <- switch(
    EXPR = slot,
    'scale.data' = counts,
    object
  )
  if (is.null(x = reduction)) {
    thresh.min <- 0
    pct.1 <- round(
      x = rowSums(x = data[features, cells.1, drop = FALSE] > thresh.min) /
        length(x = cells.1),
      digits = 3
    )
    pct.2 <- round(
      x = rowSums(x = data[features, cells.2, drop = FALSE] > thresh.min) /
        length(x = cells.2),
      digits = 3
    )
    data.alpha <- cbind(pct.1, pct.2)
    colnames(x = data.alpha) <- c("pct.1", "pct.2")
    alpha.min <- apply(X = data.alpha, MARGIN = 1, FUN = max)
    names(x = alpha.min) <- rownames(x = data.alpha)
    features <- names(x = which(x = alpha.min > min.pct))
    if (length(x = features) == 0) {
      stop("No features pass min.pct threshold")
    }
    alpha.diff <- alpha.min - apply(X = data.alpha, MARGIN = 1, FUN = min)
    features <- names(
      x = which(x = alpha.min > min.pct & alpha.diff > min.diff.pct)
    )
    if (length(x = features) == 0) {
      stop("No features pass min.diff.pct threshold")
    }
  } else {
    # testing on embeddings: percent-expressed is not meaningful
    data.alpha <- data.frame(
      pct.1 = rep(x = NA, times = length(x = features)),
      pct.2 = rep(x = NA, times = length(x = features))
    )
  }
  # feature selection (based on average difference)
  mean.fxn <- if (is.null(x = reduction) && slot != "scale.data") {
    switch(
      EXPR = slot,
      # 'data' is log-normalized: un-log, average, then re-log
      'data' = function(x) {
        return(log(x = mean(x = expm1(x = x)) + pseudocount.use))
      },
      function(x) {
        return(log(x = mean(x = x) + pseudocount.use))
      }
    )
  } else {
    mean
  }
  data.1 <- apply(
    X = data[features, cells.1, drop = FALSE],
    MARGIN = 1,
    FUN = mean.fxn
  )
  data.2 <- apply(
    X = data[features, cells.2, drop = FALSE],
    MARGIN = 1,
    FUN = mean.fxn
  )
  total.diff <- (data.1 - data.2)
  if (is.null(x = reduction) && slot != "scale.data") {
    features.diff <- if (only.pos) {
      names(x = which(x = total.diff > logfc.threshold))
    } else {
      names(x = which(x = abs(x = total.diff) > logfc.threshold))
    }
    features <- intersect(x = features, y = features.diff)
    if (length(x = features) == 0) {
      stop("No features pass logfc.threshold threshold")
    }
  }
  if (max.cells.per.ident < Inf) {
    set.seed(seed = random.seed)
    # Should be cells.1 and cells.2?
    if (length(x = cells.1) > max.cells.per.ident) {
      cells.1 <- sample(x = cells.1, size = max.cells.per.ident)
    }
    if (length(x = cells.2) > max.cells.per.ident) {
      cells.2 <- sample(x = cells.2, size = max.cells.per.ident)
    }
    if (!is.null(x = latent.vars)) {
      latent.vars <- latent.vars[c(cells.1, cells.2), , drop = FALSE]
    }
  }
  # perform DE
  if (!(test.use %in% c('negbinom', 'poisson', 'MAST', "LR")) && !is.null(x = latent.vars)) {
    warning("'latent.vars' is only used for 'negbinom', 'poisson', 'LR', and 'MAST' tests")
  }
  # arguments shared by every per-test helper below
  std.arguments <- list(
    data.use = object[features, c(cells.1, cells.2), drop = FALSE],
    cells.1 = cells.1,
    cells.2 = cells.2,
    verbose = verbose)
  de.results <- switch(
    EXPR = test.use,
    'wilcox' = do.call("WilcoxDETest", c(std.arguments, ...)),
    'bimod' = do.call("DiffExpTest", c(std.arguments, ...)),
    'roc' = do.call("MarkerTest", std.arguments),
    't' = do.call("DiffTTest", std.arguments),
    'negbinom' = do.call("GLMDETest", c(std.arguments, min.cells = min.cells.feature,
                                        latent.vars = latent.vars, test.use = test.use)),
    'poisson' = do.call("GLMDETest", c(std.arguments, min.cells = min.cells.feature,
                                       latent.vars = latent.vars, test.use = test.use)),
    'MAST' = do.call("MASTDETest", c(std.arguments, min.cells = min.cells.feature,
                                     latent.vars = latent.vars, test.use = test.use)),
    "DESeq2" = do.call("DESeq2DETest", std.arguments),
    "LR" = do.call("LRDETest", c(std.arguments, latent.vars = latent.vars)),
    {
      # If no test is matched, tries to call a function with the name
      # of the provided test
      found_functions <- findFunction(test.use)
      if (length(found_functions) == 0) {
        stop("Unknown test: ", test.use, "\n",
             "This might be caused because the function that provides ",
             "that test has not been loaded. Make sure to load the ",
             "package that would contain it.")
      }
      de.results <- do.call(test.use, c(std.arguments, ...),
                            envir = found_functions[[1]])
      de.results
    }
  )
  if (is.null(x = reduction)) {
    # scale.data and roc report a raw average difference, not a log fold-change
    diff.col <- ifelse(
      test = slot == "scale.data" || test.use == 'roc',
      yes = "avg_diff",
      no = "avg_logFC"
    )
    de.results[, diff.col] <- total.diff[rownames(x = de.results)]
    de.results <- cbind(de.results, data.alpha[rownames(x = de.results), , drop = FALSE])
  } else {
    diff.col <- "avg_diff"
    de.results[, diff.col] <- total.diff[rownames(x = de.results)]
  }
  if (only.pos) {
    de.results <- de.results[de.results[, diff.col] > 0, , drop = FALSE]
  }
  if (test.use == "roc") {
    de.results <- de.results[order(-de.results$power, -de.results[, diff.col]), ]
  } else {
    de.results <- de.results[order(de.results$p_val, -de.results[, diff.col]), ]
    # Bonferroni correction over ALL features in the object, not just those tested
    de.results$p_val_adj = p.adjust(
      p = de.results$p_val,
      method = "bonferroni",
      n = nrow(x = object)
    )
  }
  return(de.results)
}
#' @param ident.1 Identity class to define markers for; pass an object of class
#' \code{phylo} or 'clustertree' to find markers for a node in a cluster tree;
#' passing 'clustertree' requires \code{\link{BuildClusterTree}} to have been run
#' @param ident.2 A second identity class for comparison; if \code{NULL},
#' use all other cells for comparison; if an object of class \code{phylo} or
#' 'clustertree' is passed to \code{ident.1}, must pass a node to find markers for
#' @param reduction Reduction to use in differential expression testing - will test for DE on cell embeddings
#' @param group.by Regroup cells into a different identity class prior to performing differential expression (see example)
#' @param subset.ident Subset a particular identity class prior to regrouping.
Only relevant if group.by is set (see example)
#' @param assay Assay to use in differential expression testing
#' @param slot Slot to pull data from; note that if \code{test.use} is "negbinom", "poisson", or "DESeq2",
#' \code{slot} will be set to "counts"
#'
#' @importFrom methods is
#'
#' @rdname FindMarkers
#' @export
#' @method FindMarkers Seurat
#'
FindMarkers.Seurat <- function(
  object,
  ident.1 = NULL,
  ident.2 = NULL,
  group.by = NULL,
  subset.ident = NULL,
  assay = NULL,
  slot = 'data',
  reduction = NULL,
  features = NULL,
  logfc.threshold = 0.25,
  test.use = "wilcox",
  min.pct = 0.1,
  min.diff.pct = -Inf,
  verbose = TRUE,
  only.pos = FALSE,
  max.cells.per.ident = Inf,
  random.seed = 1,
  latent.vars = NULL,
  min.cells.feature = 3,
  min.cells.group = 3,
  pseudocount.use = 1,
  ...
) {
  # optionally re-group (and subset) cells before testing
  if (!is.null(x = group.by)) {
    if (!is.null(x = subset.ident)) {
      object <- subset(x = object, idents = subset.ident)
    }
    Idents(object = object) <- group.by
  }
  if (!is.null(x = assay) && !is.null(x = reduction)) {
    stop("Please only specify either assay or reduction.")
  }
  # count-model tests must run on raw counts regardless of requested slot
  data.slot <- ifelse(
    test = test.use %in% c("negbinom", "poisson", "DESeq2"),
    yes = 'counts',
    no = slot
  )
  if (is.null(x = reduction)) {
    assay <- assay %||% DefaultAssay(object = object)
    data.use <- GetAssayData(object = object[[assay]], slot = data.slot)
  } else {
    if (data.slot == "counts") {
      stop("The following tests cannot be used when specifying a reduction as they assume a count model: negbinom, poisson, DESeq2")
    }
    data.use <- t(x = Embeddings(object = object, reduction = reduction))
  }
  if (is.null(x = ident.1)) {
    stop("Please provide ident.1")
  } else if ((length(x = ident.1) == 1 && ident.1[1] == 'clustertree') || is(object = ident.1, class2 = 'phylo')) {
    # tree mode: ident.2 is a node; its left/right descendants become the groups
    if (is.null(x = ident.2)) {
      stop("Please pass a node to 'ident.2' to run FindMarkers on a tree")
    }
    tree <- if (is(object = ident.1, class2 = 'phylo')) {
      ident.1
    } else {
      Tool(object = object, slot = 'BuildClusterTree')
    }
    if (is.null(x = tree)) {
      stop("Please run 'BuildClusterTree' or pass an object of class 'phylo' as 'ident.1'")
    }
    ident.1 <- tree$tip.label[GetLeftDescendants(tree = tree, node = ident.2)]
    ident.2 <- tree$tip.label[GetRightDescendants(tree = tree, node = ident.2)]
  }
  if (length(x = as.vector(x = ident.1)) > 1 &&
      any(as.character(x = ident.1) %in% colnames(x = data.use))) {
    # report the missing cell names themselves; indexing colnames(data.use)
    # with positions taken from ident.1 would report the wrong cells
    bad.cells <- ident.1[!as.character(x = ident.1) %in% colnames(x = data.use)]
    if (length(x = bad.cells) > 0) {
      stop(paste0("The following cell names provided to ident.1 are not present in the object: ", paste(bad.cells, collapse = ", ")))
    }
  } else {
    ident.1 <- Seurat::WhichCells(object = object, idents = ident.1)
  }
  # if NULL for ident.2, use all other cells
  if (length(x = as.vector(x = ident.2)) > 1 &&
      any(as.character(x = ident.2) %in% colnames(x = data.use))) {
    bad.cells <- ident.2[!as.character(x = ident.2) %in% colnames(x = data.use)]
    if (length(x = bad.cells) > 0) {
      stop(paste0("The following cell names provided to ident.2 are not present in the object: ", paste(bad.cells, collapse = ", ")))
    }
  } else {
    if (is.null(x = ident.2)) {
      ident.2 <- setdiff(x = colnames(x = data.use), y = ident.1)
    } else {
      ident.2 <- Seurat::WhichCells(object = object, idents = ident.2)
    }
  }
  if (!is.null(x = latent.vars)) {
    latent.vars <- FetchData(
      object = object,
      vars = latent.vars,
      cells = c(ident.1, ident.2)
    )
  }
  # scale.data tests still need raw counts for pct.1/pct.2 computation
  counts <- switch(
    EXPR = data.slot,
    'scale.data' = GetAssayData(object = object[[assay]], slot = "counts"),
    numeric()
  )
  de.results <- FindMarkers(
    object = data.use,
    slot = data.slot,
    counts = counts,
    cells.1 = ident.1,
    cells.2 = ident.2,
    features = features,
    reduction = reduction,
    logfc.threshold = logfc.threshold,
    test.use = test.use,
    min.pct = min.pct,
    min.diff.pct = min.diff.pct,
    verbose = verbose,
    only.pos = only.pos,
    max.cells.per.ident = max.cells.per.ident,
    random.seed = random.seed,
    latent.vars = latent.vars,
    min.cells.feature = min.cells.feature,
    min.cells.group = min.cells.group,
    pseudocount.use = pseudocount.use,
    ...
  )
  return(de.results)
}
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Internal
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# internal function to calculate AUC values
#' @importFrom pbapply pblapply
#
AUCMarkerTest <- function(data1, data2, mygenes, print.bar = TRUE) {
  # per-gene AUC of a one-gene classifier separating the two groups
  myAUC <- unlist(x = lapply(
    X = mygenes,
    FUN = function(x) {
      return(DifferentialAUC(
        x = as.numeric(x = data1[x, ]),
        y = as.numeric(x = data2[x, ])
      ))
    }
  ))
  myAUC[is.na(x = myAUC)] <- 0
  # show a progress bar only when requested; if/else (not ifelse) is the
  # robust way to select between two closures
  iterate.fxn <- if (print.bar) {
    pblapply
  } else {
    lapply
  }
  avg_diff <- unlist(x = iterate.fxn(
    X = mygenes,
    FUN = function(x) {
      return(
        ExpMean(
          x = as.numeric(x = data1[x, ])
        ) - ExpMean(
          x = as.numeric(x = data2[x, ])
        )
      )
    }
  ))
  toRet <- data.frame(cbind(myAUC, avg_diff), row.names = mygenes)
  # rank genes by decreasing AUC
  toRet <- toRet[rev(x = order(toRet$myAUC)), ]
  return(toRet)
}
#internal function to run mcdavid et al. DE test
#
#' @importFrom stats sd dnorm
#
bimodLikData <- function(x, xmin = 0) {
  # log-likelihood of a zero-inflated (bimodal) model: a point mass at/below
  # xmin plus a normal component for expressed values
  x1 <- x[x <= xmin]
  x2 <- x[x > xmin]
  # mixing weight, clamped away from 0/1 to keep the logs finite
  xal <- MinMax(
    data = length(x = x2) / length(x = x),
    min = 1e-5,
    max = (1 - 1e-5)
  )
  likA <- length(x = x1) * log(x = 1 - xal)
  # sd is undefined for fewer than two points; fall back to 1
  if (length(x = x2) < 2) {
    mysd <- 1
  } else {
    mysd <- sd(x = x2)
  }
  likB <- length(x = x2) * log(x = xal) +
    sum(dnorm(x = x2, mean = mean(x = x2), sd = mysd, log = TRUE))
  return(likA + likB)
}
# Differential expression using DESeq2
#
# Identifies differentially expressed genes between two groups of cells using
# DESeq2
#
# @references Love MI, Huber W and Anders S (2014). "Moderated estimation of
# fold change and dispersion for RNA-seq data with DESeq2." Genome Biology.
# https://bioconductor.org/packages/release/bioc/html/DESeq2.html
# @param data.use Data matrix to test
# @param cells.1 Group 1 cells
# @param cells.2 Group 2 cells
# @param verbose Print a progress bar
# @param ...
Extra parameters to pass to DESeq2::results
# @return Returns a p-value ranked matrix of putative differentially expressed
# genes.
#
# @details
# This test does not support pre-filtering of genes based on average difference
# (or percent detection rate) between cell groups. However, genes may be
# pre-filtered based on their minimum detection rate (min.pct) across both cell
# groups. To use this method, please install DESeq2, using the instructions at
# https://bioconductor.org/packages/release/bioc/html/DESeq2.html
#
# @export
#
# @examples
# \dontrun{
# pbmc_small
# DESeq2DETest(pbmc_small, cells.1 = WhichCells(object = pbmc_small, idents = 1),
# cells.2 = WhichCells(object = pbmc_small, idents = 2))
# }
#
DESeq2DETest <- function(
  data.use,
  cells.1,
  cells.2,
  verbose = TRUE,
  ...
) {
  if (!Seurat:::PackageCheck('DESeq2', error = FALSE)) {
    stop("Please install DESeq2 - learn more at https://bioconductor.org/packages/release/bioc/html/DESeq2.html")
  }
  # two-level design: Group1 = cells.1, Group2 = cells.2
  group.info <- data.frame(row.names = c(cells.1, cells.2))
  group.info[cells.1, "group"] <- "Group1"
  group.info[cells.2, "group"] <- "Group2"
  group.info[, "group"] <- factor(x = group.info[, "group"])
  group.info$wellKey <- rownames(x = group.info)
  dds1 <- DESeq2::DESeqDataSetFromMatrix(
    countData = data.use,
    colData = group.info,
    design = ~ group
  )
  dds1 <- DESeq2::estimateSizeFactors(object = dds1)
  # local fit is more robust than the default parametric fit for sparse data
  dds1 <- DESeq2::estimateDispersions(object = dds1, fitType = "local")
  dds1 <- DESeq2::nbinomWaldTest(object = dds1)
  res <- DESeq2::results(
    object = dds1,
    contrast = c("group", "Group1", "Group2"),
    alpha = 0.05,
    ...
  )
  to.return <- data.frame(p_val = res$pvalue, row.names = rownames(res))
  return(to.return)
}
# internal function to calculate AUC values
#' @importFrom ROCR prediction performance
#'
DifferentialAUC <- function(x, y) {
  # AUC of the classifier "expression of this gene" for group 1 (label 1)
  # vs group 2 (label 0)
  prediction.use <- prediction(
    predictions = c(x, y),
    labels = c(rep(x = 1, length(x = x)), rep(x = 0, length(x = y))),
    label.ordering = 0:1
  )
  perf.use <- performance(prediction.obj = prediction.use, measure = "auc")
  auc.use <- round(x = perf.use@y.values[[1]], digits = 3)
  return(auc.use)
}
#internal function to run mcdavid et al. DE test
#
#' @importFrom stats pchisq
#
DifferentialLRT <- function(x, y, xmin = 0) {
  # likelihood-ratio test: separate bimodal fits for each group vs a joint fit
  lrtX <- bimodLikData(x = x)
  lrtY <- bimodLikData(x = y)
  lrtZ <- bimodLikData(x = c(x, y))
  lrt_diff <- 2 * (lrtX + lrtY - lrtZ)
  # 3 df: mixing weight, mean, and sd each estimated per group vs jointly
  return(pchisq(q = lrt_diff, df = 3, lower.tail = FALSE))
}
# Likelihood ratio test for zero-inflated data
#
# Identifies differentially expressed genes between two groups of cells using
# the LRT model proposed in McDavid et al, Bioinformatics, 2013
#
# @inheritParams FindMarkers
# @param object Seurat object
# @param cells.1 Group 1 cells
# @param cells.2 Group 2 cells
# @param assay.type Type of assay to fetch data for (default is RNA)
# @return Returns a p-value ranked matrix of putative differentially expressed
# genes.
#
#' @importFrom pbapply pbsapply
#' @importFrom future.apply future_sapply
#' @importFrom future nbrOfWorkers
#
# @export
# @examples
# pbmc_small
# DiffExpTest(pbmc_small, cells.1 = WhichCells(object = pbmc_small, idents = 1),
# cells.2 = WhichCells(object = pbmc_small, idents = 2))
#
DiffExpTest <- function(
  data.use,
  cells.1,
  cells.2,
  verbose = TRUE
) {
  # progress bar only when running sequentially; if/else (not ifelse) is the
  # robust way to select between two closures
  my.sapply <- if (verbose && nbrOfWorkers() == 1) {
    pbsapply
  } else {
    future_sapply
  }
  # per-gene McDavid et al. likelihood-ratio test
  p_val <- unlist(
    x = my.sapply(
      X = seq_len(nrow(x = data.use)),
      FUN = function(x) {
        return(DifferentialLRT(
          x = as.numeric(x = data.use[x, cells.1]),
          y = as.numeric(x = data.use[x, cells.2])
        ))
      }
    )
  )
  to.return <- data.frame(p_val, row.names = rownames(x = data.use))
  return(to.return)
}
# Differential expression testing using Student's t-test
#
# Identify differentially expressed genes between two groups of cells using
# the Student's t-test
#
# @return Returns a p-value ranked matrix of putative differentially expressed
# genes.
#
#' @importFrom stats t.test
#' @importFrom pbapply pbsapply
#' @importFrom future.apply future_sapply
#' @importFrom future nbrOfWorkers
#
# @export
#
# @examples
# pbmc_small
# DiffTTest(pbmc_small, cells.1 = WhichCells(object = pbmc_small, idents = 1),
# cells.2 = WhichCells(object = pbmc_small, idents = 2))
DiffTTest <- function(
  data.use,
  cells.1,
  cells.2,
  verbose = TRUE
) {
  # progress bar only when running sequentially
  my.sapply <- if (verbose && nbrOfWorkers() == 1) {
    pbsapply
  } else {
    future_sapply
  }
  # per-gene two-sample t-test between the two cell groups
  p_val <- unlist(
    x = my.sapply(
      X = seq_len(nrow(data.use)),
      FUN = function(x) {
        t.test(x = data.use[x, cells.1], y = data.use[x, cells.2])$p.value
      }
    )
  )
  to.return <- data.frame(p_val, row.names = rownames(x = data.use))
  return(to.return)
}
# Tests for UMI-count based data
#
# Identifies differentially expressed genes between two groups of cells using
# either a negative binomial or poisson generalized linear model
#
# @param data.use Data to test
# @param cells.1 Group 1 cells
# @param cells.2 Group 2 cells
# @param min.cells Minimum
number of cells threshold
# @param latent.vars Latent variables to test
# @param test.use parameterizes the glm
# @param verbose Print progress bar
#
# @return Returns a p-value ranked matrix of putative differentially expressed
# genes.
#
#' @importFrom MASS glm.nb
#' @importFrom pbapply pbsapply
#' @importFrom stats var as.formula
#' @importFrom future.apply future_sapply
#' @importFrom future nbrOfWorkers
#'
# @export
#
# @examples
# pbmc_small
# # Note, not recommended for particularly small datasets - expect warnings
# NegBinomDETest(pbmc_small, cells.1 = WhichCells(object = pbmc_small, idents = 1),
# cells.2 = WhichCells(object = pbmc_small, idents = 2))
#
GLMDETest <- function(
  data.use,
  cells.1,
  cells.2,
  min.cells = 3,
  latent.vars = NULL,
  test.use = NULL,
  verbose = TRUE
) {
  # group labels in the same cell order as the columns of data.use
  group.info <- data.frame(
    group = rep(
      x = c('Group1', 'Group2'),
      times = c(length(x = cells.1), length(x = cells.2))
    )
  )
  rownames(group.info) <- c(cells.1, cells.2)
  group.info[, "group"] <- factor(x = group.info[, "group"])
  # the group factor is always part of the model; append any extra covariates
  latent.vars <- if (is.null(x = latent.vars)) {
    group.info
  } else {
    cbind(x = group.info, latent.vars)
  }
  latent.var.names <- colnames(x = latent.vars)
  # progress bar only when running sequentially; if/else (not ifelse) is the
  # robust way to select between two closures
  my.sapply <- if (verbose && nbrOfWorkers() == 1) {
    pbsapply
  } else {
    future_sapply
  }
  p_val <- unlist(
    x = my.sapply(
      X = seq_len(nrow(data.use)),
      FUN = function(x) {
        latent.vars[, "GENE"] <- as.numeric(x = data.use[x, ])
        # check that gene is expressed in specified number of cells in one group
        if (sum(latent.vars$GENE[latent.vars$group == "Group1"] > 0) < min.cells &&
            sum(latent.vars$GENE[latent.vars$group == "Group2"] > 0) < min.cells) {
          warning(paste0(
            "Skipping gene --- ",
            x,
            ". Fewer than ",
            min.cells,
            " cells in both clusters."
          ))
          # sentinel p-value 2 marks a skipped gene; filtered out below
          return(2)
        }
        # check that variance between groups is not 0
        if (var(x = latent.vars$GENE) == 0) {
          warning(paste0(
            "Skipping gene -- ",
            x,
            ". No variance in expression between the two clusters."
          ))
          return(2)
        }
        fmla <- as.formula(object = paste(
          "GENE ~",
          paste(latent.var.names, collapse = "+")
        ))
        p.estimate <- 2
        if (test.use == "negbinom") {
          # glm.nb can fail to converge; keep the sentinel in that case
          try(
            expr = p.estimate <- summary(
              object = glm.nb(formula = fmla, data = latent.vars)
            )$coef[2, 4],
            silent = TRUE
          )
          return(p.estimate)
        } else if (test.use == "poisson") {
          return(summary(object = glm(
            formula = fmla,
            data = latent.vars,
            family = "poisson"
          ))$coef[2, 4])
        }
      }
    )
  )
  # drop genes that were skipped (sentinel p-value of 2)
  features.keep <- rownames(data.use)
  if (length(x = which(x = p_val == 2)) > 0) {
    features.keep <- features.keep[-which(x = p_val == 2)]
    p_val <- p_val[!p_val == 2]
  }
  to.return <- data.frame(p_val, row.names = features.keep)
  return(to.return)
}
# Perform differential expression testing using a logistic regression framework
#
# Constructs a logistic regression model predicting group membership based on a
# given feature and compares this to a null model with a likelihood ratio test.
#
# @param data.use expression matrix
# @param cells.1 Vector of cells in group 1
# @param cells.2 Vector of cells in group 2
# @param latent.vars Latent variables to include in model
# @param verbose Print messages
#
#' @importFrom lmtest lrtest
#' @importFrom pbapply pbsapply
#' @importFrom stats as.formula glm
#' @importFrom future.apply future_sapply
#' @importFrom future nbrOfWorkers
#
LRDETest <- function(
  data.use,
  cells.1,
  cells.2,
  latent.vars = NULL,
  verbose = TRUE,
  ...
) {
  # group labels for the two cell sets; columns reordered to match
  group.info <- data.frame(row.names = c(cells.1, cells.2))
  group.info[cells.1, "group"] <- "Group1"
  group.info[cells.2, "group"] <- "Group2"
  group.info[, "group"] <- factor(x = group.info[, "group"])
  data.use <- data.use[, rownames(group.info), drop = FALSE]
  latent.vars <- latent.vars[rownames(group.info), , drop = FALSE]
  # progress bar only when running sequentially; if/else (not ifelse) is the
  # robust way to select between two closures
  my.sapply <- if (verbose && nbrOfWorkers() == 1) {
    pbsapply
  } else {
    future_sapply
  }
  p_val <- my.sapply(
    X = seq_len(nrow(x = data.use)),
    FUN = function(x) {
      # full model (group ~ GENE [+ latent.vars]) vs null model without GENE
      if (is.null(x = latent.vars)) {
        model.data <- cbind(GENE = data.use[x, ], group.info)
        fmla <- as.formula(object = "group ~ GENE")
        fmla2 <- as.formula(object = "group ~ 1")
      } else {
        model.data <- cbind(GENE = data.use[x, ], group.info, latent.vars)
        fmla <- as.formula(object = paste(
          "group ~ GENE +",
          paste(colnames(x = latent.vars), collapse = "+")
        ))
        fmla2 <- as.formula(object = paste(
          "group ~",
          paste(colnames(x = latent.vars), collapse = "+")
        ))
      }
      model1 <- glm(formula = fmla, data = model.data, family = "binomial")
      model2 <- glm(formula = fmla2, data = model.data, family = "binomial")
      # likelihood-ratio test between the two nested models
      lrtest <- lrtest(model1, model2)
      return(lrtest$Pr[2])
    }
  )
  to.return <- data.frame(p_val, row.names = rownames(data.use))
  return(to.return)
}
# ROC-based marker discovery
#
# Identifies 'markers' of gene expression using ROC analysis. For each gene,
# evaluates (using AUC) a classifier built on that gene alone, to classify
# between two groups of cells.
#
# An AUC value of 1 means that expression values for this gene alone can
# perfectly classify the two groupings (i.e. Each of the cells in cells.1
# exhibit a higher level than each of the cells in cells.2). An AUC value of 0
# also means there is perfect classification, but in the other direction. A
# value of 0.5 implies that the gene has no predictive power to classify the
# two groups.
#
# @return Returns a 'predictive power' (abs(AUC-0.5) * 2) ranked matrix of
# putative differentially expressed genes.
#
# @export
#
# @examples
# pbmc_small
# MarkerTest(pbmc_small, cells.1 = WhichCells(object = pbmc_small, idents = 1),
# cells.2 = WhichCells(object = pbmc_small, idents = 2))
#
MarkerTest <- function(
  data.use,
  cells.1,
  cells.2,
  verbose = TRUE
) {
  # Score every gene by the AUC of a one-gene classifier between the groups
  auc.results <- AUCMarkerTest(
    data1 = data.use[, cells.1, drop = FALSE],
    data2 = data.use[, cells.2, drop = FALSE],
    mygenes = rownames(x = data.use),
    print.bar = verbose
  )
  # Fold AUC into a symmetric 'predictive power' score in [0, 1]
  auc.results$power <- abs(x = auc.results$myAUC - 0.5) * 2
  return(auc.results)
}
# Differential expression using MAST
#
# Identifies differentially expressed genes between two groups of cells using
# a hurdle model tailored to scRNA-seq data. Utilizes the MAST package to run
# the DE testing.
#
# @references Andrew McDavid, Greg Finak and Masanao Yajima (2017). MAST: Model-based
# Analysis of Single Cell Transcriptomics. R package version 1.2.1.
# https://github.com/RGLab/MAST/
#
# @param data.use Data to test
# @param cells.1 Group 1 cells
# @param cells.2 Group 2 cells
# @param latent.vars Confounding variables to adjust for in DE test. Default is
# "nUMI", which adjusts for cellular depth (i.e. cellular detection rate). For
# non-UMI based data, set to nGene instead.
# @param verbose print output
# @param \dots Additional parameters to zero-inflated regression (zlm) function
# in MAST
# @details
# To use this method, please install MAST, using instructions at https://github.com/RGLab/MAST/
#
# @return Returns a p-value ranked matrix of putative differentially expressed
# genes.
#
#' @importFrom stats relevel
#
# @export
#
# @examples
# \dontrun{
# pbmc_small
# MASTDETest(pbmc_small, cells.1 = WhichCells(object = pbmc_small, idents = 1),
# cells.2 = WhichCells(object = pbmc_small, idents = 2))
# }
#
MASTDETest <- function(
  data.use,
  cells.1,
  cells.2,
  latent.vars = NULL,
  verbose = TRUE,
  ...
) {
  # Check for MAST
  if (!Seurat:::PackageCheck('MAST', error = FALSE)) {
    stop("Please install MAST - learn more at https://github.com/RGLab/MAST")
  }
  # standardize covariates so the zlm coefficients are comparable
  if (length(x = latent.vars) > 0) {
    latent.vars <- scale(x = latent.vars)
  }
  group.info <- data.frame(row.names = c(cells.1, cells.2))
  latent.vars <- latent.vars %||% group.info
  group.info[cells.1, "group"] <- "Group1"
  group.info[cells.2, "group"] <- "Group2"
  group.info[, "group"] <- factor(x = group.info[, "group"])
  latent.vars.names <- c("condition", colnames(x = latent.vars))
  latent.vars <- cbind(latent.vars, group.info)
  latent.vars$wellKey <- rownames(x = latent.vars)
  # feature-level metadata required by MAST (primerid = gene name)
  fdat <- data.frame(rownames(x = data.use))
  colnames(x = fdat)[1] <- "primerid"
  rownames(x = fdat) <- fdat[, 1]
  sca <- MAST::FromMatrix(
    exprsArray = as.matrix(x = data.use),
    cData = latent.vars,
    fData = fdat
  )
  # Group1 is the reference level so the LRT is on 'conditionGroup2'
  cond <- factor(x = SummarizedExperiment::colData(sca)$group)
  cond <- relevel(x = cond, ref = "Group1")
  SummarizedExperiment::colData(sca)$condition <- cond
  fmla <- as.formula(
    object = paste0(" ~ ", paste(latent.vars.names, collapse = "+"))
  )
  zlmCond <- MAST::zlm(formula = fmla, sca = sca, ...)
  summaryCond <- summary(object = zlmCond, doLRT = 'conditionGroup2')
  summaryDt <- summaryCond$datatable
  # fcHurdle <- merge(
  #   summaryDt[contrast=='conditionGroup2' & component=='H', .(primerid, `Pr(>Chisq)`)], #hurdle P values
  #   summaryDt[contrast=='conditionGroup2' & component=='logFC', .(primerid, coef, ci.hi, ci.lo)], by='primerid'
  # ) #logFC coefficients
  # fcHurdle[,fdr:=p.adjust(`Pr(>Chisq)`, 'fdr')]
  # keep the hurdle-model ('H' component) p-values only
  p_val <- summaryDt[summaryDt[, "component"] == "H", 4]
  genes.return <- summaryDt[summaryDt[, "component"] == "H", 1]
  # p_val <- subset(summaryDt, component == "H")[, 4]
  # genes.return <- subset(summaryDt, component == "H")[, 1]
  to.return <- data.frame(p_val, row.names = genes.return)
  return(to.return)
}
# compare two negative binomial regression models
# model one uses only common factors (com.fac)
# model two additionally uses group factor (grp.fac)
#
#' @importFrom stats glm anova coef
#
NBModelComparison <- function(y, theta, latent.data, com.fac, grp.fac) {
  # fraction of cells with nonzero expression in each group
  tab <- as.matrix(x = table(y > 0, latent.data[, grp.fac]))
  freqs <- tab['TRUE', ] / apply(X = tab, MARGIN = 2, FUN = sum)
  # numeric sentinels: a fit that fails leaves these as plain numerics
  fit2 <- 0
  fit4 <- 0
  try(
    expr = fit2 <- glm(
      formula = y ~ .,
      data = latent.data[, com.fac, drop = FALSE],
      family = MASS::negative.binomial(theta = theta)
    ),
    silent = TRUE
  )
  try(
    fit4 <- glm(
      formula = y ~ .,
      data = latent.data[, c(com.fac, grp.fac)],
      family = MASS::negative.binomial(theta = theta)
    ),
    silent = TRUE
  )
  # a failed fit is still the numeric sentinel, not a glm object
  if (is.numeric(x = fit2) || is.numeric(x = fit4)) {
    message('One of the glm.nb calls failed')
    return(c(rep(x = NA, 5), freqs))
  }
  # chi-squared test between the nested models
  pval <- anova(fit2, fit4, test = 'Chisq')$'Pr(>Chi)'[2]
  # index of the group coefficient: intercept + common factors come first
  foi <- 2 + length(x = com.fac)
  log2.fc <- log2(x = 1 / exp(x = coef(object = fit4)[foi]))
  ret <- c(
    fit2$deviance,
    fit4$deviance,
    pval,
    coef(object = fit4)[foi],
    log2.fc,
    freqs
  )
  names(x = ret) <- c(
    'dev1',
    'dev2',
    'pval',
    'coef',
    'log2.fc',
    'freq1',
    'freq2'
  )
  return(ret)
}
# given a UMI count matrix, estimate NB theta parameter for each gene
# and use fit of relationship
with mean to assign regularized theta to each gene # #' @importFrom stats glm loess poisson #' @importFrom utils txtProgressBar setTxtProgressBar # RegularizedTheta <- function(cm, latent.data, min.theta = 0.01, bin.size = 128) { genes.regress <- rownames(x = cm) bin.ind <- ceiling(x = 1:length(x = genes.regress) / bin.size) max.bin <- max(bin.ind) message('Running Poisson regression (to get initial mean), and theta estimation per gene') pb <- txtProgressBar(min = 0, max = max.bin, style = 3, file = stderr()) theta.estimate <- c() for (i in 1:max.bin) { genes.bin.regress <- genes.regress[bin.ind == i] bin.theta.estimate <- unlist( x = parallel::mclapply( X = genes.bin.regress, FUN = function(j) { return(as.numeric(x = MASS::theta.ml( y = cm[j, ], mu = glm( formula = cm[j, ] ~ ., data = latent.data, family = poisson )$fitted ))) } ), use.names = FALSE ) theta.estimate <- c(theta.estimate, bin.theta.estimate) setTxtProgressBar(pb = pb, value = i) } close(con = pb) UMI.mean <- apply(X = cm, MARGIN = 1, FUN = mean) var.estimate <- UMI.mean + (UMI.mean ^ 2) / theta.estimate for (span in c(1/3, 1/2, 3/4, 1)) { fit <- loess( formula = log10(x = var.estimate) ~ log10(x = UMI.mean), span = span ) if (! 
any(is.na(x = fit$fitted))) { message(sprintf( 'Used loess with span %1.2f to fit mean-variance relationship\n', span )) break } } if (any(is.na(x = fit$fitted))) { stop('Problem when fitting NB gene variance in RegularizedTheta - NA values were fitted.') } theta.fit <- (UMI.mean ^ 2) / ((10 ^ fit$fitted) - UMI.mean) names(x = theta.fit) <- genes.regress to.fix <- theta.fit <= min.theta | is.infinite(x = theta.fit) if (any(to.fix)) { message( 'Fitted theta below ', min.theta, ' for ', sum(to.fix), ' genes, setting them to ', min.theta ) theta.fit[to.fix] <- min.theta } return(theta.fit) } # Differential expression using Wilcoxon Rank Sum # # Identifies differentially expressed genes between two groups of cells using # a Wilcoxon Rank Sum test # # @param data.use Data matrix to test # @param cells.1 Group 1 cells # @param cells.2 Group 2 cells # @param verbose Print a progress bar # @param ... Extra parameters passed to wilcox.test # # @return Returns a p-value ranked matrix of putative differentially expressed # features # #' @importFrom pbapply pbsapply #' @importFrom stats wilcox.test #' @importFrom future.apply future_sapply #' @importFrom future nbrOfWorkers # # @export # # @examples # pbmc_small # WilcoxDETest(pbmc_small, cells.1 = WhichCells(object = pbmc_small, idents = 1), # cells.2 = WhichCells(object = pbmc_small, idents = 2)) # WilcoxDETest <- function( data.use, cells.1, cells.2, verbose = TRUE, ... 
) { group.info <- data.frame(row.names = c(cells.1, cells.2)) group.info[cells.1, "group"] <- "Group1" group.info[cells.2, "group"] <- "Group2" group.info[, "group"] <- factor(x = group.info[, "group"]) data.use <- data.use[, rownames(x = group.info), drop = FALSE] my.sapply <- ifelse( test = verbose && nbrOfWorkers() == 1, yes = pbsapply, no = future_sapply ) p_val <- my.sapply( X = 1:nrow(x = data.use), FUN = function(x) { return(wilcox.test(data.use[x, ] ~ group.info[, "group"], ...)$p.value) } ) return(data.frame(p_val, row.names = rownames(x = data.use))) }
library(plyr) library(dplyr) df1 <- read.table("X_test.txt", stringsAsFactors = FALSE) df2 <- read.table("features.txt", stringsAsFactors = FALSE) vec1 <- df2[,2] names(df1) <- vec1 df3 <- read.table("y_test.txt", stringsAsFactors = FALSE) act_lab <- read.table("activity_labels.txt", stringsAsFactors = FALSE) df5 <- left_join(df3, act_lab) Activity <- df5[,2] df7 <- read.table("subject_test.txt", stringsAsFactors = FALSE ) Subject <- df7[,1] test2 <- grep("mean", vec1) test3 <- grep("std", vec1) df8 <- df1[,c(test2,test3)] df9 <- cbind(Activity, df8) dftest <- cbind(Subject,df9) dftrain1 <- read.table("X_train.txt", stringsAsFactors = FALSE) dftrain3 <- read.table("y_train.txt", stringsAsFactors = FALSE) names(dftrain1) <- vec1 df5 <- left_join(dftrain3, act_lab) Activity <- df5[,2] df7 <- read.table("subject_train.txt", stringsAsFactors = FALSE ) Subject <- df7[,1] df8 <- dftrain1[,c(test2,test3)] df9 <- cbind(Activity, df8) dftrain <- cbind(Subject,df9) dfcombine <- rbind(dftest, dftrain) dfFinal <- dfcombine %>% group_by(Subject, Activity) %>% summarise_each(funs(mean)) write.table(dfFinal, file = "dfFinal.txt", row.names = FALSE)
/run_analysis.R
no_license
TarunSin/TidyData
R
false
false
1,169
r
library(plyr) library(dplyr) df1 <- read.table("X_test.txt", stringsAsFactors = FALSE) df2 <- read.table("features.txt", stringsAsFactors = FALSE) vec1 <- df2[,2] names(df1) <- vec1 df3 <- read.table("y_test.txt", stringsAsFactors = FALSE) act_lab <- read.table("activity_labels.txt", stringsAsFactors = FALSE) df5 <- left_join(df3, act_lab) Activity <- df5[,2] df7 <- read.table("subject_test.txt", stringsAsFactors = FALSE ) Subject <- df7[,1] test2 <- grep("mean", vec1) test3 <- grep("std", vec1) df8 <- df1[,c(test2,test3)] df9 <- cbind(Activity, df8) dftest <- cbind(Subject,df9) dftrain1 <- read.table("X_train.txt", stringsAsFactors = FALSE) dftrain3 <- read.table("y_train.txt", stringsAsFactors = FALSE) names(dftrain1) <- vec1 df5 <- left_join(dftrain3, act_lab) Activity <- df5[,2] df7 <- read.table("subject_train.txt", stringsAsFactors = FALSE ) Subject <- df7[,1] df8 <- dftrain1[,c(test2,test3)] df9 <- cbind(Activity, df8) dftrain <- cbind(Subject,df9) dfcombine <- rbind(dftest, dftrain) dfFinal <- dfcombine %>% group_by(Subject, Activity) %>% summarise_each(funs(mean)) write.table(dfFinal, file = "dfFinal.txt", row.names = FALSE)
#' Sample-mean Estimation #' #' Cluster cells using SNN and a list of given genes, estimate missing expression #' values for each cell-gene combination with the within-cluster non-zero expression #' mean #' #' #' @param expression_matrix Row by column log-normalized expression matrix #' @param subset_genes A vector of informative gene names, defaults to all genes #' @param scale_data Whether to standardize expression by gene, default TRUE #' @param number_pcs Number of dimensions to inform SNN clustering #' @param k_neighbors Number of k neighbors to use for NN network #' @param snn_resolution Resolution parameter for SNN #' @param impute_index Index to impute, will default to all zeroes #' @param pseudo_zero Pseudo-zero expression value #' @param python_path path to your python binary (default = system path) #' @param verbose Print progress output to the console #' #' @return Returns a sparse matrix of class 'dgCMatrix' #' #' #' @export #' #' @examples #' set.seed(0) #' requireNamespace("Matrix") #' #' ## generate (meaningless) counts #' c1 <- stats::rpois(5e3, 1) #' c2 <- stats::rpois(5e3, 2) #' m <- t( #' rbind( #' matrix(c1, nrow = 20), #' matrix(c2, nrow = 20) #' ) #' ) #' #' ## construct an expression matrix m #' colnames(m) <- paste0('cell', 1:ncol(m)) #' rownames(m) <- paste0('gene', 1:nrow(m)) #' m <- log(m/colSums(m)*1e4 + 1) #' m <- methods::as(m, 'dgCMatrix') #' #' ## impute #' \donttest{ #' m_imputed <- rescue::sampleImputation( #' expression_matrix = m, #' k_neighbors = 10 #' ) #' } #' sampleImputation <- function( expression_matrix, subset_genes = NULL, scale_data = TRUE, number_pcs = 8, k_neighbors = 30, snn_resolution = .9, impute_index = NULL, pseudo_zero = NULL, python_path = NULL, verbose = FALSE ) { # consider only a subset of genes for clustering: # e.g. 
highly variable genes or random subsets of genes if(!is.null(subset_genes)) { if(verbose) cat('subsetting genes \n \n') my_small_matrix <- expression_matrix[subset_genes, ] } else { my_small_matrix <- expression_matrix } # impute a temporary pseudo_zero to ensure SNN convergence if(!is.null(pseudo_zero)) my_small_matrix[my_small_matrix == 0] <- pseudo_zero ## STANDARDIZE ## if(verbose) cat('standardizing data \n \n') my_small_matrix_scaled <- Matrix::t(scale(Matrix::t(my_small_matrix), center = TRUE, scale = TRUE)) ## PCA ## if(verbose) cat('computing principal components \n \n') pcs_compute <- min(number_pcs, nrow(x = my_small_matrix_scaled)-1) pca_results <- irlba::irlba(A = my_small_matrix_scaled, nv = pcs_compute) cell_embeddings <- methods::as(pca_results$v, 'dgCMatrix') colnames(cell_embeddings) <- paste0('PC',1:number_pcs) rownames(cell_embeddings) <- colnames(expression_matrix) ## SNN ## if(verbose) cat('constructing nearest neighbors graph \n \n') nn <- constructNN(reduced_object = cell_embeddings, k_neighbors = k_neighbors) if(verbose) cat('clustering nearest neighbors \n \n') cluster_results <- clusterLouvain(nn_network = nn, resolution = snn_resolution, python_path = python_path) clusters <- cluster_results$cluster names(clusters) <- cluster_results$cell_ID ## IMPUTATION step ## impute_result_list <- list() for(cluster in unique(clusters)) { if(verbose) cat('estimating for cluster: ', cluster, '\n') # cells that belong to the cluster temp_mat <- expression_matrix[, which(colnames(expression_matrix) %in% names(clusters[clusters == cluster]))] temp_nonzero <- temp_mat != 0 # calculate mean expression and set NaN to 0 imputed_expr <- Matrix::rowMeans(temp_mat) imputed_expr[is.nan(imputed_expr)] <- 0 imputed_matrix <- Matrix::Matrix( data = rep(imputed_expr, ncol(temp_mat)), ncol = ncol(temp_mat), byrow = FALSE, dimnames = list(rownames(temp_mat), colnames(temp_mat)) ) # replace zero values (or low values) with imputed values if( is.null(impute_index) ) 
zero_index <- temp_mat == 0 else zero_index <- impute_index[, which(colnames(expression_matrix) %in% names(clusters[clusters == cluster]))] imp_temp_mat <- temp_mat imp_temp_mat[zero_index] <- imputed_matrix[zero_index] impute_result_list[[as.character(cluster)]] <- imp_temp_mat } final_impute_result <- do.call('cbind', impute_result_list) final_impute_result <- final_impute_result[match(rownames(expression_matrix), rownames(final_impute_result)), match(colnames(expression_matrix), colnames(final_impute_result))] return(final_impute_result) } #' Bootstrap Imputation #' #' Subsample informative genes, cluster cells using SNN, estimate missing expression #' values with the distribution mean of means extrapolated from these cell clusterings #' #' #' @param expression_matrix Row by column log-normalized expression matrix #' @param select_cells Subset cells if desired #' @param select_genes A vector of highly variable of differentially expressed gene names, #' defaults to the most variable #' @param log_transformed Whether the expression matrix has been log-transformed #' @param log_base If log-transformed, log-base used #' @param proportion_genes Proportion of informative genes to sample #' @param bootstrap_samples Number of samples for the bootstrap #' @param number_pcs Number of dimensions to inform SNN clustering #' @param k_neighbors Number of k neighbors to use for NN network #' @param snn_resolution Resolution parameter for SNN #' @param impute_index Index to impute, will default to all zeroes #' @param use_mclapply Run in parallel, default FALSE #' @param cores Number of cores for parallelization #' @param return_individual_results Return a list of subsampled means #' @param python_path path to your python binary (default = system path) #' @param verbose Print progress output to the console #' #' @return Returns a list with the imputed and original expression matrices #' #' @export #' #' @examples #' set.seed(0) #' requireNamespace("Matrix") #' #' ## generate 
(meaningless) counts #' c1 <- stats::rpois(5e3, 1) #' c2 <- stats::rpois(5e3, 2) #' m <- t( #' rbind( #' matrix(c1, nrow = 20), #' matrix(c2, nrow = 20) #' ) #' ) #' #' ## construct an expression matrix m #' colnames(m) <- paste0('cell', 1:ncol(m)) #' rownames(m) <- paste0('gene', 1:nrow(m)) #' m <- log(m/colSums(m)*1e4 + 1) #' m <- methods::as(m, 'dgCMatrix') #' #' ## impute #' \donttest{ #' m_imputed <- rescue::bootstrapImputation( #' expression_matrix = m, #' proportion_genes = .9, #' bootstrap_samples = 2, #' k_neighbors = 10 #' ) #' } #' bootstrapImputation <- function( expression_matrix, select_cells = NULL, select_genes = NULL, log_transformed = TRUE, log_base = exp(1), proportion_genes = 0.6, bootstrap_samples = 100, number_pcs = 8, k_neighbors = 30, snn_resolution = .9, impute_index = NULL, use_mclapply = FALSE, cores = 2, return_individual_results = FALSE, python_path = NULL, verbose = FALSE ) { if(class(expression_matrix) != 'dgCMatrix') expression_matrix <- methods::as(expression_matrix, 'dgCMatrix') # test cell subsets if(is.null(select_cells)) select_cells = 1:ncol(expression_matrix) else if(verbose) cat('subsetting cells \n \n') expression_matrix <- expression_matrix[, select_cells] ## compute pseudo_zero ## pseudo_zero <- min(expression_matrix[which(methods::as(expression_matrix > 0, 'matrix'))])/2 ## store zero index ## if(is.null(impute_index)) impute_index <- expression_matrix == 0 ## determine variable genes ## if(is.null(select_genes)){ if(verbose) cat('finding variable genes \n \n') # data.table variables selected = NULL hvgs <- computeHVG(expression_matrix, reverse_log_scale = log_transformed, log_base = log_base) select_genes <- hvgs[ selected == 'yes', ]$genes if(is.null(select_genes)) stop('No HVGs detected by default!') else if(verbose) cat('using', length(select_genes), 'variable genes \n \n') } ## determine number of genes to sample ## total_number_of_genes <- length(select_genes) number_of_genes_to_use <- 
floor(total_number_of_genes*proportion_genes) ## BOOTSTRAP ## if(use_mclapply) { result_list <- parallel::mclapply( X = 1:bootstrap_samples, mc.preschedule = FALSE, mc.cores = cores, FUN = function(round) { gene_sample <- sample(x = 1:total_number_of_genes, size = number_of_genes_to_use, replace = FALSE) genes_to_use <- select_genes[gene_sample] temp_impute <- sampleImputation( expression_matrix = expression_matrix, subset_genes = genes_to_use, number_pcs = number_pcs, k_neighbors = k_neighbors, snn_resolution = snn_resolution, impute_index = impute_index, pseudo_zero = pseudo_zero, python_path = python_path, verbose = verbose ) return(temp_impute) }) } else { result_list <- list() for(round in 1:bootstrap_samples) { gene_sample = sample(x = 1:total_number_of_genes, size = number_of_genes_to_use, replace = FALSE) genes_to_use = select_genes[gene_sample] temp_impute = sampleImputation( expression_matrix = expression_matrix, subset_genes = genes_to_use, number_pcs = number_pcs, k_neighbors = k_neighbors, snn_resolution = snn_resolution, impute_index = impute_index, pseudo_zero = pseudo_zero, python_path = python_path, verbose = verbose ) result_list[[as.character(round)]] <- temp_impute if(verbose) cat('sample: ', round, '\n \n') } } ## calculate average of bootstrapped sample means ## final_imputation <- Reduce('+', result_list)/bootstrap_samples final_imputation <- final_imputation ## return data ## if(return_individual_results) { return( list( individual_results = result_list, final_imputation = final_imputation, original_matrix = expression_matrix ) ) } else { return( list( final_imputation = final_imputation, original_matrix = expression_matrix ) ) } }
/R/imputation.R
no_license
RubD/rescue
R
false
false
10,250
r
#' Sample-mean Estimation #' #' Cluster cells using SNN and a list of given genes, estimate missing expression #' values for each cell-gene combination with the within-cluster non-zero expression #' mean #' #' #' @param expression_matrix Row by column log-normalized expression matrix #' @param subset_genes A vector of informative gene names, defaults to all genes #' @param scale_data Whether to standardize expression by gene, default TRUE #' @param number_pcs Number of dimensions to inform SNN clustering #' @param k_neighbors Number of k neighbors to use for NN network #' @param snn_resolution Resolution parameter for SNN #' @param impute_index Index to impute, will default to all zeroes #' @param pseudo_zero Pseudo-zero expression value #' @param python_path path to your python binary (default = system path) #' @param verbose Print progress output to the console #' #' @return Returns a sparse matrix of class 'dgCMatrix' #' #' #' @export #' #' @examples #' set.seed(0) #' requireNamespace("Matrix") #' #' ## generate (meaningless) counts #' c1 <- stats::rpois(5e3, 1) #' c2 <- stats::rpois(5e3, 2) #' m <- t( #' rbind( #' matrix(c1, nrow = 20), #' matrix(c2, nrow = 20) #' ) #' ) #' #' ## construct an expression matrix m #' colnames(m) <- paste0('cell', 1:ncol(m)) #' rownames(m) <- paste0('gene', 1:nrow(m)) #' m <- log(m/colSums(m)*1e4 + 1) #' m <- methods::as(m, 'dgCMatrix') #' #' ## impute #' \donttest{ #' m_imputed <- rescue::sampleImputation( #' expression_matrix = m, #' k_neighbors = 10 #' ) #' } #' sampleImputation <- function( expression_matrix, subset_genes = NULL, scale_data = TRUE, number_pcs = 8, k_neighbors = 30, snn_resolution = .9, impute_index = NULL, pseudo_zero = NULL, python_path = NULL, verbose = FALSE ) { # consider only a subset of genes for clustering: # e.g. 
highly variable genes or random subsets of genes if(!is.null(subset_genes)) { if(verbose) cat('subsetting genes \n \n') my_small_matrix <- expression_matrix[subset_genes, ] } else { my_small_matrix <- expression_matrix } # impute a temporary pseudo_zero to ensure SNN convergence if(!is.null(pseudo_zero)) my_small_matrix[my_small_matrix == 0] <- pseudo_zero ## STANDARDIZE ## if(verbose) cat('standardizing data \n \n') my_small_matrix_scaled <- Matrix::t(scale(Matrix::t(my_small_matrix), center = TRUE, scale = TRUE)) ## PCA ## if(verbose) cat('computing principal components \n \n') pcs_compute <- min(number_pcs, nrow(x = my_small_matrix_scaled)-1) pca_results <- irlba::irlba(A = my_small_matrix_scaled, nv = pcs_compute) cell_embeddings <- methods::as(pca_results$v, 'dgCMatrix') colnames(cell_embeddings) <- paste0('PC',1:number_pcs) rownames(cell_embeddings) <- colnames(expression_matrix) ## SNN ## if(verbose) cat('constructing nearest neighbors graph \n \n') nn <- constructNN(reduced_object = cell_embeddings, k_neighbors = k_neighbors) if(verbose) cat('clustering nearest neighbors \n \n') cluster_results <- clusterLouvain(nn_network = nn, resolution = snn_resolution, python_path = python_path) clusters <- cluster_results$cluster names(clusters) <- cluster_results$cell_ID ## IMPUTATION step ## impute_result_list <- list() for(cluster in unique(clusters)) { if(verbose) cat('estimating for cluster: ', cluster, '\n') # cells that belong to the cluster temp_mat <- expression_matrix[, which(colnames(expression_matrix) %in% names(clusters[clusters == cluster]))] temp_nonzero <- temp_mat != 0 # calculate mean expression and set NaN to 0 imputed_expr <- Matrix::rowMeans(temp_mat) imputed_expr[is.nan(imputed_expr)] <- 0 imputed_matrix <- Matrix::Matrix( data = rep(imputed_expr, ncol(temp_mat)), ncol = ncol(temp_mat), byrow = FALSE, dimnames = list(rownames(temp_mat), colnames(temp_mat)) ) # replace zero values (or low values) with imputed values if( is.null(impute_index) ) 
zero_index <- temp_mat == 0 else zero_index <- impute_index[, which(colnames(expression_matrix) %in% names(clusters[clusters == cluster]))] imp_temp_mat <- temp_mat imp_temp_mat[zero_index] <- imputed_matrix[zero_index] impute_result_list[[as.character(cluster)]] <- imp_temp_mat } final_impute_result <- do.call('cbind', impute_result_list) final_impute_result <- final_impute_result[match(rownames(expression_matrix), rownames(final_impute_result)), match(colnames(expression_matrix), colnames(final_impute_result))] return(final_impute_result) } #' Bootstrap Imputation #' #' Subsample informative genes, cluster cells using SNN, estimate missing expression #' values with the distribution mean of means extrapolated from these cell clusterings #' #' #' @param expression_matrix Row by column log-normalized expression matrix #' @param select_cells Subset cells if desired #' @param select_genes A vector of highly variable of differentially expressed gene names, #' defaults to the most variable #' @param log_transformed Whether the expression matrix has been log-transformed #' @param log_base If log-transformed, log-base used #' @param proportion_genes Proportion of informative genes to sample #' @param bootstrap_samples Number of samples for the bootstrap #' @param number_pcs Number of dimensions to inform SNN clustering #' @param k_neighbors Number of k neighbors to use for NN network #' @param snn_resolution Resolution parameter for SNN #' @param impute_index Index to impute, will default to all zeroes #' @param use_mclapply Run in parallel, default FALSE #' @param cores Number of cores for parallelization #' @param return_individual_results Return a list of subsampled means #' @param python_path path to your python binary (default = system path) #' @param verbose Print progress output to the console #' #' @return Returns a list with the imputed and original expression matrices #' #' @export #' #' @examples #' set.seed(0) #' requireNamespace("Matrix") #' #' ## generate 
(meaningless) counts #' c1 <- stats::rpois(5e3, 1) #' c2 <- stats::rpois(5e3, 2) #' m <- t( #' rbind( #' matrix(c1, nrow = 20), #' matrix(c2, nrow = 20) #' ) #' ) #' #' ## construct an expression matrix m #' colnames(m) <- paste0('cell', 1:ncol(m)) #' rownames(m) <- paste0('gene', 1:nrow(m)) #' m <- log(m/colSums(m)*1e4 + 1) #' m <- methods::as(m, 'dgCMatrix') #' #' ## impute #' \donttest{ #' m_imputed <- rescue::bootstrapImputation( #' expression_matrix = m, #' proportion_genes = .9, #' bootstrap_samples = 2, #' k_neighbors = 10 #' ) #' } #' bootstrapImputation <- function( expression_matrix, select_cells = NULL, select_genes = NULL, log_transformed = TRUE, log_base = exp(1), proportion_genes = 0.6, bootstrap_samples = 100, number_pcs = 8, k_neighbors = 30, snn_resolution = .9, impute_index = NULL, use_mclapply = FALSE, cores = 2, return_individual_results = FALSE, python_path = NULL, verbose = FALSE ) { if(class(expression_matrix) != 'dgCMatrix') expression_matrix <- methods::as(expression_matrix, 'dgCMatrix') # test cell subsets if(is.null(select_cells)) select_cells = 1:ncol(expression_matrix) else if(verbose) cat('subsetting cells \n \n') expression_matrix <- expression_matrix[, select_cells] ## compute pseudo_zero ## pseudo_zero <- min(expression_matrix[which(methods::as(expression_matrix > 0, 'matrix'))])/2 ## store zero index ## if(is.null(impute_index)) impute_index <- expression_matrix == 0 ## determine variable genes ## if(is.null(select_genes)){ if(verbose) cat('finding variable genes \n \n') # data.table variables selected = NULL hvgs <- computeHVG(expression_matrix, reverse_log_scale = log_transformed, log_base = log_base) select_genes <- hvgs[ selected == 'yes', ]$genes if(is.null(select_genes)) stop('No HVGs detected by default!') else if(verbose) cat('using', length(select_genes), 'variable genes \n \n') } ## determine number of genes to sample ## total_number_of_genes <- length(select_genes) number_of_genes_to_use <- 
floor(total_number_of_genes*proportion_genes) ## BOOTSTRAP ## if(use_mclapply) { result_list <- parallel::mclapply( X = 1:bootstrap_samples, mc.preschedule = FALSE, mc.cores = cores, FUN = function(round) { gene_sample <- sample(x = 1:total_number_of_genes, size = number_of_genes_to_use, replace = FALSE) genes_to_use <- select_genes[gene_sample] temp_impute <- sampleImputation( expression_matrix = expression_matrix, subset_genes = genes_to_use, number_pcs = number_pcs, k_neighbors = k_neighbors, snn_resolution = snn_resolution, impute_index = impute_index, pseudo_zero = pseudo_zero, python_path = python_path, verbose = verbose ) return(temp_impute) }) } else { result_list <- list() for(round in 1:bootstrap_samples) { gene_sample = sample(x = 1:total_number_of_genes, size = number_of_genes_to_use, replace = FALSE) genes_to_use = select_genes[gene_sample] temp_impute = sampleImputation( expression_matrix = expression_matrix, subset_genes = genes_to_use, number_pcs = number_pcs, k_neighbors = k_neighbors, snn_resolution = snn_resolution, impute_index = impute_index, pseudo_zero = pseudo_zero, python_path = python_path, verbose = verbose ) result_list[[as.character(round)]] <- temp_impute if(verbose) cat('sample: ', round, '\n \n') } } ## calculate average of bootstrapped sample means ## final_imputation <- Reduce('+', result_list)/bootstrap_samples final_imputation <- final_imputation ## return data ## if(return_individual_results) { return( list( individual_results = result_list, final_imputation = final_imputation, original_matrix = expression_matrix ) ) } else { return( list( final_imputation = final_imputation, original_matrix = expression_matrix ) ) } }
#!usr/bin/env Rscript library(methods) library(Matrix) library(MASS) #library(Rcpp) library(lme4) # Read in your data as an R dataframe basedir <- c("/seastor/helenhelen/ISR_2015") resultdir <- paste(basedir,"/me/tmap/results/mem",sep="/") setwd(resultdir) r.itemInfo <- matrix(data=NA, nr=2, nc=4) ## read data #get data for each trial item_file <- paste(basedir,"/me/tmap/data/item/mem.txt",sep="") item_data <- read.table(item_file,header=FALSE) load(paste("/home/helenhelen/DQ/project/gitrepo/ISR_2015/ROI_based/me/col_names.Rda",sep="")) colnames(item_data) <- col_names item_data$subid <- as.factor(item_data$subid) item_data$pid <- as.factor(item_data$pid) subdata <- item_data itemInfo_actmean <- lmer(DG_rsadiff~RANG_actmean+(1+RANG_actmean|subid),REML=FALSE,data=subdata) itemInfo_actmean.null <- lmer(DG_rsadiff~1+(1+RANG_actmean|subid),REML=FALSE,data=subdata) itemInfo_di <- lmer(DG_rsadiff~RANG_actmean+(1+RANG_rsadiff|subid),REML=FALSE,data=subdata) itemInfo_di.null <- lmer(DG_rsadiff~1+(1+RANG_rsadiff|subid),REML=FALSE,data=subdata) mainEffect.itemInfo_actmean <- anova(itemInfo_actmean,itemInfo_actmean.null) r.itemInfo[1,1]=mainEffect.itemInfo_actmean[2,6] r.itemInfo[1,2]=mainEffect.itemInfo_actmean[2,7] r.itemInfo[1,3]=mainEffect.itemInfo_actmean[2,8] r.itemInfo[1,4]=fixef(itemInfo_actmean)[2]; mainEffect.itemInfo_di <- anova(itemInfo_di,itemInfo_di.null) r.itemInfo[2,1]=mainEffect.itemInfo_di[2,6] r.itemInfo[2,2]=mainEffect.itemInfo_di[2,7] r.itemInfo[2,3]=mainEffect.itemInfo_di[2,8] r.itemInfo[2,4]=fixef(itemInfo_di)[2]; write.matrix(r.itemInfo,file="itemInfso_DG_RANG.txt",sep="\t")
/ROI_based/me/mem/itemInfo_DG_RANG.R
no_license
QQXiao/ISR_2015
R
false
false
1,618
r
#!usr/bin/env Rscript library(methods) library(Matrix) library(MASS) #library(Rcpp) library(lme4) # Read in your data as an R dataframe basedir <- c("/seastor/helenhelen/ISR_2015") resultdir <- paste(basedir,"/me/tmap/results/mem",sep="/") setwd(resultdir) r.itemInfo <- matrix(data=NA, nr=2, nc=4) ## read data #get data for each trial item_file <- paste(basedir,"/me/tmap/data/item/mem.txt",sep="") item_data <- read.table(item_file,header=FALSE) load(paste("/home/helenhelen/DQ/project/gitrepo/ISR_2015/ROI_based/me/col_names.Rda",sep="")) colnames(item_data) <- col_names item_data$subid <- as.factor(item_data$subid) item_data$pid <- as.factor(item_data$pid) subdata <- item_data itemInfo_actmean <- lmer(DG_rsadiff~RANG_actmean+(1+RANG_actmean|subid),REML=FALSE,data=subdata) itemInfo_actmean.null <- lmer(DG_rsadiff~1+(1+RANG_actmean|subid),REML=FALSE,data=subdata) itemInfo_di <- lmer(DG_rsadiff~RANG_actmean+(1+RANG_rsadiff|subid),REML=FALSE,data=subdata) itemInfo_di.null <- lmer(DG_rsadiff~1+(1+RANG_rsadiff|subid),REML=FALSE,data=subdata) mainEffect.itemInfo_actmean <- anova(itemInfo_actmean,itemInfo_actmean.null) r.itemInfo[1,1]=mainEffect.itemInfo_actmean[2,6] r.itemInfo[1,2]=mainEffect.itemInfo_actmean[2,7] r.itemInfo[1,3]=mainEffect.itemInfo_actmean[2,8] r.itemInfo[1,4]=fixef(itemInfo_actmean)[2]; mainEffect.itemInfo_di <- anova(itemInfo_di,itemInfo_di.null) r.itemInfo[2,1]=mainEffect.itemInfo_di[2,6] r.itemInfo[2,2]=mainEffect.itemInfo_di[2,7] r.itemInfo[2,3]=mainEffect.itemInfo_di[2,8] r.itemInfo[2,4]=fixef(itemInfo_di)[2]; write.matrix(r.itemInfo,file="itemInfso_DG_RANG.txt",sep="\t")
# SSF, Alaska, cygwin, RCP: 8.5 # split catch by taxa and store in global env split_Alaska <- split(alaska_ssf_cygwin, alaska_ssf_cygwin$taxonID) list2env(split_Alaska, envir = .GlobalEnv) cygwin_RCP85 <- read.csv("C:/Users/angmel/Documents/MSc-small-scale-fisheries/DBEM x SAU/cygwin_RCP85.csv", header = FALSE) %>% rename(taxonID = V1) # OOOOOOOOOOPS YOU DID NOT MODEL THESE BUT YOU SHOULD HAVE!!!!!!! # remove FOR NOW, accumulate list for after!!!!!!!!!!!!!!!!!!!!!! MISTAKES_al_85_ssf <- anti_join(alaska_ssf_cygwin, cygwin_RCP85, by = "taxonID") alaska_ssf_cygwin_partial <- anti_join(alaska_ssf_cygwin, MISTAKES_al_85_ssf) %>% filter(!taxonID %in% NA) errspp <- numeric() for(i in 1:length(alaska_ssf_cygwin_partial$taxonID)){ taxontemp <-as.character(alaska_ssf_cygwin_partial$taxonID[i]) dbem <- dbem_Import(taxontemp, 1990, 2060, 8.5, "Catch") #RCP dbem <- inner_join(dbem, Alaska_eez_cellID, by = "INDEX") dbem <- dbem %>% select(-eez) if (sum(dbem[1,], na.rm = TRUE) > 0) { # extract DBEM present day average distribution dbem_present <- dbem[1:22] # Sum total catch potential for each year dbem_present_total<-colSums(dbem_present,na.rm=T) #Calculate proportion of each cell in each year in terms of relative catch potential dbem_present_relative<-sweep(dbem_present,MARGIN=2,dbem_present_total,'/') #Take sea around us catch taxacatch<-get(taxontemp) # taxacatchbase<-taxacatch$year_2010[order(taxacatch$year)] # # Average catch over 20 years (2010-1990) # average_catch <- sum(taxacatchbase)/20 # average_catch <- as.numeric(average_catch) # prorate it based on relative abundance distribution dbem_present_prorated<-sweep(dbem_present_relative[,2:length(dbem_present_relative[1,])],MARGIN=2,taxacatch$year_2010,'*') dbem_present_prorated<-data.frame(INDEX=dbem_present$INDEX,dbem_present_prorated) # FUTURE calculations # extract DBEM for future years 2010-2060 relevant dbem_future <- dbem[c(1,22:72)] # sum total catch potential for each year dbem_future_total <- 
colSums(dbem_future, na.rm = T) # Calculate magnitude change between each year's total (i+1)/i dbem_future_total_mag <- data.frame(matrix(seq(1,52), ncol = 52)) for (i in 1:52){ dbem_future_total_mag[i] <- `dbem_future_total`[i+1]/`dbem_future_total`[i] } colnames(dbem_future_total_mag) <- c("INDEX", 2010:2060) # apply value to year n stated to get n+1 # Find future sea around us catch by applying magnitude change to present catch sau_present <- data.frame(matrix(seq(1,70), ncol = 70)) sau_present[1:20] <- taxacatch$year_2010 for (i in 1:51){ sau_present[i+20] <- sau_present[1,i+19]*dbem_future_total_mag[i+1] } colnames(sau_present) <- c(1990:2059) #****************************************** #Calculate proportion of each cell in each year in terms of relative catch potential dbem_future_relative<-sweep(dbem_future,MARGIN=2,dbem_future_total,'/') dbem_future_relative[is.na(dbem_future_relative)] <- 0 # Multiply future proportions with sau total to find actual cell by cell data sau_future <- (sau_present[20:71]) colnames(sau_future) <- c(2009:2060) # ignore 2009, just placeholder sau_future_list <- as.numeric(sau_future) # convert into proper format dbem_future_prorated<-sweep(dbem_future_relative,MARGIN=2,sau_future_list,'*') dbem_future_prorated$INDEX <- dbem_future$INDEX # Join 1990-2060 into one dataframe sau_dbem <- full_join(dbem_present_prorated, dbem_future_prorated, by = "INDEX") sau_dbem[22] <- NULL filepath<-paste("C:/Users/angmel/Documents/MSc-small-scale-fisheries/SAU_DBEM_RESULTS/Alaska/GFDL85/SSF/",taxontemp,".csv",sep="") write.csv(sau_dbem,filepath,row.names=F) } else{ errspp[i] <- taxontemp } } Alaska85_cygwin <- errspp # CHANGE THIS FOR DROBO/CYGWIN Alaska85_cygwin <- data.frame(Alaska85_cygwin) # make data frame format for saving write_csv(Alaska85_cygwin, "C:/Users/angmel/Documents/MSc-small-scale-fisheries/SAU_DBEM_RESULTS/Alaska/GFDL85/SSF/errspp85")
/DBEM x SAU/ssf_al_cygwin_85.R
no_license
angmelanie/small-scale-fisheries
R
false
false
4,233
r
# SSF, Alaska, cygwin, RCP: 8.5 # split catch by taxa and store in global env split_Alaska <- split(alaska_ssf_cygwin, alaska_ssf_cygwin$taxonID) list2env(split_Alaska, envir = .GlobalEnv) cygwin_RCP85 <- read.csv("C:/Users/angmel/Documents/MSc-small-scale-fisheries/DBEM x SAU/cygwin_RCP85.csv", header = FALSE) %>% rename(taxonID = V1) # OOOOOOOOOOPS YOU DID NOT MODEL THESE BUT YOU SHOULD HAVE!!!!!!! # remove FOR NOW, accumulate list for after!!!!!!!!!!!!!!!!!!!!!! MISTAKES_al_85_ssf <- anti_join(alaska_ssf_cygwin, cygwin_RCP85, by = "taxonID") alaska_ssf_cygwin_partial <- anti_join(alaska_ssf_cygwin, MISTAKES_al_85_ssf) %>% filter(!taxonID %in% NA) errspp <- numeric() for(i in 1:length(alaska_ssf_cygwin_partial$taxonID)){ taxontemp <-as.character(alaska_ssf_cygwin_partial$taxonID[i]) dbem <- dbem_Import(taxontemp, 1990, 2060, 8.5, "Catch") #RCP dbem <- inner_join(dbem, Alaska_eez_cellID, by = "INDEX") dbem <- dbem %>% select(-eez) if (sum(dbem[1,], na.rm = TRUE) > 0) { # extract DBEM present day average distribution dbem_present <- dbem[1:22] # Sum total catch potential for each year dbem_present_total<-colSums(dbem_present,na.rm=T) #Calculate proportion of each cell in each year in terms of relative catch potential dbem_present_relative<-sweep(dbem_present,MARGIN=2,dbem_present_total,'/') #Take sea around us catch taxacatch<-get(taxontemp) # taxacatchbase<-taxacatch$year_2010[order(taxacatch$year)] # # Average catch over 20 years (2010-1990) # average_catch <- sum(taxacatchbase)/20 # average_catch <- as.numeric(average_catch) # prorate it based on relative abundance distribution dbem_present_prorated<-sweep(dbem_present_relative[,2:length(dbem_present_relative[1,])],MARGIN=2,taxacatch$year_2010,'*') dbem_present_prorated<-data.frame(INDEX=dbem_present$INDEX,dbem_present_prorated) # FUTURE calculations # extract DBEM for future years 2010-2060 relevant dbem_future <- dbem[c(1,22:72)] # sum total catch potential for each year dbem_future_total <- 
colSums(dbem_future, na.rm = T) # Calculate magnitude change between each year's total (i+1)/i dbem_future_total_mag <- data.frame(matrix(seq(1,52), ncol = 52)) for (i in 1:52){ dbem_future_total_mag[i] <- `dbem_future_total`[i+1]/`dbem_future_total`[i] } colnames(dbem_future_total_mag) <- c("INDEX", 2010:2060) # apply value to year n stated to get n+1 # Find future sea around us catch by applying magnitude change to present catch sau_present <- data.frame(matrix(seq(1,70), ncol = 70)) sau_present[1:20] <- taxacatch$year_2010 for (i in 1:51){ sau_present[i+20] <- sau_present[1,i+19]*dbem_future_total_mag[i+1] } colnames(sau_present) <- c(1990:2059) #****************************************** #Calculate proportion of each cell in each year in terms of relative catch potential dbem_future_relative<-sweep(dbem_future,MARGIN=2,dbem_future_total,'/') dbem_future_relative[is.na(dbem_future_relative)] <- 0 # Multiply future proportions with sau total to find actual cell by cell data sau_future <- (sau_present[20:71]) colnames(sau_future) <- c(2009:2060) # ignore 2009, just placeholder sau_future_list <- as.numeric(sau_future) # convert into proper format dbem_future_prorated<-sweep(dbem_future_relative,MARGIN=2,sau_future_list,'*') dbem_future_prorated$INDEX <- dbem_future$INDEX # Join 1990-2060 into one dataframe sau_dbem <- full_join(dbem_present_prorated, dbem_future_prorated, by = "INDEX") sau_dbem[22] <- NULL filepath<-paste("C:/Users/angmel/Documents/MSc-small-scale-fisheries/SAU_DBEM_RESULTS/Alaska/GFDL85/SSF/",taxontemp,".csv",sep="") write.csv(sau_dbem,filepath,row.names=F) } else{ errspp[i] <- taxontemp } } Alaska85_cygwin <- errspp # CHANGE THIS FOR DROBO/CYGWIN Alaska85_cygwin <- data.frame(Alaska85_cygwin) # make data frame format for saving write_csv(Alaska85_cygwin, "C:/Users/angmel/Documents/MSc-small-scale-fisheries/SAU_DBEM_RESULTS/Alaska/GFDL85/SSF/errspp85")
library(methods) setClass("ecr.summary", # ==== Inheritance # ==== Properties representation ( varname = "character", detail = "logical", type = "character", by = "character", to = "character", summary = "data.frame" ) ) # ------------------------------------------------------------------------------ # Real constructor # ------------------------------------------------------------------------------ setMethod("initialize", "ecr.summary", function(.Object, x, namevar="x", detail=FALSE, ...) { .Object@detail <- detail; .Object@varname <- namevar; .Object@type <- class(x); if (.Object@type == "factor") { .Object@summary <- p.factorSummary(x, namevar); return(.Object); } if (detail == FALSE) { .Object@summary <- p.simpleSummary(x, namevar); } else { .Object@summary <- p.detailSummary(x, namevar); } .Object; } ); # ----------------------------------------------------------------------------- # method show # ----------------------------------------------------------------------------- setMethod("show" ,"ecr.summary" , function(object) { if (object@type == "factor") { #digits = c(0,0,0); align = c("l","r","c","c"); ec.xtable(object@summary, align=align); } else { if (object@detail == TRUE) { #digits = c(0,0,5); #align = c("l","r","c"); ec.xtable(object@summary, align=c("l","r","c"), digits=c(0,0,5)); } else { #digits = c(0,0,4,4,4,4,4); align = c("l","r","c","r","r","c","c"); ec.xtable(object@summary, align=align); } } } ) # ----------------------------------------------------------------------------- # function: ecr.summary (call real constructor) # Return: an object of type ecr.summary # ----------------------------------------------------------------------------- ecr.summary <- function(x, ...) { return(new("ecr.summary", x=x, ...)); }
/R/ecr.summary.class.R
no_license
Epiconcept-Paris/Epiconcepts
R
false
false
2,079
r
library(methods) setClass("ecr.summary", # ==== Inheritance # ==== Properties representation ( varname = "character", detail = "logical", type = "character", by = "character", to = "character", summary = "data.frame" ) ) # ------------------------------------------------------------------------------ # Real constructor # ------------------------------------------------------------------------------ setMethod("initialize", "ecr.summary", function(.Object, x, namevar="x", detail=FALSE, ...) { .Object@detail <- detail; .Object@varname <- namevar; .Object@type <- class(x); if (.Object@type == "factor") { .Object@summary <- p.factorSummary(x, namevar); return(.Object); } if (detail == FALSE) { .Object@summary <- p.simpleSummary(x, namevar); } else { .Object@summary <- p.detailSummary(x, namevar); } .Object; } ); # ----------------------------------------------------------------------------- # method show # ----------------------------------------------------------------------------- setMethod("show" ,"ecr.summary" , function(object) { if (object@type == "factor") { #digits = c(0,0,0); align = c("l","r","c","c"); ec.xtable(object@summary, align=align); } else { if (object@detail == TRUE) { #digits = c(0,0,5); #align = c("l","r","c"); ec.xtable(object@summary, align=c("l","r","c"), digits=c(0,0,5)); } else { #digits = c(0,0,4,4,4,4,4); align = c("l","r","c","r","r","c","c"); ec.xtable(object@summary, align=align); } } } ) # ----------------------------------------------------------------------------- # function: ecr.summary (call real constructor) # Return: an object of type ecr.summary # ----------------------------------------------------------------------------- ecr.summary <- function(x, ...) { return(new("ecr.summary", x=x, ...)); }
\name{s_summarise} \alias{s_summarise} \title{Modified version of dplyr's summarise that uses string arguments} \usage{ s_summarise(.data, ...) } \description{ Modified version of dplyr's summarise that uses string arguments }
/man/s_summarise.Rd
no_license
arturochian/modify
R
false
false
228
rd
\name{s_summarise} \alias{s_summarise} \title{Modified version of dplyr's summarise that uses string arguments} \usage{ s_summarise(.data, ...) } \description{ Modified version of dplyr's summarise that uses string arguments }
#!/usr/bin/R # ---------------------------------------------------------------------------- # library("optparse") option_list <- list( make_option(c("-r", "--reference_csv"), type="character", default=NULL, help="csv ID locations", metavar="character"), make_option(c("-d", "--rates"), type="character", default=NULL, help="Table with rates measurements", metavar="character"), make_option(c("-o", "--out_csv"), type="character", default="out.csv", help="Output [default= %default]", metavar="character")) opt_parser <- OptionParser(option_list=option_list) opt <- parse_args(opt_parser) # ---------------------------------------------------------------------------- # # Input ## get the location of each transcript mouse_reference <- read.csv(opt$reference_csv) rates <- read.table(opt$rates, header=TRUE) # rates <- read.table("input/muridae_dnds_per_orthogroup_wide.txt", header=TRUE) # ---------------------------------------------------------------------------- # mouse_reference <- mouse_reference[!is.na(mouse_reference$orthogroup), ] mouse_reference <- mouse_reference[mouse_reference$orthogroup %in% rates$orthogroup,] # ---------------------------------------------------------------------------- # rates_per_mouse <- rates[match(mouse_reference$orthogroup, rates$orthogroup), c(2:ncol(rates))] rates_per_mouse <- cbind(mouse_reference, rates_per_mouse) # ---------------------------------------------------------------------------- # # Write out write.csv(rates_per_mouse, file = opt$out_csv, row.names = FALSE) # ---------------------------------------------------------------------------- #
/02-alignments_and_rates/bin/dn_ds_measurements_mouse_coord.R
permissive
roddypr/divergent_gerbil_genes
R
false
false
1,670
r
#!/usr/bin/R # ---------------------------------------------------------------------------- # library("optparse") option_list <- list( make_option(c("-r", "--reference_csv"), type="character", default=NULL, help="csv ID locations", metavar="character"), make_option(c("-d", "--rates"), type="character", default=NULL, help="Table with rates measurements", metavar="character"), make_option(c("-o", "--out_csv"), type="character", default="out.csv", help="Output [default= %default]", metavar="character")) opt_parser <- OptionParser(option_list=option_list) opt <- parse_args(opt_parser) # ---------------------------------------------------------------------------- # # Input ## get the location of each transcript mouse_reference <- read.csv(opt$reference_csv) rates <- read.table(opt$rates, header=TRUE) # rates <- read.table("input/muridae_dnds_per_orthogroup_wide.txt", header=TRUE) # ---------------------------------------------------------------------------- # mouse_reference <- mouse_reference[!is.na(mouse_reference$orthogroup), ] mouse_reference <- mouse_reference[mouse_reference$orthogroup %in% rates$orthogroup,] # ---------------------------------------------------------------------------- # rates_per_mouse <- rates[match(mouse_reference$orthogroup, rates$orthogroup), c(2:ncol(rates))] rates_per_mouse <- cbind(mouse_reference, rates_per_mouse) # ---------------------------------------------------------------------------- # # Write out write.csv(rates_per_mouse, file = opt$out_csv, row.names = FALSE) # ---------------------------------------------------------------------------- #
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/park_wait_time.R \name{park_wait_times} \alias{park_wait_times} \title{Get park wait times} \usage{ park_wait_times(park) } \arguments{ \item{park}{Park ID, see \code{parks()}} } \description{ Return a list of park wait times. } \examples{ park_wait_times("WaltDisneyWorldMagicKingdom") }
/man/park_wait_times.Rd
permissive
r4fun/themeparks
R
false
true
368
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/park_wait_time.R \name{park_wait_times} \alias{park_wait_times} \title{Get park wait times} \usage{ park_wait_times(park) } \arguments{ \item{park}{Park ID, see \code{parks()}} } \description{ Return a list of park wait times. } \examples{ park_wait_times("WaltDisneyWorldMagicKingdom") }
# CM3050 - An?lise de S?ries Temporais # Encontro 04 - 18/09/2019 - Series temporais e scripts # script "qualidade_do_ar.R # Desenha o grafico da serie temporal interna "AirPassenger", da # biblioteca "datasets". Emseguida, insere no grafico a linha # de valor medio da serie e sua linha de tendencia data("AirPassengers") # Carrega a serie temporal ?AirPassengers # Exibe a ajuda do RStudio sobre a s?rie # Exibe no console a configuracao temporal da serie print(start((AirPassengers))) print(end((AirPassengers))) print(frequency((AirPassengers))) # Cria o grafico da serie plot(AirPassengers) # Calcula o valor medio e o modelo temporal de regressao linear M <- mean(AirPassengers) ML <- lm(AirPassengers~time(AirPassengers)) # Extrai os coeficientes da linha de tend?ncia CL <- coef(ML)[1] # Coeficiente linear (intercepto) CA <- coef(ML)[2] # Coeficiente angular # Traca a reta de valor medio # lty (line type) = 2: linha tracejada # lwd (line width) = 2: aumenta a espessura # Funcao abline tra?a uma linha reta sobre o grafico ATUAL # Parametros: a = intercpto (no eixo y); b = coeficiente angular abline(a = M, b = 0, col = "blue", lty = 2, lwd = 2) # Traca a linha de tendencia "a + b*t" abline(a = CL, b = CA, col = "red", lty = 4, lwd = 2)
/files/qualidade_do_ar.R
no_license
matheusmf1/TimeSeriesAnalysis
R
false
false
1,269
r
# CM3050 - An?lise de S?ries Temporais # Encontro 04 - 18/09/2019 - Series temporais e scripts # script "qualidade_do_ar.R # Desenha o grafico da serie temporal interna "AirPassenger", da # biblioteca "datasets". Emseguida, insere no grafico a linha # de valor medio da serie e sua linha de tendencia data("AirPassengers") # Carrega a serie temporal ?AirPassengers # Exibe a ajuda do RStudio sobre a s?rie # Exibe no console a configuracao temporal da serie print(start((AirPassengers))) print(end((AirPassengers))) print(frequency((AirPassengers))) # Cria o grafico da serie plot(AirPassengers) # Calcula o valor medio e o modelo temporal de regressao linear M <- mean(AirPassengers) ML <- lm(AirPassengers~time(AirPassengers)) # Extrai os coeficientes da linha de tend?ncia CL <- coef(ML)[1] # Coeficiente linear (intercepto) CA <- coef(ML)[2] # Coeficiente angular # Traca a reta de valor medio # lty (line type) = 2: linha tracejada # lwd (line width) = 2: aumenta a espessura # Funcao abline tra?a uma linha reta sobre o grafico ATUAL # Parametros: a = intercpto (no eixo y); b = coeficiente angular abline(a = M, b = 0, col = "blue", lty = 2, lwd = 2) # Traca a linha de tendencia "a + b*t" abline(a = CL, b = CA, col = "red", lty = 4, lwd = 2)
#' Load entries of the sidb #' #' @param path character string with the path where isdb is stored #' @return R list with all entries #' @export #' @examples #' sidb=loadEntries() loadEntries <- function(path="~/sidb/data/") { entryNames=list.dirs(path, full.names=FALSE, recursive=FALSE) sidbList=lapply(entryNames, FUN=readEntry, path=path) return(sidbList) }
/Rpkg/R/loadEntries.R
no_license
GHGModels/sidb
R
false
false
368
r
#' Load entries of the sidb #' #' @param path character string with the path where isdb is stored #' @return R list with all entries #' @export #' @examples #' sidb=loadEntries() loadEntries <- function(path="~/sidb/data/") { entryNames=list.dirs(path, full.names=FALSE, recursive=FALSE) sidbList=lapply(entryNames, FUN=readEntry, path=path) return(sidbList) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test_coverage.R \name{test_coverage} \alias{test_coverage} \title{Test coverage of package} \usage{ test_coverage(from_tags = TRUE, from_desc = TRUE) } \arguments{ \item{from_tags}{\code{logical} scalar. Checks the files if your test directory for testthis tags. Specifically, if you have the comment \verb{#* @testing myfunction} in any of your test files, myfunction will be marked as tested.} \item{from_desc}{\code{logical} scalar. Checks the \code{desc} argument \code{test_that(...)} of the tests in your test directory for functions names. E.g. if you have a test file that contains \code{test_that("myfunction works", {...})}, myfunction will be marked as tested.} } \value{ A \code{Test_coverage} object. This is a \code{data.frame} containing the following columns: \itemize{ \item fun: Name of the function \item exp: Is function is exported? \item s3: Is function an S3 method? \item tested: Do unit tests exist for function? \item ignore: Is function listed in \file{tests/testthat/_testignore}? } } \description{ This determines the test coverage of the target package based on the \code{desc} argument of \code{test_that()} calls. If you require a more comprehensive analysis of test coverage, try the package \strong{covr} instead. } \details{ \code{test_coverage} looks in \code{.covrignore} for functions that should be ignored for coverage analysis (see \code{\link[usethis:use_coverage]{usethis::use_covr_ignore()}}) } \examples{ \dontrun{ x <- test_coverage() as.data.frame(x) } }
/man/test_coverage.Rd
no_license
jimsforks/testthis
R
false
true
1,583
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test_coverage.R \name{test_coverage} \alias{test_coverage} \title{Test coverage of package} \usage{ test_coverage(from_tags = TRUE, from_desc = TRUE) } \arguments{ \item{from_tags}{\code{logical} scalar. Checks the files if your test directory for testthis tags. Specifically, if you have the comment \verb{#* @testing myfunction} in any of your test files, myfunction will be marked as tested.} \item{from_desc}{\code{logical} scalar. Checks the \code{desc} argument \code{test_that(...)} of the tests in your test directory for functions names. E.g. if you have a test file that contains \code{test_that("myfunction works", {...})}, myfunction will be marked as tested.} } \value{ A \code{Test_coverage} object. This is a \code{data.frame} containing the following columns: \itemize{ \item fun: Name of the function \item exp: Is function is exported? \item s3: Is function an S3 method? \item tested: Do unit tests exist for function? \item ignore: Is function listed in \file{tests/testthat/_testignore}? } } \description{ This determines the test coverage of the target package based on the \code{desc} argument of \code{test_that()} calls. If you require a more comprehensive analysis of test coverage, try the package \strong{covr} instead. } \details{ \code{test_coverage} looks in \code{.covrignore} for functions that should be ignored for coverage analysis (see \code{\link[usethis:use_coverage]{usethis::use_covr_ignore()}}) } \examples{ \dontrun{ x <- test_coverage() as.data.frame(x) } }
###Step 1### library(tree) crx = read.csv("crx.csv") #View(crx) n=dim(crx)[1] set.seed(12345) id=sample(1:n, floor(n*0.8)) training=crx[id,] test=crx[-id,] # id = n*0.8 # training=crx[1:id,] # test=crx[id+1:n,] training.modified = training training.modified[,ncol(training.modified)] = as.factor(training.modified[,ncol(training.modified)]) fit.tree <- tree(Class~.,data=training.modified) plot(fit.tree, type=c("uniform")) text(fit.tree, pretty=0) training.no2=training.modified[-2,] fit.tree.no2 <- tree(Class~.,data=training.no2) plot(fit.tree.no2, type=c("uniform")) text(fit.tree.no2, pretty = 0) ##Tree structure ändras ingenting??????? ###Step 2### cv.tree = cv.tree(fit.tree) plot(cv.tree$size, cv.tree$dev, ylab="Deviance", type="b", main="cross-validation plot") optimal.depth=cv.tree$size[which.min(cv.tree$dev)] pruned.tree = prune.tree(fit.tree, best=optimal.depth) plot(pruned.tree) text(pruned.tree, pretty=0) #Ans: The tree shuld have 6 leaves and the variables selected are A9, A3, A6, A15 and A11 ###Step 3### library(glmnet) x_train = model.matrix( ~ .-1, training[,-16]) lasso.CV <- cv.glmnet(x_train, scale(training$Class), alpha=1, family="binomial") coef(lasso.CV, s="lambda.min") #22st variabler plot(lasso.CV, main="cross-validation plot") optimal.penalty <- lasso.CV$lambda.min lasso.CV$nzero[39] #22st variabler # lambda > 0 is penalty factor. # Optimal penalty parameter value is 0.01037 # Number of components selected by LASSO: 22 # For the smallest value of penalty parameter has a deviance around 0.12 higher. # So the optimal model is better. 
But it is statistically more significant difference # for higher values on penalty parameter ###Step 4### error = function(targets, predictions) { return (sum(targets * log(predictions) + (1-targets) * log(1-predictions))) } test.targets <- test$Class tree.pred <- predict(pruned.tree, test) tree.err <- error(test.targets, tree.pred[,2]) #-41.81758 x_test = model.matrix( ~ .-1, test[,-16]) lasso.pred <- predict(lasso.CV, x_test, s="lambda.min", type="response") lasso.err <- error(test.targets, as.vector(lasso.pred)) #-41.83329 # According to this criterion the lasso model is better. # This criterion might be more reasonable to use than the misclassification rate # because it takes into consideration how likely it is that a prediction is correctly classified # and not only if it is correct or not.
/ML-Labs/Annalisa/tenta1/Assignment_1.R
no_license
Aroonii/MachineLearning_TDDE01
R
false
false
2,398
r
###Step 1### library(tree) crx = read.csv("crx.csv") #View(crx) n=dim(crx)[1] set.seed(12345) id=sample(1:n, floor(n*0.8)) training=crx[id,] test=crx[-id,] # id = n*0.8 # training=crx[1:id,] # test=crx[id+1:n,] training.modified = training training.modified[,ncol(training.modified)] = as.factor(training.modified[,ncol(training.modified)]) fit.tree <- tree(Class~.,data=training.modified) plot(fit.tree, type=c("uniform")) text(fit.tree, pretty=0) training.no2=training.modified[-2,] fit.tree.no2 <- tree(Class~.,data=training.no2) plot(fit.tree.no2, type=c("uniform")) text(fit.tree.no2, pretty = 0) ##Tree structure ändras ingenting??????? ###Step 2### cv.tree = cv.tree(fit.tree) plot(cv.tree$size, cv.tree$dev, ylab="Deviance", type="b", main="cross-validation plot") optimal.depth=cv.tree$size[which.min(cv.tree$dev)] pruned.tree = prune.tree(fit.tree, best=optimal.depth) plot(pruned.tree) text(pruned.tree, pretty=0) #Ans: The tree shuld have 6 leaves and the variables selected are A9, A3, A6, A15 and A11 ###Step 3### library(glmnet) x_train = model.matrix( ~ .-1, training[,-16]) lasso.CV <- cv.glmnet(x_train, scale(training$Class), alpha=1, family="binomial") coef(lasso.CV, s="lambda.min") #22st variabler plot(lasso.CV, main="cross-validation plot") optimal.penalty <- lasso.CV$lambda.min lasso.CV$nzero[39] #22st variabler # lambda > 0 is penalty factor. # Optimal penalty parameter value is 0.01037 # Number of components selected by LASSO: 22 # For the smallest value of penalty parameter has a deviance around 0.12 higher. # So the optimal model is better. 
But it is statistically more significant difference # for higher values on penalty parameter ###Step 4### error = function(targets, predictions) { return (sum(targets * log(predictions) + (1-targets) * log(1-predictions))) } test.targets <- test$Class tree.pred <- predict(pruned.tree, test) tree.err <- error(test.targets, tree.pred[,2]) #-41.81758 x_test = model.matrix( ~ .-1, test[,-16]) lasso.pred <- predict(lasso.CV, x_test, s="lambda.min", type="response") lasso.err <- error(test.targets, as.vector(lasso.pred)) #-41.83329 # According to this criterion the lasso model is better. # This criterion might be more reasonable to use than the misclassification rate # because it takes into consideration how likely it is that a prediction is correctly classified # and not only if it is correct or not.
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/SR_plot_true_y_VS_predicted_y.R \name{SR_plot_true_y_VS_predicted_y} \alias{SR_plot_true_y_VS_predicted_y} \title{Plot true y vs. predicted y} \usage{ SR_plot_true_y_VS_predicted_y( true_y, predicted_y, path_output = NULL, save = FALSE ) } \arguments{ \item{true_y}{numeric} \item{predicted_y}{numeric} \item{path_output}{character} \item{save}{Boolean} } \value{ Boolean value TRUE or FALSE } \description{ e.g. plot a model prediction for y against true y values and calculates several precision metrics. } \examples{ data("mtcars") SR_plot_true_y_VS_predicted_y(true_y = mtcars$hp, predicted_y = mtcars$hp + rnorm(nrow(mtcars), sd = 30)) }
/man/SR_plot_true_y_VS_predicted_y.Rd
permissive
samuelreuther/SRfunctions
R
false
true
795
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/SR_plot_true_y_VS_predicted_y.R \name{SR_plot_true_y_VS_predicted_y} \alias{SR_plot_true_y_VS_predicted_y} \title{Plot true y vs. predicted y} \usage{ SR_plot_true_y_VS_predicted_y( true_y, predicted_y, path_output = NULL, save = FALSE ) } \arguments{ \item{true_y}{numeric} \item{predicted_y}{numeric} \item{path_output}{character} \item{save}{Boolean} } \value{ Boolean value TRUE or FALSE } \description{ e.g. plot a model prediction for y against true y values and calculates several precision metrics. } \examples{ data("mtcars") SR_plot_true_y_VS_predicted_y(true_y = mtcars$hp, predicted_y = mtcars$hp + rnorm(nrow(mtcars), sd = 30)) }
source("Metropolis Hastings - Random walk example.R", echo=F) getwd() setwd("C:/Users/Jaehwan M/Dropbox/Bayesian Statistics") ## example of good traceplot set.seed(61) post0 = mh(n=n, ybar=ybar, n_iter=4e3, mu_init=0, cand_sd=0.9) library("coda") traceplot(as.mcmc(post0$mu[-c(1:500)])) start_indx = 1 nsim = length(post0$mu) ylim = range(post0$mu) pdf("animation/goodchain.pdf", width=9, height=4) for (t in 1:nsim){ plot(start_indx:(start_indx - 1 + t), post0$mu[1:t], type="l", ylim=ylim, xlim = c(start_indx, start_indx + nsim), xlab = "iterartion", ylab = expression(mu), main = "traceplot") Sys.sleep(0.001) } dev.off() start_indx set.seed(61) post1 = mh(n=n, ybar=ybar, n_iter=1e3, mu_init=0.0, cand_sd=0.04) coda::traceplot(as.mcmc(post1$mu[-c(1:500)])) set.seed(61) post1b = mh(n=n, ybar=ybar, n_iter=100e3, mu_init=0.0, cand_sd=0.04) coda::traceplot(as.mcmc(post1b$mu)) ##auotocorrelation. autocorr.plot(as.mcmc(post0$mu)) autocorr.diag(as.mcmc(post0$mu)) autocorr.plot(as.mcmc(post1$mu)) autocorr.diag(as.mcmc(post1$mu)) str(post1b) effectiveSize(as.mcmc(post1b$mu)) ##effective sample size autocorr.plot(as.mcmc(post1b$mu), lag.max =500) thin_interval = 400 thin_index = seq(from=400, 100e3, by =400) head(thin_index) par(mfrow=c(2,1)) traceplot(as.mcmc(post1b$mu)) traceplot(as.mcmc(post1b$mu[thin_index])) autocorr.plot(as.mcmc(post1b$mu[thin_index])) effectiveSize(post1b$mu[thin_index]) length(thin_index) ## effective sample size in the trimmed out chain == actual sample size ## because the the values are approximately uncorrelated # effective sample size in MC. # 1. how many independent samples you'd need to get the same info # 2. length of chain you'd have left over if you removed iterations # or thinned the chain until you got rid of the autocorrelation. effectiveSize(as.mcmc(post0$mu)) # effective sample size of nearly 1000, out of 4000. raftery.diag(as.mcmc(post0$mu)) ?raftery.diag
/lesson_06.R
no_license
troym91/Bayesian-Statistics
R
false
false
2,011
r
source("Metropolis Hastings - Random walk example.R", echo=F) getwd() setwd("C:/Users/Jaehwan M/Dropbox/Bayesian Statistics") ## example of good traceplot set.seed(61) post0 = mh(n=n, ybar=ybar, n_iter=4e3, mu_init=0, cand_sd=0.9) library("coda") traceplot(as.mcmc(post0$mu[-c(1:500)])) start_indx = 1 nsim = length(post0$mu) ylim = range(post0$mu) pdf("animation/goodchain.pdf", width=9, height=4) for (t in 1:nsim){ plot(start_indx:(start_indx - 1 + t), post0$mu[1:t], type="l", ylim=ylim, xlim = c(start_indx, start_indx + nsim), xlab = "iterartion", ylab = expression(mu), main = "traceplot") Sys.sleep(0.001) } dev.off() start_indx set.seed(61) post1 = mh(n=n, ybar=ybar, n_iter=1e3, mu_init=0.0, cand_sd=0.04) coda::traceplot(as.mcmc(post1$mu[-c(1:500)])) set.seed(61) post1b = mh(n=n, ybar=ybar, n_iter=100e3, mu_init=0.0, cand_sd=0.04) coda::traceplot(as.mcmc(post1b$mu)) ##auotocorrelation. autocorr.plot(as.mcmc(post0$mu)) autocorr.diag(as.mcmc(post0$mu)) autocorr.plot(as.mcmc(post1$mu)) autocorr.diag(as.mcmc(post1$mu)) str(post1b) effectiveSize(as.mcmc(post1b$mu)) ##effective sample size autocorr.plot(as.mcmc(post1b$mu), lag.max =500) thin_interval = 400 thin_index = seq(from=400, 100e3, by =400) head(thin_index) par(mfrow=c(2,1)) traceplot(as.mcmc(post1b$mu)) traceplot(as.mcmc(post1b$mu[thin_index])) autocorr.plot(as.mcmc(post1b$mu[thin_index])) effectiveSize(post1b$mu[thin_index]) length(thin_index) ## effective sample size in the trimmed out chain == actual sample size ## because the the values are approximately uncorrelated # effective sample size in MC. # 1. how many independent samples you'd need to get the same info # 2. length of chain you'd have left over if you removed iterations # or thinned the chain until you got rid of the autocorrelation. effectiveSize(as.mcmc(post0$mu)) # effective sample size of nearly 1000, out of 4000. raftery.diag(as.mcmc(post0$mu)) ?raftery.diag
library(RSQLite) library(tidyverse) library(tm) library("XML") library("rvest") db <- dbConnect(RSQLite::SQLite(), dbname="news_saturation (research).sqlite") news <- dbGetQuery(db, "SELECT * FROM top_news") sources <- c("associated-press","al-jazeera-english","bbc-news","bloomberg", "business-insider","breitbart-news","cnbc","cnn","google-news", "independent","reuters","the-economist", "the-huffington-post", "newsweek","the-new-york-times","the-wall-street-journal", "the-washington-post","time","usa-today") # News scraping function scrape_news <- function(news_source, news_url){ page <- xml2::read_html(news_url) if(news_source=="associated-press"){ # get particular content from page content <- rvest::html_nodes(page, "p") #convert to words text <- rvest::html_text(content) # remove photo and link captions from beginning text <- text[substr(text, 0, 1) != "\n"] # remove authors/credits from end cutoff <- grep("___", text)[1] - 1 if(!is.na(cutoff)){ text <- text[1:cutoff]} # collapse vector into one string text <- paste0(text, collapse = " | ") # remove city from beginning of article text <- gsub(".*(AP)) ", "", text)} text } # ---------------- # Associated Press # ---------------- # get all articles in vector ap <- news$articles.url[news$source=="associated-press"] text_col <- character() for(x in ap){ text_col <- c(text_col, scrape_news("associated-press", x))} ap_text <- data.frame("articles.url"=ap, "text"=text_col) ap_text %>% dbWriteTable(conn = db, name = "ap_text") dbDisconnect(db)
/news_scraper.R
no_license
rampje/News-Saturation
R
false
false
1,683
r
library(RSQLite) library(tidyverse) library(tm) library("XML") library("rvest") db <- dbConnect(RSQLite::SQLite(), dbname="news_saturation (research).sqlite") news <- dbGetQuery(db, "SELECT * FROM top_news") sources <- c("associated-press","al-jazeera-english","bbc-news","bloomberg", "business-insider","breitbart-news","cnbc","cnn","google-news", "independent","reuters","the-economist", "the-huffington-post", "newsweek","the-new-york-times","the-wall-street-journal", "the-washington-post","time","usa-today") # News scraping function scrape_news <- function(news_source, news_url){ page <- xml2::read_html(news_url) if(news_source=="associated-press"){ # get particular content from page content <- rvest::html_nodes(page, "p") #convert to words text <- rvest::html_text(content) # remove photo and link captions from beginning text <- text[substr(text, 0, 1) != "\n"] # remove authors/credits from end cutoff <- grep("___", text)[1] - 1 if(!is.na(cutoff)){ text <- text[1:cutoff]} # collapse vector into one string text <- paste0(text, collapse = " | ") # remove city from beginning of article text <- gsub(".*(AP)) ", "", text)} text } # ---------------- # Associated Press # ---------------- # get all articles in vector ap <- news$articles.url[news$source=="associated-press"] text_col <- character() for(x in ap){ text_col <- c(text_col, scrape_news("associated-press", x))} ap_text <- data.frame("articles.url"=ap, "text"=text_col) ap_text %>% dbWriteTable(conn = db, name = "ap_text") dbDisconnect(db)
################################################################################ # Utilities for extracting features from the original data ################################################################################ #' @title Get prefixes up to a character in a vector of strings #' @importFrom stringr str_extract #' @export #' @examples #' get_prefix("this_is_a&test", "&") get_prefix <- function(x, stop_char) { str_extract(x, paste0("[^", stop_char, "]+")) } #' @title Get features associated with a POSIXct date #' @importFrom data.table data.table month year yday mday hour #' @export #' @examples #' Sys.Date() %>% #' as.POSIXct() %>% #' get_date_features() get_date_features <- function(x) { stopifnot("POSIXt" %in% class(x)) data.table(year = year(x), month = month(x), day_of_year = yday(x), day_of_month = mday(x), day_of_week = weekdays(x), hour = hour(x)) }
/R/featurizing.R
no_license
krisrs1128/cleanUtils
R
false
false
915
r
################################################################################ # Utilities for extracting features from the original data ################################################################################ #' @title Get prefixes up to a character in a vector of strings #' @importFrom stringr str_extract #' @export #' @examples #' get_prefix("this_is_a&test", "&") get_prefix <- function(x, stop_char) { str_extract(x, paste0("[^", stop_char, "]+")) } #' @title Get features associated with a POSIXct date #' @importFrom data.table data.table month year yday mday hour #' @export #' @examples #' Sys.Date() %>% #' as.POSIXct() %>% #' get_date_features() get_date_features <- function(x) { stopifnot("POSIXt" %in% class(x)) data.table(year = year(x), month = month(x), day_of_year = yday(x), day_of_month = mday(x), day_of_week = weekdays(x), hour = hour(x)) }
\name{sw} \alias{sw} \docType{data} \title{A dataset of grids for producing spatial predictions of seabed mud content in the southwest Australia Exclusive Economic Zone} \description{This dataset contains 500703 rows of 2 variables including longitude (long), latitude (lat).} \usage{data("sw")} \format{ A data frame with 500703 rows on the following 2 variables. \describe{ \item{\code{long}}{a numeric vector, decimal degree} \item{\code{lat}}{a numeric vector, decimal degree} } } \details{For details, please check the source.} \source{Li, J., Potter, A., Huang, Z., Daniell, J.J., Heap, A., 2010. Predicting Seabed Mud Content across the Australian Margin: Comparison of Statistical and Mathematical Techniques Using a Simulation Experiment. Geoscience Australia, 2010/11, 146pp.} \keyword{datasets}
/man/sw.Rd
no_license
cran/spm
R
false
false
837
rd
\name{sw} \alias{sw} \docType{data} \title{A dataset of grids for producing spatial predictions of seabed mud content in the southwest Australia Exclusive Economic Zone} \description{This dataset contains 500703 rows of 2 variables including longitude (long), latitude (lat).} \usage{data("sw")} \format{ A data frame with 500703 rows on the following 2 variables. \describe{ \item{\code{long}}{a numeric vector, decimal degree} \item{\code{lat}}{a numeric vector, decimal degree} } } \details{For details, please check the source.} \source{Li, J., Potter, A., Huang, Z., Daniell, J.J., Heap, A., 2010. Predicting Seabed Mud Content across the Australian Margin: Comparison of Statistical and Mathematical Techniques Using a Simulation Experiment. Geoscience Australia, 2010/11, 146pp.} \keyword{datasets}
#load gwas results df<-read.table('/data/g***/z***/hihost_gwas/manh_plot/pooled/sig.txt',header = T,stringsAsFactors = F) #only keeps pheno with genome-wide sig gwsig<-aggregate(df$P,list(df$phecode),function(x) min(x,na.rm = T)) gwsig<-gwsig[gwsig[,2]<5e-8,] df<-df[which(df$phecode %in% gwsig[,1]),] df<-df[!is.na(df$P),] df$phecode=as.numeric(sub('X','',df$phecode)) df$logp=log(df$P,10)*-1 #phecode annotation anno<-read.csv('/data/c***/z***/anno/phecode/phecode_icd9_rolled.csv',header = T,stringsAsFactors = F) anno$phecode=anno$PheCode anno<-anno[!duplicated(anno$PheCode),] df<-merge(anno[,c('phecode','Phenotype')],df,by='phecode') df[which(df$Phenotype=='Human immunodeficiency virus [HIV] disease'),'Phenotype']<-'HIV disease' #---generate_new_pos--- df$pos<-df$BP df$chr=df$CHR chr_box<-as.data.frame(matrix(data=NA,nrow=22,ncol=3)) chr_df<-df[df$chr==1,] chr_box[1,1]<-max(chr_df$pos) chr_box[1,2]<-0 chr_box[1,3]<-chr_box[1,1]/2 for (i in 2:22){ chr_df<-df[df$chr==i,] chr_box[i,1]<-max(chr_df$pos) chr_box[i,2]<-max(chr_df$pos)+chr_box[i-1,2] df[which(df$chr==i),'pos']<-df[which(df$chr==i),'pos']+chr_box[i,2] chr_box[i,3]<-chr_box[i,2]+chr_box[i,1]/2 } #--------------------- library(RColorBrewer) library(ggplot2) library(ggrepel) #color color_panel<-c(brewer.pal(9, "Set1")[1:9],brewer.pal(8, "Dark2")[c(1,3,4,6)]) phecode_char<-as.character(unique(df$Phenotype)) df$Phenotype<-as.factor(df$Phenotype) #make a copy #for testing df_copy=df #--------- df<-df[df$P<1e-4,] #label df$label<-ifelse((df$P %in% gwsig[,2]),df$SNP,NA) df_labeled<-df[!is.na(df$label),] df_labeled$label<-ifelse(duplicated(df_labeled$phecode),NA,df_labeled$label) df_unlabeled<-df[is.na(df$label),] df_unlabeled<-df_unlabeled[sample(seq(1,nrow(df_unlabeled)),nrow(df_unlabeled),replace = F),] df<-rbind(df_unlabeled,df_labeled) #write.table(df,'/data/g***/z***/hihost_gwas/manh_plot/pooled/joint_13traits_1e-4.txt',quote = F,sep='\t',row.names = F) #plot 
png(paste0('/data/g***/z***/hihost_gwas/manh_plot/pooled/joint_13traits_1e-4.png'),width = 2300,height = 1600,res=300) set.seed(1) ggplot(df, aes(x=pos, y=logp,label=label)) + scale_x_continuous(breaks=chr_box$V3, labels = c(as.character(seq(1,15)),' ','17','','19','','21','')) + #x axis scale_y_continuous(breaks=c(1,2,3,5,10,20,30),trans='log10') + #x axis geom_point(data = df, shape=21, color = 'black', size=1.75,aes(x = pos, y = logp,fill=Phenotype)) + #points scale_fill_manual(breaks=phecode_char, values=color_panel)+ labs(x = "Chromosome", y = "-log(P)", title = "") + theme_bw() + #rm background theme(panel.grid =element_blank()) + #rm grids theme(legend.position= "top",legend.text=element_text(size=9)) + #theme(legend.position= "right",legend.title = element_blank()) + #set legend labs(fill = "ID phenotypes")+ #set legend title geom_hline(yintercept=log(5e-8,10)*-1, linetype="dashed", color = "black")+ geom_vline(xintercept=chr_box$V2[-1], linetype="dashed", color = "ivory2")+ ggtitle('') + guides(fill=guide_legend(nrow=5,byrow=TRUE,keyheight=0.75,title=''))+ geom_label_repel( #non overlapped labels size=2.5, fill=rgb(255, 255, 255, 200, maxColorValue=255), direction='y', #only work on Y-axis nudge_x=2e8, #shift to the right segment.alpha = 0.5, #transparent of segment min.segment.length = 0.5, fontface='italic' ) dev.off()
/src/BioVU_EA_23k_ID_GWAS_Manhattan_plot.r
permissive
gamazonlab/InfectiousDiseaseResource
R
false
false
3,524
r
#load gwas results df<-read.table('/data/g***/z***/hihost_gwas/manh_plot/pooled/sig.txt',header = T,stringsAsFactors = F) #only keeps pheno with genome-wide sig gwsig<-aggregate(df$P,list(df$phecode),function(x) min(x,na.rm = T)) gwsig<-gwsig[gwsig[,2]<5e-8,] df<-df[which(df$phecode %in% gwsig[,1]),] df<-df[!is.na(df$P),] df$phecode=as.numeric(sub('X','',df$phecode)) df$logp=log(df$P,10)*-1 #phecode annotation anno<-read.csv('/data/c***/z***/anno/phecode/phecode_icd9_rolled.csv',header = T,stringsAsFactors = F) anno$phecode=anno$PheCode anno<-anno[!duplicated(anno$PheCode),] df<-merge(anno[,c('phecode','Phenotype')],df,by='phecode') df[which(df$Phenotype=='Human immunodeficiency virus [HIV] disease'),'Phenotype']<-'HIV disease' #---generate_new_pos--- df$pos<-df$BP df$chr=df$CHR chr_box<-as.data.frame(matrix(data=NA,nrow=22,ncol=3)) chr_df<-df[df$chr==1,] chr_box[1,1]<-max(chr_df$pos) chr_box[1,2]<-0 chr_box[1,3]<-chr_box[1,1]/2 for (i in 2:22){ chr_df<-df[df$chr==i,] chr_box[i,1]<-max(chr_df$pos) chr_box[i,2]<-max(chr_df$pos)+chr_box[i-1,2] df[which(df$chr==i),'pos']<-df[which(df$chr==i),'pos']+chr_box[i,2] chr_box[i,3]<-chr_box[i,2]+chr_box[i,1]/2 } #--------------------- library(RColorBrewer) library(ggplot2) library(ggrepel) #color color_panel<-c(brewer.pal(9, "Set1")[1:9],brewer.pal(8, "Dark2")[c(1,3,4,6)]) phecode_char<-as.character(unique(df$Phenotype)) df$Phenotype<-as.factor(df$Phenotype) #make a copy #for testing df_copy=df #--------- df<-df[df$P<1e-4,] #label df$label<-ifelse((df$P %in% gwsig[,2]),df$SNP,NA) df_labeled<-df[!is.na(df$label),] df_labeled$label<-ifelse(duplicated(df_labeled$phecode),NA,df_labeled$label) df_unlabeled<-df[is.na(df$label),] df_unlabeled<-df_unlabeled[sample(seq(1,nrow(df_unlabeled)),nrow(df_unlabeled),replace = F),] df<-rbind(df_unlabeled,df_labeled) #write.table(df,'/data/g***/z***/hihost_gwas/manh_plot/pooled/joint_13traits_1e-4.txt',quote = F,sep='\t',row.names = F) #plot 
png(paste0('/data/g***/z***/hihost_gwas/manh_plot/pooled/joint_13traits_1e-4.png'),width = 2300,height = 1600,res=300) set.seed(1) ggplot(df, aes(x=pos, y=logp,label=label)) + scale_x_continuous(breaks=chr_box$V3, labels = c(as.character(seq(1,15)),' ','17','','19','','21','')) + #x axis scale_y_continuous(breaks=c(1,2,3,5,10,20,30),trans='log10') + #x axis geom_point(data = df, shape=21, color = 'black', size=1.75,aes(x = pos, y = logp,fill=Phenotype)) + #points scale_fill_manual(breaks=phecode_char, values=color_panel)+ labs(x = "Chromosome", y = "-log(P)", title = "") + theme_bw() + #rm background theme(panel.grid =element_blank()) + #rm grids theme(legend.position= "top",legend.text=element_text(size=9)) + #theme(legend.position= "right",legend.title = element_blank()) + #set legend labs(fill = "ID phenotypes")+ #set legend title geom_hline(yintercept=log(5e-8,10)*-1, linetype="dashed", color = "black")+ geom_vline(xintercept=chr_box$V2[-1], linetype="dashed", color = "ivory2")+ ggtitle('') + guides(fill=guide_legend(nrow=5,byrow=TRUE,keyheight=0.75,title=''))+ geom_label_repel( #non overlapped labels size=2.5, fill=rgb(255, 255, 255, 200, maxColorValue=255), direction='y', #only work on Y-axis nudge_x=2e8, #shift to the right segment.alpha = 0.5, #transparent of segment min.segment.length = 0.5, fontface='italic' ) dev.off()
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{Illumina_to_REFSEQ_MRNA} \alias{Illumina_to_REFSEQ_MRNA} \title{Illumina to REFSEQ_MRNA tabel GEOD.adf <- fread('Illumina HumanHT-12_V4.0_A-GEOD-13475.adf.txt') setnames(GEOD.adf,'Reporter Database Entry [genbank]','genbank') setnames(GEOD.adf,'Reporter Name','illumina_humanht_12_v4') REFSEQ_MRNA <- sapply(GEOD.adf[['genbank']], function(x) stringr::str_split(x,'\\.') ) GEOD.adf$REFSEQ_MRNA <- sapply(REFSEQ_MRNA, "[[", 1 ) setkey(GEOD.adf,'illumina_humanht_12_v4') Illumina_to_REFSEQ_MRNA <- GEOD.adf devtools::use_data(Illumina_to_REFSEQ_MRNA) A dataset containing the} \format{A data frame with 47323 rows and 5 variables:} \usage{ Illumina_to_REFSEQ_MRNA } \description{ Illumina to REFSEQ_MRNA tabel GEOD.adf <- fread('Illumina HumanHT-12_V4.0_A-GEOD-13475.adf.txt') setnames(GEOD.adf,'Reporter Database Entry [genbank]','genbank') setnames(GEOD.adf,'Reporter Name','illumina_humanht_12_v4') REFSEQ_MRNA <- sapply(GEOD.adf[['genbank']], function(x) stringr::str_split(x,'\\.') ) GEOD.adf$REFSEQ_MRNA <- sapply(REFSEQ_MRNA, "[[", 1 ) setkey(GEOD.adf,'illumina_humanht_12_v4') Illumina_to_REFSEQ_MRNA <- GEOD.adf devtools::use_data(Illumina_to_REFSEQ_MRNA) A dataset containing the } \keyword{datasets}
/man/Illumina_to_REFSEQ_MRNA.Rd
no_license
GrosseLab/BGSC
R
false
true
1,319
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{Illumina_to_REFSEQ_MRNA} \alias{Illumina_to_REFSEQ_MRNA} \title{Illumina to REFSEQ_MRNA tabel GEOD.adf <- fread('Illumina HumanHT-12_V4.0_A-GEOD-13475.adf.txt') setnames(GEOD.adf,'Reporter Database Entry [genbank]','genbank') setnames(GEOD.adf,'Reporter Name','illumina_humanht_12_v4') REFSEQ_MRNA <- sapply(GEOD.adf[['genbank']], function(x) stringr::str_split(x,'\\.') ) GEOD.adf$REFSEQ_MRNA <- sapply(REFSEQ_MRNA, "[[", 1 ) setkey(GEOD.adf,'illumina_humanht_12_v4') Illumina_to_REFSEQ_MRNA <- GEOD.adf devtools::use_data(Illumina_to_REFSEQ_MRNA) A dataset containing the} \format{A data frame with 47323 rows and 5 variables:} \usage{ Illumina_to_REFSEQ_MRNA } \description{ Illumina to REFSEQ_MRNA tabel GEOD.adf <- fread('Illumina HumanHT-12_V4.0_A-GEOD-13475.adf.txt') setnames(GEOD.adf,'Reporter Database Entry [genbank]','genbank') setnames(GEOD.adf,'Reporter Name','illumina_humanht_12_v4') REFSEQ_MRNA <- sapply(GEOD.adf[['genbank']], function(x) stringr::str_split(x,'\\.') ) GEOD.adf$REFSEQ_MRNA <- sapply(REFSEQ_MRNA, "[[", 1 ) setkey(GEOD.adf,'illumina_humanht_12_v4') Illumina_to_REFSEQ_MRNA <- GEOD.adf devtools::use_data(Illumina_to_REFSEQ_MRNA) A dataset containing the } \keyword{datasets}
library(tidyverse) library(data.table) library(writexl) library(segmented) library(MASS) library(caret) rm(list=ls()) set.seed(123) ###--- Set parameters # setting the working directory and select the datasets #### Settings sspp= "MUT" gsa="17" #assign gsa code to input file name (i.e. 9_11_ for two GSAs) sel="total"## alternatives: "total"; "sizethreshold" input_dir="~/CNR/MSFD/github/release" variables="year" #c("year", "month") indicator="L95" # alternatives= L95 ; Pmega stand="N" # Y if you use data from gam prediction, "N" if you are using data from LFD. stand_mon=6 # If line 23 is "Y", write here the month on which the data are predicted. pop_sym="Y" ### get WD ##### dir_t=paste0(input_dir, "/","GSA",gsa,"/", sspp,"/", sep="") dir.create(paste0(dir_t, "/seg_reg")) #for(i in 1:length(species_list)){ #species=species_list[i] ### Run #### species=sspp pop=ifelse(sel=="total", "_whole_", "_mat_") if(stand=="Y"){ data <- read_csv(paste0(dir_t,"/gam/",indicator,pop, sspp,"_standardized_", stand_mon,".csv"))%>% dplyr::select("Indicator"= stand, variables) }else{ data <- read_csv(paste0(dir_t,"/",sspp,"_GSA",gsa,"_Indicators_", sel,".csv"))%>% dplyr::select("Indicator"= indicator, variables) } ## Piecewise regression #### # Model selection if(length(variables)>1){ train.control <- trainControl(method = "cv", number = 3) # Set up repeated k-fold cross-validation step.model <- train(Indicator ~., data = data, method = "leapBackward", tuneGrid = data.frame(nvmax = 1:3), trControl = train.control) best_model=step.model[["finalModel"]][["xnames"]][-1] form=as.formula(paste("Indicator ~ ",paste(best_model,collapse="+"),sep = "")) lm0=glm(form, data=data) }else{ best_model="year" lm0=glm(Indicator ~ year, data=data) } summary(lm0) png(filename=paste0(dir_t, "/seg_reg", "/", species, "_GSA", gsa,indicator,pop, "_summary_model.png"),height = 20, width = 30, units = "cm", res = 600) par(mfrow=c(2,2)) plot(lm0) dev.off() # regression my.seg <- segmented(lm0, seg.Z = ~ year ) 
png(filename=paste0(dir_t, "/seg_reg", "/", species, "_GSA", gsa,indicator, pop, "_segmented.png"),height = 20, width = 30, units = "cm", res = 600) par(mfrow=c(1,1)) plot(my.seg) dev.off() summary(my.seg) ###### Test bp=my.seg$psi AP=data%>% dplyr::filter(year > round(as.numeric(bp[2]))) if(is.na(pop_sym)==FALSE){ #### Get ref points BRP= read_csv(paste0(dir_t,"pop_sim/",indicator,"RP.csv"))%>% dplyr::select("Indicator"= "val", F_ref)%>% dplyr::mutate(Indicator=Indicator*10) periods=tibble(Indicator=c(BRP$Indicator, AP$Indicator), dat=c(rep("BRP", nrow(BRP)), rep("AP", nrow(AP)))) ## Identify larger variance for set order in var.test large_period=periods%>% dplyr::group_by(dat)%>% dplyr::summarise(var=var(Indicator))%>% arrange(desc(var)) var1=periods%>%dplyr::filter(dat==large_period[[1,1]]) var2=periods%>%dplyr::filter(dat!=large_period[[1,1]]) variance_sign<-var.test(var1$Indicator, var2$Indicator, alternative="greater")[["p.value"]] if(variance_sign >= 0.05){ ttest<-t.test(BRP$Indicator , AP$Indicator, var.equal = TRUE) }else{ ttest<-t.test(BRP$Indicator , AP$Indicator, var.equal = FALSE) } meanBRP=mean(BRP$Indicator) meanAP=mean(AP$Indicator) regr.p=ttest$p.value AP$Val=AP$Indicator BRP$Val=BRP$Indicator }else{ BRP=data%>% dplyr::filter(year <= round(as.numeric(bp[2]))) periods=tibble(Indicator=c(BRP$Indicator, AP$Indicator), dat=c(rep("BRP", nrow(BRP)), rep("AP", nrow(AP)))) ## Identify larger variance for set order in var.test large_period=periods%>% dplyr::group_by(dat)%>% dplyr::summarise(var=var(Indicator))%>% arrange(desc(var)) var1=periods%>%dplyr::filter(dat==large_period[[1,1]]) var2=periods%>%dplyr::filter(dat!=large_period[[1,1]]) var(RefP$val) variance_sign<-var.test(var1$Indicator, var2$Indicator, alternative="greater")[["p.value"]] if(variance_sign >= 0.05){ ttest<-t.test(BRP$Indicator , AP$Indicator, var.equal = TRUE) }else{ ttest<-t.test(BRP$Indicator , AP$Indicator, var.equal = FALSE) } meanBRP=mean(BRP$Indicator) meanAP=mean(AP$Indicator) 
regr.p=ttest$p.value AP$Val=AP$Indicator BRP$Val=BRP$Indicator } ## Spearman analysis #### sprmn=cor.test( ~ Indicator + year, data=data, method = "spearman", continuity = FALSE, conf.level = 0.95) sprmn.p= sprmn$p.value rho= sprmn$estimate[[1]] #### ----------- Trend analysis: linear regression on the last five years APTA<-data%>%dplyr::filter(year > (max(year)-6)) ## Last five years reg<-summary(lm(APTA$Indicator ~ APTA$year)) if(reg[["coefficients"]][2,4] >= 0.05){ print("No significant trend detected in TA") TA=0 }else if(reg[["coefficients"]][2,4] <= 0.05 & reg[["coefficients"]][2,1]>0 ){ print("Significant increasing trend detected in TA") TA=1 }else if (reg[["coefficients"]][2,4] <= 0.05 & reg[["coefficients"]][2,1]<0 ){ print("Significant decreasing trend detected in TA") TA=-1 } #### Result table #### dev_expl=1-(summary(lm0)[["deviance"]]/summary(lm0)[["null.deviance"]]) res=tibble(species=paste(species, gsa, sep="_"),formula= paste(best_model,collapse="+"), bp.year=bp[2], slope.1= slope(my.seg)$year[1,1], slope.2=slope(my.seg)$year[2,1], bp.fitted=dev_expl,mean.BRP= meanBRP, meanAP= meanAP, bp.test= regr.p , spearman.p= sprmn.p, rho.spearman=rho, trend_sign =reg[["coefficients"]][2,4], trend = reg[["coefficients"]][2,1]) ### Final plot #### newdat=bind_cols(data, fit=my.seg$fitted.values) dataplot=bind_rows(newdat%>%dplyr::group_by(year)%>%dplyr::summarize(Indicator=mean(Indicator))%>%dplyr::mutate(source="observed"), newdat%>%dplyr::group_by(year)%>%dplyr::summarize(Indicator=mean(fit))%>%dplyr::mutate(source="fitted")) if(nrow(dataplot%>%distinct(year)) > 20 ){ spaz=5 }else{ spaz=2 } baseplot=function(dat, clr, lbl){ ggplot(data=dat %>%dplyr::filter(source=="observed"))+ geom_point(aes(x=year, y=Indicator, color=source), size=3)+ geom_vline(aes(xintercept=bp[2]), linetype="dashed")+ geom_rect(aes(xmin = min(AP$year), xmax = max(AP$year), ymin =mean(AP$Val)-sd(AP$Val), ymax = mean(AP$Val)+sd(AP$Val)),alpha = 0.005, fill = clr)+ylab(indicator)+ 
geom_segment(x=min(AP$year),xend=max(AP$year),y=mean(AP$Val), yend=mean(AP$Val), size=1, linetype = "dashed", color=clr)+ #geom_rect(aes(xmin = min(BRP$year), xmax = max(BRP$year), ymin =mean(BRP$Val)-sd(BRP$Val), ymax = mean(BRP$Val)+sd(BRP$Val)),alpha = 0.005, fill = "blue")+ #geom_segment(x=min(BRP$year),xend=max(BRP$year),y=mean(BRP$Val), yend=mean(BRP$Val), size=1, linetype = "dashed", color="blue")+ geom_segment(x=min(BRP$year),xend=max(BRP$year), y=BRP[as.numeric(str_remove(BRP$F_ref, "F"))== min(as.numeric(str_remove(BRP$F_ref, "F"))),][[1]], yend=BRP[as.numeric(str_remove(BRP$F_ref, "F"))==min(as.numeric(str_remove(BRP$F_ref, "F"))),][[1]], size=1, linetype = "dashed", color="black")+ annotate("text", x=min(dat$year+spaz), y=BRP[as.numeric(str_remove(BRP$F_ref, "F"))== min(as.numeric(str_remove(BRP$F_ref, "F"))),][[1]]+1.8, label= expression(paste("F" ["0"])))+ geom_segment(x=min(BRP$year),xend=max(BRP$year), y=BRP[as.numeric(str_remove(BRP$F_ref, "F"))== max(as.numeric(str_remove(BRP$F_ref, "F"))),][[1]], yend=BRP[as.numeric(str_remove(BRP$F_ref, "F"))==max(as.numeric(str_remove(BRP$F_ref, "F"))),][[1]], size=1, linetype = "dashed", color="black")+ annotate("text", x=min(dat$year+spaz), y=BRP[as.numeric(str_remove(BRP$F_ref, "F"))== max(as.numeric(str_remove(BRP$F_ref, "F"))),][[1]]-1.8, label= expression(paste("F" ["01"])))+ theme_bw()+scale_x_continuous(breaks=seq(min(data$year),max(data$year),spaz))+ theme_classic()+ annotate("text", x=max(dat$year-spaz), y=max(dat$Indicator)-((max(dat$Indicator)-min(dat$Indicator))*0.02), label= lbl)+ ggtitle(paste(sspp,"GSA",gsa,indicator,"segmented regression on", pop, "population"))+ geom_smooth(data=dataplot%>%dplyr::filter(source=="fitted"),aes(year, Indicator, color=source), span = 0.5 ,stat="identity") } if(regr.p >= 0.05){ pl=baseplot(dataplot, "blue", "BPA result: AP = Ref Point") }else if(regr.p < 0.05){ if(meanAP > meanBRP){ pl=baseplot(dataplot, "green", "BPA result: AP > Ref Point") }else{ 
pl=baseplot(dataplot, "red", "BPA result: AP < Ref Point") } } if(TA==0){ pl+ geom_smooth(method = "lm", se = TRUE, color="black", linetype="dashed", size=1.5, data=APTA,aes(year, Indicator))+ annotate("text", x=max(dataplot$year-spaz), y=max(dataplot$Indicator)-((max(dataplot$Indicator)-min(dataplot$Indicator))*0.06), label= "No sign trend in recent years") }else if(TA==1){ pl+ geom_smooth(method = "lm", se = TRUE, color="green", linetype="dashed", size=1.5, data=APTA,aes(year, Indicator))+ annotate("text", x=max(dataplot$year-spaz), y=max(dataplot$Indicator)-((max(dataplot$Indicator)-min(dataplot$Indicator))*0.06), label= "Significant increasing trend detected in recent years") }else if(TA==-1){ pl+ geom_smooth(method = "lm", se = TRUE, color="red", linetype="dashed", size=1.5, data=APTA,aes(year, Indicator))+ annotate("text", x=max(dataplot$year-spaz), y=max(dataplot$Indicator)-((max(dataplot$Indicator)-min(dataplot$Indicator))*0.06), label= "Significant decreasing trend detected in recent years") } ### Save #### writexl::write_xlsx(res,paste0(dir_t, "/seg_reg", "/", species, "_GSA", gsa,indicator,pop, "_results.xlsx")) saveRDS(list(lm0, my.seg), file=paste0(dir_t, "/seg_reg", "/", species, "_GSA", gsa,indicator,pop, "_models.rds")) ggsave(file=paste0(dir_t, "/seg_reg", "/", species, "_GSA", gsa,indicator,pop, "_segmented_fit_vs_obs.png"), width=12) #}
/step_3_piecewise_and_spearman_sim.R
no_license
EnricoNArmelloni/MSFD
R
false
false
10,680
r
library(tidyverse) library(data.table) library(writexl) library(segmented) library(MASS) library(caret) rm(list=ls()) set.seed(123) ###--- Set parameters # setting the working directory and select the datasets #### Settings sspp= "MUT" gsa="17" #assign gsa code to input file name (i.e. 9_11_ for two GSAs) sel="total"## alternatives: "total"; "sizethreshold" input_dir="~/CNR/MSFD/github/release" variables="year" #c("year", "month") indicator="L95" # alternatives= L95 ; Pmega stand="N" # Y if you use data from gam prediction, "N" if you are using data from LFD. stand_mon=6 # If line 23 is "Y", write here the month on which the data are predicted. pop_sym="Y" ### get WD ##### dir_t=paste0(input_dir, "/","GSA",gsa,"/", sspp,"/", sep="") dir.create(paste0(dir_t, "/seg_reg")) #for(i in 1:length(species_list)){ #species=species_list[i] ### Run #### species=sspp pop=ifelse(sel=="total", "_whole_", "_mat_") if(stand=="Y"){ data <- read_csv(paste0(dir_t,"/gam/",indicator,pop, sspp,"_standardized_", stand_mon,".csv"))%>% dplyr::select("Indicator"= stand, variables) }else{ data <- read_csv(paste0(dir_t,"/",sspp,"_GSA",gsa,"_Indicators_", sel,".csv"))%>% dplyr::select("Indicator"= indicator, variables) } ## Piecewise regression #### # Model selection if(length(variables)>1){ train.control <- trainControl(method = "cv", number = 3) # Set up repeated k-fold cross-validation step.model <- train(Indicator ~., data = data, method = "leapBackward", tuneGrid = data.frame(nvmax = 1:3), trControl = train.control) best_model=step.model[["finalModel"]][["xnames"]][-1] form=as.formula(paste("Indicator ~ ",paste(best_model,collapse="+"),sep = "")) lm0=glm(form, data=data) }else{ best_model="year" lm0=glm(Indicator ~ year, data=data) } summary(lm0) png(filename=paste0(dir_t, "/seg_reg", "/", species, "_GSA", gsa,indicator,pop, "_summary_model.png"),height = 20, width = 30, units = "cm", res = 600) par(mfrow=c(2,2)) plot(lm0) dev.off() # regression my.seg <- segmented(lm0, seg.Z = ~ year ) 
png(filename=paste0(dir_t, "/seg_reg", "/", species, "_GSA", gsa,indicator, pop, "_segmented.png"),height = 20, width = 30, units = "cm", res = 600) par(mfrow=c(1,1)) plot(my.seg) dev.off() summary(my.seg) ###### Test bp=my.seg$psi AP=data%>% dplyr::filter(year > round(as.numeric(bp[2]))) if(is.na(pop_sym)==FALSE){ #### Get ref points BRP= read_csv(paste0(dir_t,"pop_sim/",indicator,"RP.csv"))%>% dplyr::select("Indicator"= "val", F_ref)%>% dplyr::mutate(Indicator=Indicator*10) periods=tibble(Indicator=c(BRP$Indicator, AP$Indicator), dat=c(rep("BRP", nrow(BRP)), rep("AP", nrow(AP)))) ## Identify larger variance for set order in var.test large_period=periods%>% dplyr::group_by(dat)%>% dplyr::summarise(var=var(Indicator))%>% arrange(desc(var)) var1=periods%>%dplyr::filter(dat==large_period[[1,1]]) var2=periods%>%dplyr::filter(dat!=large_period[[1,1]]) variance_sign<-var.test(var1$Indicator, var2$Indicator, alternative="greater")[["p.value"]] if(variance_sign >= 0.05){ ttest<-t.test(BRP$Indicator , AP$Indicator, var.equal = TRUE) }else{ ttest<-t.test(BRP$Indicator , AP$Indicator, var.equal = FALSE) } meanBRP=mean(BRP$Indicator) meanAP=mean(AP$Indicator) regr.p=ttest$p.value AP$Val=AP$Indicator BRP$Val=BRP$Indicator }else{ BRP=data%>% dplyr::filter(year <= round(as.numeric(bp[2]))) periods=tibble(Indicator=c(BRP$Indicator, AP$Indicator), dat=c(rep("BRP", nrow(BRP)), rep("AP", nrow(AP)))) ## Identify larger variance for set order in var.test large_period=periods%>% dplyr::group_by(dat)%>% dplyr::summarise(var=var(Indicator))%>% arrange(desc(var)) var1=periods%>%dplyr::filter(dat==large_period[[1,1]]) var2=periods%>%dplyr::filter(dat!=large_period[[1,1]]) var(RefP$val) variance_sign<-var.test(var1$Indicator, var2$Indicator, alternative="greater")[["p.value"]] if(variance_sign >= 0.05){ ttest<-t.test(BRP$Indicator , AP$Indicator, var.equal = TRUE) }else{ ttest<-t.test(BRP$Indicator , AP$Indicator, var.equal = FALSE) } meanBRP=mean(BRP$Indicator) meanAP=mean(AP$Indicator) 
regr.p=ttest$p.value AP$Val=AP$Indicator BRP$Val=BRP$Indicator } ## Spearman analysis #### sprmn=cor.test( ~ Indicator + year, data=data, method = "spearman", continuity = FALSE, conf.level = 0.95) sprmn.p= sprmn$p.value rho= sprmn$estimate[[1]] #### ----------- Trend analysis: linear regression on the last five years APTA<-data%>%dplyr::filter(year > (max(year)-6)) ## Last five years reg<-summary(lm(APTA$Indicator ~ APTA$year)) if(reg[["coefficients"]][2,4] >= 0.05){ print("No significant trend detected in TA") TA=0 }else if(reg[["coefficients"]][2,4] <= 0.05 & reg[["coefficients"]][2,1]>0 ){ print("Significant increasing trend detected in TA") TA=1 }else if (reg[["coefficients"]][2,4] <= 0.05 & reg[["coefficients"]][2,1]<0 ){ print("Significant decreasing trend detected in TA") TA=-1 } #### Result table #### dev_expl=1-(summary(lm0)[["deviance"]]/summary(lm0)[["null.deviance"]]) res=tibble(species=paste(species, gsa, sep="_"),formula= paste(best_model,collapse="+"), bp.year=bp[2], slope.1= slope(my.seg)$year[1,1], slope.2=slope(my.seg)$year[2,1], bp.fitted=dev_expl,mean.BRP= meanBRP, meanAP= meanAP, bp.test= regr.p , spearman.p= sprmn.p, rho.spearman=rho, trend_sign =reg[["coefficients"]][2,4], trend = reg[["coefficients"]][2,1]) ### Final plot #### newdat=bind_cols(data, fit=my.seg$fitted.values) dataplot=bind_rows(newdat%>%dplyr::group_by(year)%>%dplyr::summarize(Indicator=mean(Indicator))%>%dplyr::mutate(source="observed"), newdat%>%dplyr::group_by(year)%>%dplyr::summarize(Indicator=mean(fit))%>%dplyr::mutate(source="fitted")) if(nrow(dataplot%>%distinct(year)) > 20 ){ spaz=5 }else{ spaz=2 } baseplot=function(dat, clr, lbl){ ggplot(data=dat %>%dplyr::filter(source=="observed"))+ geom_point(aes(x=year, y=Indicator, color=source), size=3)+ geom_vline(aes(xintercept=bp[2]), linetype="dashed")+ geom_rect(aes(xmin = min(AP$year), xmax = max(AP$year), ymin =mean(AP$Val)-sd(AP$Val), ymax = mean(AP$Val)+sd(AP$Val)),alpha = 0.005, fill = clr)+ylab(indicator)+ 
geom_segment(x=min(AP$year),xend=max(AP$year),y=mean(AP$Val), yend=mean(AP$Val), size=1, linetype = "dashed", color=clr)+ #geom_rect(aes(xmin = min(BRP$year), xmax = max(BRP$year), ymin =mean(BRP$Val)-sd(BRP$Val), ymax = mean(BRP$Val)+sd(BRP$Val)),alpha = 0.005, fill = "blue")+ #geom_segment(x=min(BRP$year),xend=max(BRP$year),y=mean(BRP$Val), yend=mean(BRP$Val), size=1, linetype = "dashed", color="blue")+ geom_segment(x=min(BRP$year),xend=max(BRP$year), y=BRP[as.numeric(str_remove(BRP$F_ref, "F"))== min(as.numeric(str_remove(BRP$F_ref, "F"))),][[1]], yend=BRP[as.numeric(str_remove(BRP$F_ref, "F"))==min(as.numeric(str_remove(BRP$F_ref, "F"))),][[1]], size=1, linetype = "dashed", color="black")+ annotate("text", x=min(dat$year+spaz), y=BRP[as.numeric(str_remove(BRP$F_ref, "F"))== min(as.numeric(str_remove(BRP$F_ref, "F"))),][[1]]+1.8, label= expression(paste("F" ["0"])))+ geom_segment(x=min(BRP$year),xend=max(BRP$year), y=BRP[as.numeric(str_remove(BRP$F_ref, "F"))== max(as.numeric(str_remove(BRP$F_ref, "F"))),][[1]], yend=BRP[as.numeric(str_remove(BRP$F_ref, "F"))==max(as.numeric(str_remove(BRP$F_ref, "F"))),][[1]], size=1, linetype = "dashed", color="black")+ annotate("text", x=min(dat$year+spaz), y=BRP[as.numeric(str_remove(BRP$F_ref, "F"))== max(as.numeric(str_remove(BRP$F_ref, "F"))),][[1]]-1.8, label= expression(paste("F" ["01"])))+ theme_bw()+scale_x_continuous(breaks=seq(min(data$year),max(data$year),spaz))+ theme_classic()+ annotate("text", x=max(dat$year-spaz), y=max(dat$Indicator)-((max(dat$Indicator)-min(dat$Indicator))*0.02), label= lbl)+ ggtitle(paste(sspp,"GSA",gsa,indicator,"segmented regression on", pop, "population"))+ geom_smooth(data=dataplot%>%dplyr::filter(source=="fitted"),aes(year, Indicator, color=source), span = 0.5 ,stat="identity") } if(regr.p >= 0.05){ pl=baseplot(dataplot, "blue", "BPA result: AP = Ref Point") }else if(regr.p < 0.05){ if(meanAP > meanBRP){ pl=baseplot(dataplot, "green", "BPA result: AP > Ref Point") }else{ 
pl=baseplot(dataplot, "red", "BPA result: AP < Ref Point") } } if(TA==0){ pl+ geom_smooth(method = "lm", se = TRUE, color="black", linetype="dashed", size=1.5, data=APTA,aes(year, Indicator))+ annotate("text", x=max(dataplot$year-spaz), y=max(dataplot$Indicator)-((max(dataplot$Indicator)-min(dataplot$Indicator))*0.06), label= "No sign trend in recent years") }else if(TA==1){ pl+ geom_smooth(method = "lm", se = TRUE, color="green", linetype="dashed", size=1.5, data=APTA,aes(year, Indicator))+ annotate("text", x=max(dataplot$year-spaz), y=max(dataplot$Indicator)-((max(dataplot$Indicator)-min(dataplot$Indicator))*0.06), label= "Significant increasing trend detected in recent years") }else if(TA==-1){ pl+ geom_smooth(method = "lm", se = TRUE, color="red", linetype="dashed", size=1.5, data=APTA,aes(year, Indicator))+ annotate("text", x=max(dataplot$year-spaz), y=max(dataplot$Indicator)-((max(dataplot$Indicator)-min(dataplot$Indicator))*0.06), label= "Significant decreasing trend detected in recent years") } ### Save #### writexl::write_xlsx(res,paste0(dir_t, "/seg_reg", "/", species, "_GSA", gsa,indicator,pop, "_results.xlsx")) saveRDS(list(lm0, my.seg), file=paste0(dir_t, "/seg_reg", "/", species, "_GSA", gsa,indicator,pop, "_models.rds")) ggsave(file=paste0(dir_t, "/seg_reg", "/", species, "_GSA", gsa,indicator,pop, "_segmented_fit_vs_obs.png"), width=12) #}
## Log-likelihood for viral-load observations with censoring.
## Three observation regimes:
##   * quantified:   obs above the limit of quantification -> normal log-density
##   * unquantified: detected but at/below the LOQ         -> interval probability
##   * non-detect:   obs at/below min_vl                   -> left-tail probability
likelihood_vl <- function(obs, predicted, sd, min_vl = 0, limit_quantification = 2) {
  idx_quant   <- which(obs > limit_quantification)
  idx_between <- which(obs <= limit_quantification & obs > min_vl)
  idx_below   <- which(obs <= min_vl)

  ll <- numeric(length(obs))
  ## Fully quantified observations: normal log-density around the prediction.
  ll[idx_quant] <- dnorm(obs[idx_quant], predicted[idx_quant], sd, TRUE)
  ## Detected but unquantified: probability mass between min_vl and the LOQ.
  ll[idx_between] <- log(pnorm(limit_quantification, predicted[idx_between], sd,
                               lower.tail = TRUE) -
                           pnorm(min_vl, predicted[idx_between], sd,
                                 lower.tail = TRUE))
  ## Non-detects: log probability mass at or below min_vl.
  ll[idx_below] <- pnorm(min_vl, predicted[idx_below], sd, lower.tail = TRUE, TRUE)
  ll
}

## Build a closure over the data that, given a parameter vector, returns either
## the model trajectory (ver = "model") or the posterior value (otherwise),
## using a piecewise-linear ("hinge") viral-kinetics model per individual.
create_func_indivs_multivariate_hinge <- function(parTab, dat, PRIOR_FUNC = NULL,
                                                  ver = "model", for_plot = FALSE) {
  ## Predictions are built to match the vector of observations.
  obs <- dat$obs
  ts <- dat$t
  par_names <- parTab$names
  n_indivs <- length(unique(dat$indiv))

  ## Which parameter-table rows belong to which individual (0 = shared).
  indiv_ids <- parTab$indiv
  indiv_ids_unique <- unique(indiv_ids)
  indiv_ids_unique <- indiv_ids_unique[indiv_ids_unique != 0]

  f <- function(pars) {
    names(pars) <- par_names

    ## Population-level vectors of the individual-specific parameters.
    viral_peaks   <- pars[which(par_names == "viral_peak")]
    wane_pars     <- pars[which(par_names == "t_wane")]
    t_shifts      <- pars[which(par_names == "tshift")]
    desired_modes <- pars[which(par_names == "desired_mode")]
    true_0        <- pars[which(par_names == "true_0")]
    t_onsets      <- pars[which(par_names == "incu")]

    predicted <- NULL
    pred_dat <- NULL

    ## Solve the hinge model for each individual in turn.
    for (i in indiv_ids_unique) {
      subset_pars <- pars[which(indiv_ids == i)]
      viral_peak <- subset_pars["viral_peak"]
      tw <- subset_pars["t_wane"]
      tshift <- subset_pars["tshift"]
      desired_mode <- subset_pars["desired_mode"] + tshift
      t_onset <- subset_pars["incu"]

      ## For the posterior, observed times are shifted by the incubation period.
      if (ver == "posterior") {
        obs_t <- dat[dat$i == i, "t"] + t_onset
      } else {
        obs_t <- dat[dat$i == i, "t"]
      }

      y <- rep(true_0, length(obs_t))
      wane_rate <- viral_peak / (t_onset - desired_mode + tw)

      ## Latent period: flat at the baseline value.
      y[obs_t <= tshift] <- true_0
      ## Growth period: linear rise from tshift up to the peak.
      growing <- obs_t > tshift & obs_t <= desired_mode
      y[growing] <- viral_peak * (obs_t[growing] - tshift) / (desired_mode - tshift)
      ## Waning period: linear decline from the peak.
      waning <- obs_t > desired_mode
      y[waning] <- viral_peak - wane_rate * (obs_t[waning] - desired_mode)

      if (for_plot) {
        use_t <- obs_t - t_onset
        pred_dat <- bind_rows(pred_dat, tibble(t = use_t, y = y, i = i))
      } else {
        predicted <- c(predicted, y)
      }
    }

    ## Either return model predictions, or evaluate the posterior.
    if (ver == "model") {
      if (for_plot) {
        return(pred_dat)
      } else {
        return(predicted)
      }
    } else {
      ## Observation-level log-likelihoods (censored normal).
      lik <- likelihood_vl(obs, predicted, pars["sd"], pars["lod"],
                           pars["limit_quantification"])

      ## Multivariate random-effects prior on (viral_peak, t_wane).
      all_pars <- matrix(nrow = length(viral_peaks), ncol = 2)
      all_pars[, 1] <- viral_peaks
      all_pars[, 2] <- wane_pars

      mus  <- c(pars["viral_peak_mean"], pars["wane_mean"])
      rhos <- c(pars["rho_viral_wane"])
      sds  <- c(pars["viral_peak_sd"], pars["wane_sd"])

      R <- diag(2)
      R[upper.tri(R)] <- R[lower.tri(R)] <- rhos

      ## LKJ prior on the correlation matrix, half-Cauchy priors on the sds.
      corr_prior <- dlkjcorr(R, eta = 2, TRUE)
      sd_prior_viral <- dhcauchy(pars["viral_peak_sd"], 1, TRUE)
      sd_prior_wane  <- dhcauchy(pars["wane_sd"], 1, TRUE)

      kinetics_prior <- sum(dmvnorm2(all_pars, mus, sds, R, log = TRUE))
      random_effects <- kinetics_prior + sd_prior_viral + sd_prior_wane + corr_prior

      lik <- sum(lik) + random_effects
      if (!is.null(PRIOR_FUNC)) {
        lik <- lik + PRIOR_FUNC(pars)
      }
      lik
    }
  }
}
/code/viral_kinetics/functions/model_funcs_multivariate_hinge.R
permissive
cleary-lab/covid19-group-tests
R
false
false
4,767
r
## Log-likelihood for viral-load observations with censoring.
## Three observation regimes:
##   * quantified:   obs above the limit of quantification -> normal log-density
##   * unquantified: detected but at/below the LOQ         -> interval probability
##   * non-detect:   obs at/below min_vl                   -> left-tail probability
likelihood_vl <- function(obs, predicted, sd, min_vl = 0, limit_quantification = 2) {
  idx_quant   <- which(obs > limit_quantification)
  idx_between <- which(obs <= limit_quantification & obs > min_vl)
  idx_below   <- which(obs <= min_vl)

  ll <- numeric(length(obs))
  ## Fully quantified observations: normal log-density around the prediction.
  ll[idx_quant] <- dnorm(obs[idx_quant], predicted[idx_quant], sd, TRUE)
  ## Detected but unquantified: probability mass between min_vl and the LOQ.
  ll[idx_between] <- log(pnorm(limit_quantification, predicted[idx_between], sd,
                               lower.tail = TRUE) -
                           pnorm(min_vl, predicted[idx_between], sd,
                                 lower.tail = TRUE))
  ## Non-detects: log probability mass at or below min_vl.
  ll[idx_below] <- pnorm(min_vl, predicted[idx_below], sd, lower.tail = TRUE, TRUE)
  ll
}

## Build a closure over the data that, given a parameter vector, returns either
## the model trajectory (ver = "model") or the posterior value (otherwise),
## using a piecewise-linear ("hinge") viral-kinetics model per individual.
create_func_indivs_multivariate_hinge <- function(parTab, dat, PRIOR_FUNC = NULL,
                                                  ver = "model", for_plot = FALSE) {
  ## Predictions are built to match the vector of observations.
  obs <- dat$obs
  ts <- dat$t
  par_names <- parTab$names
  n_indivs <- length(unique(dat$indiv))

  ## Which parameter-table rows belong to which individual (0 = shared).
  indiv_ids <- parTab$indiv
  indiv_ids_unique <- unique(indiv_ids)
  indiv_ids_unique <- indiv_ids_unique[indiv_ids_unique != 0]

  f <- function(pars) {
    names(pars) <- par_names

    ## Population-level vectors of the individual-specific parameters.
    viral_peaks   <- pars[which(par_names == "viral_peak")]
    wane_pars     <- pars[which(par_names == "t_wane")]
    t_shifts      <- pars[which(par_names == "tshift")]
    desired_modes <- pars[which(par_names == "desired_mode")]
    true_0        <- pars[which(par_names == "true_0")]
    t_onsets      <- pars[which(par_names == "incu")]

    predicted <- NULL
    pred_dat <- NULL

    ## Solve the hinge model for each individual in turn.
    for (i in indiv_ids_unique) {
      subset_pars <- pars[which(indiv_ids == i)]
      viral_peak <- subset_pars["viral_peak"]
      tw <- subset_pars["t_wane"]
      tshift <- subset_pars["tshift"]
      desired_mode <- subset_pars["desired_mode"] + tshift
      t_onset <- subset_pars["incu"]

      ## For the posterior, observed times are shifted by the incubation period.
      if (ver == "posterior") {
        obs_t <- dat[dat$i == i, "t"] + t_onset
      } else {
        obs_t <- dat[dat$i == i, "t"]
      }

      y <- rep(true_0, length(obs_t))
      wane_rate <- viral_peak / (t_onset - desired_mode + tw)

      ## Latent period: flat at the baseline value.
      y[obs_t <= tshift] <- true_0
      ## Growth period: linear rise from tshift up to the peak.
      growing <- obs_t > tshift & obs_t <= desired_mode
      y[growing] <- viral_peak * (obs_t[growing] - tshift) / (desired_mode - tshift)
      ## Waning period: linear decline from the peak.
      waning <- obs_t > desired_mode
      y[waning] <- viral_peak - wane_rate * (obs_t[waning] - desired_mode)

      if (for_plot) {
        use_t <- obs_t - t_onset
        pred_dat <- bind_rows(pred_dat, tibble(t = use_t, y = y, i = i))
      } else {
        predicted <- c(predicted, y)
      }
    }

    ## Either return model predictions, or evaluate the posterior.
    if (ver == "model") {
      if (for_plot) {
        return(pred_dat)
      } else {
        return(predicted)
      }
    } else {
      ## Observation-level log-likelihoods (censored normal).
      lik <- likelihood_vl(obs, predicted, pars["sd"], pars["lod"],
                           pars["limit_quantification"])

      ## Multivariate random-effects prior on (viral_peak, t_wane).
      all_pars <- matrix(nrow = length(viral_peaks), ncol = 2)
      all_pars[, 1] <- viral_peaks
      all_pars[, 2] <- wane_pars

      mus  <- c(pars["viral_peak_mean"], pars["wane_mean"])
      rhos <- c(pars["rho_viral_wane"])
      sds  <- c(pars["viral_peak_sd"], pars["wane_sd"])

      R <- diag(2)
      R[upper.tri(R)] <- R[lower.tri(R)] <- rhos

      ## LKJ prior on the correlation matrix, half-Cauchy priors on the sds.
      corr_prior <- dlkjcorr(R, eta = 2, TRUE)
      sd_prior_viral <- dhcauchy(pars["viral_peak_sd"], 1, TRUE)
      sd_prior_wane  <- dhcauchy(pars["wane_sd"], 1, TRUE)

      kinetics_prior <- sum(dmvnorm2(all_pars, mus, sds, R, log = TRUE))
      random_effects <- kinetics_prior + sd_prior_viral + sd_prior_wane + corr_prior

      lik <- sum(lik) + random_effects
      if (!is.null(PRIOR_FUNC)) {
        lik <- lik + PRIOR_FUNC(pars)
      }
      lik
    }
  }
}
## Differential-expression analysis of single-embryo diapause RNA-Seq
## quantifications produced by Kallisto, using sleuth.

library(sleuth)
library(Biobase)

## Work from the "Diapause_transcriptome" project folder.
setwd("/Users/Chi-Kuo/Dropbox/Diapause_transcriptome")  # adjust to your setup

## ---- Build the sample-to-covariates table ------------------------------

TMF_dir <- "se_read_mapping"   # base directory holding the kallisto output
panelkey <- c("ref_BLE")       # name of the library index file

## Sample IDs are the sub-folder names under TMF_dir.
sample_id <- dir(file.path(TMF_dir))

## Paths to the kallisto result folders, indexed by sample ID.
kal_dirs <- sapply(sample_id, function(id) file.path(TMF_dir, id))

## Experimental-design table linking kallisto directories to samples.
s2c <- read.table(file.path(paste0("DE/", panelkey, ".txt")),
                  header = TRUE, stringsAsFactors = FALSE)
s2c <- dplyr::mutate(s2c, path = kal_dirs)

## ---- DE analysis -------------------------------------------------------

## Load the kallisto-processed data and fit the sleuth models.
so <- sleuth_prep(s2c, ~ condition)
so <- sleuth_fit(so)
so <- sleuth_fit(so, ~1, 'reduced')
so <- sleuth_lrt(so, 'reduced', 'full')

## Wald tests; PreD (pre-diapause) is the intercept.
so <- sleuth_wt(so, which_beta = '(Intercept)')
so <- sleuth_wt(so, which_beta = 'conditionBLEBB')
models(so)
results_table <- sleuth_results(so, 'reduced:full', test_type = 'lrt')

## Optional: interactive exploration / saving the (large) sleuth object.
# sleuth_live(so)
# save(so, file = paste0(sleuth_data_dir, "/", panelkey, "/", panelkey, "_data.RData"))

## ---- Extract data per condition ----------------------------------------

conditionlist <- c("(Intercept)", "conditionBLEBB")
for (i in seq_along(conditionlist)) {
  cleaner <- sleuth_results(so, conditionlist[i])
  ## Keep columns 1, 2 and 4 (target_id plus two test-statistic columns).
  cleaned <- cleaner[, c(1, 2, 4)]
  ## Convert natural-log beta values to log2 (multiply by 1/ln(2)).
  cleaned[, 3] <- cleaned[, 3] * 1.44269504
  file_names <- paste0("DE/output/differential_gene_list_", conditionlist[i], ".csv")
  write.csv(cleaned, file = file_names)
  rm(cleaner, cleaned)
}

## Normalized, filtered TPM values from the sleuth object.
normalized_TPM <- kallisto_table(so, use_filtered = TRUE, normalized = TRUE,
                                 include_covariates = TRUE)
write.csv(normalized_TPM, file = "DE/output/normalized_TPM.csv")
rm(list = setdiff(ls(), "panelkey"))

## ---- Post-analysis list building ---------------------------------------

#### (1) Raw TPM values from all libraries, combined into one table.

## Library 0D239 seeds the table (keeps target_id and the TPM column).
seedTPMlist <- read.csv(paste0("se_read_mapping/0D239/abundance.tsv"),
                        header = TRUE,
                        colClasses = c("character", "NULL", "NULL", "NULL", "numeric"),
                        sep = "\t")

## Remaining libraries; all share the same gene order in the reference
## transcriptome, so no reordering is needed before binding.
TPM_Lib_list <- c("0D2310", "0D2317", "0D2318", "0D2325", "0D2326",
                  "0D2333", "0D2334", "0D2335", "0D2336", "0D2337",
                  "BLEBB5", "BLEBB6", "BLEBB7", "BLEBB8",
                  "BLEBB13", "BLEBB14", "BLEBB16",
                  "BLEBB21", "BLEBB22", "BLEBB23", "BLEBB24",
                  "BLEBB29", "BLEBB30", "BLEBB31", "BLEBB32",
                  "BLEBB41", "BLEBB42")

for (i in seq_along(TPM_Lib_list)) {
  newaddTPMlist <- read.csv(paste0("se_read_mapping/", TPM_Lib_list[i], "/abundance.tsv"),
                            header = TRUE,
                            colClasses = c("NULL", "NULL", "NULL", "NULL", "numeric"),
                            sep = "\t")
  seedTPMlist <- cbind(seedTPMlist, newaddTPMlist)
  rm(newaddTPMlist)
}

rownames(seedTPMlist) <- seedTPMlist$target_id
seedTPMlist$target_id <- NULL
## Column names: the seed library first, then the appended libraries in order.
colnames(seedTPMlist) <- c("0D239", TPM_Lib_list)
## Reorder the genes into alphabetical order.
seedTPMlist <- seedTPMlist[order(rownames(seedTPMlist)), ]
write.csv(seedTPMlist, file = "DE/output/RAW_TPM_combined.csv")
rm(TPM_Lib_list, i)

#### (2) DE information across all conditions, combined into one table.

## The (Intercept) list seeds the table; only the gene IDs are kept.
seedDElist <- read.csv(paste0("DE/output/differential_gene_list_(Intercept).csv"),
                       header = TRUE,
                       colClasses = c("NULL", "character", "NULL", "NULL"),
                       sep = ",")
rownames(seedDElist) <- seedDElist$target_id
seedDElist$target_id <- NULL
seedDElist <- seedDElist[order(rownames(seedDElist)), ]  # alphabetical gene order

## Append the remaining conditions in order.
DE_condition_list <- c("BLEBB")
for (i in seq_along(DE_condition_list)) {
  newaddDElist <- read.csv(paste0("DE/output/differential_gene_list_condition",
                                  DE_condition_list[i], ".csv"),
                           header = TRUE,
                           colClasses = c("NULL", "character", "numeric", "numeric"),
                           sep = ",")
  rownames(newaddDElist) <- newaddDElist$target_id
  newaddDElist$target_id <- NULL
  newaddDElist <- newaddDElist[order(rownames(newaddDElist)), ]  # alphabetical gene order
  seedDElist <- cbind(seedDElist, newaddDElist)
  rm(newaddDElist)
}

colnames(seedDElist) <- c("qval_BLEBB", "b_BLEBB")
write.csv(seedDElist, file = "DE/output/DE_combined.csv")
rm(DE_condition_list, i)

#### (3) Combine the DE and TPM tables into one master list.
combined <- cbind(seedDElist, seedTPMlist)
write.csv(combined, file = "DE/output/Master_combined.csv")
rm(list = ls())
/DE/seDE_analysis_with_sleuth.R
no_license
oplz/African_Killifish_Diapause
R
false
false
6,919
r
## Differential-expression analysis of single-embryo diapause RNA-Seq
## quantifications produced by Kallisto, using sleuth.

library(sleuth)
library(Biobase)

## Work from the "Diapause_transcriptome" project folder.
setwd("/Users/Chi-Kuo/Dropbox/Diapause_transcriptome")  # adjust to your setup

## ---- Build the sample-to-covariates table ------------------------------

TMF_dir <- "se_read_mapping"   # base directory holding the kallisto output
panelkey <- c("ref_BLE")       # name of the library index file

## Sample IDs are the sub-folder names under TMF_dir.
sample_id <- dir(file.path(TMF_dir))

## Paths to the kallisto result folders, indexed by sample ID.
kal_dirs <- sapply(sample_id, function(id) file.path(TMF_dir, id))

## Experimental-design table linking kallisto directories to samples.
s2c <- read.table(file.path(paste0("DE/", panelkey, ".txt")),
                  header = TRUE, stringsAsFactors = FALSE)
s2c <- dplyr::mutate(s2c, path = kal_dirs)

## ---- DE analysis -------------------------------------------------------

## Load the kallisto-processed data and fit the sleuth models.
so <- sleuth_prep(s2c, ~ condition)
so <- sleuth_fit(so)
so <- sleuth_fit(so, ~1, 'reduced')
so <- sleuth_lrt(so, 'reduced', 'full')

## Wald tests; PreD (pre-diapause) is the intercept.
so <- sleuth_wt(so, which_beta = '(Intercept)')
so <- sleuth_wt(so, which_beta = 'conditionBLEBB')
models(so)
results_table <- sleuth_results(so, 'reduced:full', test_type = 'lrt')

## Optional: interactive exploration / saving the (large) sleuth object.
# sleuth_live(so)
# save(so, file = paste0(sleuth_data_dir, "/", panelkey, "/", panelkey, "_data.RData"))

## ---- Extract data per condition ----------------------------------------

conditionlist <- c("(Intercept)", "conditionBLEBB")
for (i in seq_along(conditionlist)) {
  cleaner <- sleuth_results(so, conditionlist[i])
  ## Keep columns 1, 2 and 4 (target_id plus two test-statistic columns).
  cleaned <- cleaner[, c(1, 2, 4)]
  ## Convert natural-log beta values to log2 (multiply by 1/ln(2)).
  cleaned[, 3] <- cleaned[, 3] * 1.44269504
  file_names <- paste0("DE/output/differential_gene_list_", conditionlist[i], ".csv")
  write.csv(cleaned, file = file_names)
  rm(cleaner, cleaned)
}

## Normalized, filtered TPM values from the sleuth object.
normalized_TPM <- kallisto_table(so, use_filtered = TRUE, normalized = TRUE,
                                 include_covariates = TRUE)
write.csv(normalized_TPM, file = "DE/output/normalized_TPM.csv")
rm(list = setdiff(ls(), "panelkey"))

## ---- Post-analysis list building ---------------------------------------

#### (1) Raw TPM values from all libraries, combined into one table.

## Library 0D239 seeds the table (keeps target_id and the TPM column).
seedTPMlist <- read.csv(paste0("se_read_mapping/0D239/abundance.tsv"),
                        header = TRUE,
                        colClasses = c("character", "NULL", "NULL", "NULL", "numeric"),
                        sep = "\t")

## Remaining libraries; all share the same gene order in the reference
## transcriptome, so no reordering is needed before binding.
TPM_Lib_list <- c("0D2310", "0D2317", "0D2318", "0D2325", "0D2326",
                  "0D2333", "0D2334", "0D2335", "0D2336", "0D2337",
                  "BLEBB5", "BLEBB6", "BLEBB7", "BLEBB8",
                  "BLEBB13", "BLEBB14", "BLEBB16",
                  "BLEBB21", "BLEBB22", "BLEBB23", "BLEBB24",
                  "BLEBB29", "BLEBB30", "BLEBB31", "BLEBB32",
                  "BLEBB41", "BLEBB42")

for (i in seq_along(TPM_Lib_list)) {
  newaddTPMlist <- read.csv(paste0("se_read_mapping/", TPM_Lib_list[i], "/abundance.tsv"),
                            header = TRUE,
                            colClasses = c("NULL", "NULL", "NULL", "NULL", "numeric"),
                            sep = "\t")
  seedTPMlist <- cbind(seedTPMlist, newaddTPMlist)
  rm(newaddTPMlist)
}

rownames(seedTPMlist) <- seedTPMlist$target_id
seedTPMlist$target_id <- NULL
## Column names: the seed library first, then the appended libraries in order.
colnames(seedTPMlist) <- c("0D239", TPM_Lib_list)
## Reorder the genes into alphabetical order.
seedTPMlist <- seedTPMlist[order(rownames(seedTPMlist)), ]
write.csv(seedTPMlist, file = "DE/output/RAW_TPM_combined.csv")
rm(TPM_Lib_list, i)

#### (2) DE information across all conditions, combined into one table.

## The (Intercept) list seeds the table; only the gene IDs are kept.
seedDElist <- read.csv(paste0("DE/output/differential_gene_list_(Intercept).csv"),
                       header = TRUE,
                       colClasses = c("NULL", "character", "NULL", "NULL"),
                       sep = ",")
rownames(seedDElist) <- seedDElist$target_id
seedDElist$target_id <- NULL
seedDElist <- seedDElist[order(rownames(seedDElist)), ]  # alphabetical gene order

## Append the remaining conditions in order.
DE_condition_list <- c("BLEBB")
for (i in seq_along(DE_condition_list)) {
  newaddDElist <- read.csv(paste0("DE/output/differential_gene_list_condition",
                                  DE_condition_list[i], ".csv"),
                           header = TRUE,
                           colClasses = c("NULL", "character", "numeric", "numeric"),
                           sep = ",")
  rownames(newaddDElist) <- newaddDElist$target_id
  newaddDElist$target_id <- NULL
  newaddDElist <- newaddDElist[order(rownames(newaddDElist)), ]  # alphabetical gene order
  seedDElist <- cbind(seedDElist, newaddDElist)
  rm(newaddDElist)
}

colnames(seedDElist) <- c("qval_BLEBB", "b_BLEBB")
write.csv(seedDElist, file = "DE/output/DE_combined.csv")
rm(DE_condition_list, i)

#### (3) Combine the DE and TPM tables into one master list.
combined <- cbind(seedDElist, seedTPMlist)
write.csv(combined, file = "DE/output/Master_combined.csv")
rm(list = ls())
library(BuenColors) library(data.table) library(dplyr) neighbors <- c("NFASC", "CNTN2", "RBBP5", "DSTYK", "NUAK2", "KLHDC8A", "LEMD1", "CDK18", "TMEM81") df <- fread("output/RNAseq-g1_NC_Padj01.tsv") %>% data.frame() df$color <- ifelse(df$gene == "TMCC2", "TMCC2", ifelse(df$gene %in% neighbors, "Neighbor", "other")) ggplot(df %>% filter(baseMean > 20) %>% arrange(desc(color)), aes(x =-1* log2FoldChange, y = -log10(pvalue), color = color)) + geom_point() + labs(x = "log2FC CRISPRi/Non-targeting", y = "-log10 P-value") + pretty_plot(fontsize = 8) + L_border() + scale_color_manual(values = c("TMCC2" = "firebrick", "Neighbor" = "dodgerblue2", "other" = "black")) theme(legend.position = "none")
/tmcc2/crispri_DE/03_plot.R
no_license
sankaranlab/erythroid-profiling
R
false
false
716
r
library(BuenColors) library(data.table) library(dplyr) neighbors <- c("NFASC", "CNTN2", "RBBP5", "DSTYK", "NUAK2", "KLHDC8A", "LEMD1", "CDK18", "TMEM81") df <- fread("output/RNAseq-g1_NC_Padj01.tsv") %>% data.frame() df$color <- ifelse(df$gene == "TMCC2", "TMCC2", ifelse(df$gene %in% neighbors, "Neighbor", "other")) ggplot(df %>% filter(baseMean > 20) %>% arrange(desc(color)), aes(x =-1* log2FoldChange, y = -log10(pvalue), color = color)) + geom_point() + labs(x = "log2FC CRISPRi/Non-targeting", y = "-log10 P-value") + pretty_plot(fontsize = 8) + L_border() + scale_color_manual(values = c("TMCC2" = "firebrick", "Neighbor" = "dodgerblue2", "other" = "black")) theme(legend.position = "none")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/logLik.R \name{logLik.grpreg} \alias{logLik.grpreg} \alias{logLik} \alias{logLik.grpsurv} \title{logLik method for grpreg} \usage{ \method{logLik}{grpreg}(object, df.method = c("default", "active"), REML = FALSE, ...) \method{logLik}{grpsurv}(object, df.method = c("default", "active"), ...) } \arguments{ \item{object}{A fitted \code{grpreg} or \code{grpsurv} object, as obtained from \code{grpreg()} or \code{grpsurv()}} \item{df.method}{How should effective model parameters be calculated? One of: \code{"active"}, which counts the number of nonzero coefficients; or \code{"default"}, which uses the calculated \code{df} returned by \code{grpreg}. Default is \code{"default"}.} \item{REML}{Use restricted MLE for estimation of the scale parameter in a gaussian model? Default is FALSE.} \item{\dots}{For S3 method compatibility.} } \value{ Returns an object of class 'logLik', in this case consisting of a number (or vector of numbers) with two attributes: 'df' (the estimated degrees of freedom in the model) and 'nobs' (number of observations). The 'print' method for 'logLik' objects is not intended to handle vectors; consequently, the value of the function does not necessarily display correctly. However, it works with 'AIC' and 'BIC' without any glitches and returns the expected vectorized output. } \description{ Calculates the log likelihood and degrees of freedom for a fitted grpreg object. } \details{ Exists mainly for use with \code{'AIC'} and \code{'BIC'}. } \examples{ data(Birthwt) X <- Birthwt$X y <- Birthwt$bwt group <- Birthwt$group fit <- grpreg(X,y,group,penalty="cMCP") logLik(fit) ## Display is glitchy for vectors AIC(fit) BIC(fit) } \seealso{ \code{grpreg} } \author{ Patrick Breheny }
/man/logLik.grpreg.Rd
no_license
pbreheny/grpreg
R
false
true
1,804
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/logLik.R \name{logLik.grpreg} \alias{logLik.grpreg} \alias{logLik} \alias{logLik.grpsurv} \title{logLik method for grpreg} \usage{ \method{logLik}{grpreg}(object, df.method = c("default", "active"), REML = FALSE, ...) \method{logLik}{grpsurv}(object, df.method = c("default", "active"), ...) } \arguments{ \item{object}{A fitted \code{grpreg} or \code{grpsurv} object, as obtained from \code{grpreg()} or \code{grpsurv()}} \item{df.method}{How should effective model parameters be calculated? One of: \code{"active"}, which counts the number of nonzero coefficients; or \code{"default"}, which uses the calculated \code{df} returned by \code{grpreg}. Default is \code{"default"}.} \item{REML}{Use restricted MLE for estimation of the scale parameter in a gaussian model? Default is FALSE.} \item{\dots}{For S3 method compatibility.} } \value{ Returns an object of class 'logLik', in this case consisting of a number (or vector of numbers) with two attributes: 'df' (the estimated degrees of freedom in the model) and 'nobs' (number of observations). The 'print' method for 'logLik' objects is not intended to handle vectors; consequently, the value of the function does not necessarily display correctly. However, it works with 'AIC' and 'BIC' without any glitches and returns the expected vectorized output. } \description{ Calculates the log likelihood and degrees of freedom for a fitted grpreg object. } \details{ Exists mainly for use with \code{'AIC'} and \code{'BIC'}. } \examples{ data(Birthwt) X <- Birthwt$X y <- Birthwt$bwt group <- Birthwt$group fit <- grpreg(X,y,group,penalty="cMCP") logLik(fit) ## Display is glitchy for vectors AIC(fit) BIC(fit) } \seealso{ \code{grpreg} } \author{ Patrick Breheny }
#Polynomial Linear Regression #Importing the dataset dataset = read.csv('Position_Salaries.csv') dataset= dataset[2:3] #Fitting linear regression to the dataset linreg=lm(formula=Salary~., data=dataset) #Fittin polynomial linear regression to the dataset dataset$Level2=dataset$Level^2 dataset$Level3=dataset$Level^3 dataset$Level4=dataset$Level^4 polyreg=lm(formula=Salary ~ ., data=dataset) #Visualizing linear regression results library(ggplot2) ggplot()+ geom_point(aes(x=dataset$Level,y=dataset$Salary), colour='red')+ geom_line(aes(x=dataset$Level,y=predict(linreg, newdata=dataset)), colour='blue')+ ggtitle('Truth Or Bullshit(LR)')+ xlab('Level')+ ylab('Salary') #Visualizing polynomial regression results ggplot()+ geom_point(aes(x=dataset$Level,y=dataset$Salary), colour='red')+ geom_line(aes(x=dataset$Level,y=predict(polyreg, newdata=dataset)), colour='blue')+ ggtitle('Truth Or Bullshit(PR)')+ xlab('Level')+ ylab('Salary') #Predicting a new result with linear regression ypred=predict(linreg,data.frame(Level=6.5)) #Predicting a new result with polynomial regression ypred=predict(polyreg,data.frame(Level=6.5, Level2=6.5^2, Level3=6.3^3, Level4=6.5^4))
/Polynomial Regression.R
no_license
srees16/ML-Deep-Learning-R
R
false
false
1,362
r
#Polynomial Linear Regression #Importing the dataset dataset = read.csv('Position_Salaries.csv') dataset= dataset[2:3] #Fitting linear regression to the dataset linreg=lm(formula=Salary~., data=dataset) #Fittin polynomial linear regression to the dataset dataset$Level2=dataset$Level^2 dataset$Level3=dataset$Level^3 dataset$Level4=dataset$Level^4 polyreg=lm(formula=Salary ~ ., data=dataset) #Visualizing linear regression results library(ggplot2) ggplot()+ geom_point(aes(x=dataset$Level,y=dataset$Salary), colour='red')+ geom_line(aes(x=dataset$Level,y=predict(linreg, newdata=dataset)), colour='blue')+ ggtitle('Truth Or Bullshit(LR)')+ xlab('Level')+ ylab('Salary') #Visualizing polynomial regression results ggplot()+ geom_point(aes(x=dataset$Level,y=dataset$Salary), colour='red')+ geom_line(aes(x=dataset$Level,y=predict(polyreg, newdata=dataset)), colour='blue')+ ggtitle('Truth Or Bullshit(PR)')+ xlab('Level')+ ylab('Salary') #Predicting a new result with linear regression ypred=predict(linreg,data.frame(Level=6.5)) #Predicting a new result with polynomial regression ypred=predict(polyreg,data.frame(Level=6.5, Level2=6.5^2, Level3=6.3^3, Level4=6.5^4))
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/encoding_methods.R \name{encode_one_hot} \alias{encode_one_hot} \title{One-hot encoder} \usage{ encode_one_hot(sequence, max_length = 4034) } \arguments{ \item{sequence}{Sequence in a string format.} \item{max_length}{Maximum length of sequence to encode.} } \value{ One-hot encoded sequence. } \description{ \code{encode_one_hot} one-hot-encodes sequence in a string format. } \examples{ sample_seq <- "MSHMTFNTWKAGLWRLAAAAVLSLLPVVARAAVPGITGPTFDLTAQPGRANQPDGASVYSWGYGCNPRTVPGFLPSVNPLAGQ" encoded_seq <- encode_one_hot(sample_seq) }
/man/encode_one_hot.Rd
no_license
cran/deepredeff
R
false
true
612
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/encoding_methods.R \name{encode_one_hot} \alias{encode_one_hot} \title{One-hot encoder} \usage{ encode_one_hot(sequence, max_length = 4034) } \arguments{ \item{sequence}{Sequence in a string format.} \item{max_length}{Maximum length of sequence to encode.} } \value{ One-hot encoded sequence. } \description{ \code{encode_one_hot} one-hot-encodes sequence in a string format. } \examples{ sample_seq <- "MSHMTFNTWKAGLWRLAAAAVLSLLPVVARAAVPGITGPTFDLTAQPGRANQPDGASVYSWGYGCNPRTVPGFLPSVNPLAGQ" encoded_seq <- encode_one_hot(sample_seq) }
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pub01_utilityFuncs.R
\name{rtn.lastperiods}
\alias{rtn.lastperiods}
\title{rtn.lastperiods}
\usage{
rtn.lastperiods(rtn, periods = list(months(1), months(3), months(6),
  years(1), years(3), years(5)))
}
\arguments{
\item{rtn}{an xts, vector, matrix, data frame, timeSeries or zoo object of asset returns}

\item{periods}{a list, with elements of class "Period", e.g. list(months(1), months(3)), giving the last periods.}
}
\value{
a matrix
}
\description{
Get the return over each of several trailing periods of the rtn series (including the overall cumulative return).
}
\examples{
rtn.long <- zoo(rnorm(100,0.001,0.02),as.Date("2010-01-01")+1:100)
rtn.short <- rtn.long + rnorm(100,-0.001,0.003)
rtn <- merge(rtn.long,rtn.short)
rtn.lastperiods(rtn,list(months(3),months(6),years(1)))
}
\author{
Ruifei.Yin
}
/man/rtn.lastperiods.Rd
no_license
raphael210/QUtility
R
false
true
866
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pub01_utilityFuncs.R
\name{rtn.lastperiods}
\alias{rtn.lastperiods}
\title{rtn.lastperiods}
\usage{
rtn.lastperiods(rtn, periods = list(months(1), months(3), months(6),
  years(1), years(3), years(5)))
}
\arguments{
\item{rtn}{an xts, vector, matrix, data frame, timeSeries or zoo object of asset returns}

\item{periods}{a list, with elements of class "Period", e.g. list(months(1), months(3)), giving the last periods.}
}
\value{
a matrix
}
\description{
Get the return over each of several trailing periods of the rtn series (including the overall cumulative return).
}
\examples{
rtn.long <- zoo(rnorm(100,0.001,0.02),as.Date("2010-01-01")+1:100)
rtn.short <- rtn.long + rnorm(100,-0.001,0.003)
rtn <- merge(rtn.long,rtn.short)
rtn.lastperiods(rtn,list(months(3),months(6),years(1)))
}
\author{
Ruifei.Yin
}
## Format sample data prepared in <Calculate_AlternateSumPCBs.R> for
## Table S4, sample data:
##  1. add replicate letters
##  2. convert pct.det to percentages
##  3. round pcb sums to one decimal place
##  4. reorder columns / remove sys_smp_code and stn_id
##
## Input:  <AllSampleData_CompareSums.csv>
## Output: <TabVals_SampleData.csv>

# Read in base data, prepared with <Calculate_AlternateSumPCBs.R>
smp.data.raw <- read.csv(paste(DirOut, "AllSampleData_CompareSums.csv", sep = ""))
smp.data <- smp.data.raw  # retain smp.data.raw as an unmodified df

# Generate 'replicate' column (to replace [stn_id]).
# Count number of samples in each 'site mean'; the order of the split
# factors matters to get the sequence in the correct order.
sitelist <- split(x = smp.data,
                  f = list(smp.data$site_number, smp.data$category, smp.data$sample_year),
                  drop = TRUE)
n.persite <- lapply(X = sitelist, FUN = nrow)
n.vect <- data.frame(do.call(rbind, n.persite))  # samples per site-mean level

# Assign A-D letters to define replicates, added as a new column.
rep.letters <- c("A", "B", "C", "D")
smp.data$rep <- rep.letters[sequence(n.vect[, 1])]

# Clean up
rm(sitelist); rm(n.persite); rm(n.vect); rm(rep.letters)

# Reorder columns and remove unneeded data.
smp.form <- select(smp.data,
                   sample_year, category, site_number, rep,     # basic metadata
                   smp.mass, pct.det, avg.mdl,                  # sample data
                   sum.zero, sum.halfdl, sum.km, sum.ref.cons)  # four sum-PCB variants

# Convert pct column to percentages.
smp.form <- mutate(smp.form, pct.det = round(100 * pct.det, 1))

# Round off PCB sum values.
# BUG FIX: sum.km was previously assigned round(sum.halfdl, 1) (copy/paste
# error), overwriting the Kaplan-Meier sums with half-DL sums.
smp.form <- mutate(smp.form,
                   sum.zero     = round(sum.zero, 1),
                   sum.halfdl   = round(sum.halfdl, 1),
                   sum.km       = round(sum.km, 1),
                   sum.ref.cons = round(sum.ref.cons, 1))
#! Wait and round the mdl values in excel

# Change K-M sums to NA for non-detect samples.
smp.form$sum.km[smp.form$sum.zero == 0] <- NA

# Rearrange so that sediment comes first.
smp.form$category <- factor(smp.form$category,
                            levels = c("Sediment", "Araneid", "Tetragnathid"))
smp.form <- arrange(smp.form, sample_year, category, site_number)

# Print output
write.csv(smp.form, paste(DirOut, "TabVals_SampleData.csv", sep = ""), row.names = FALSE)

#### END SCRIPT ####
/PrepareTables_S3_SampleData.R
no_license
ppgibson/MSQ_DataPaper_Anl
R
false
false
2,489
r
## Format sample data prepared in <Calculate_AlternateSumPCBs.R> for
## Table S4, sample data:
##  1. add replicate letters
##  2. convert pct.det to percentages
##  3. round pcb sums to one decimal place
##  4. reorder columns / remove sys_smp_code and stn_id
##
## Input:  <AllSampleData_CompareSums.csv>
## Output: <TabVals_SampleData.csv>

# Read in base data, prepared with <Calculate_AlternateSumPCBs.R>
smp.data.raw <- read.csv(paste(DirOut, "AllSampleData_CompareSums.csv", sep = ""))
smp.data <- smp.data.raw  # retain smp.data.raw as an unmodified df

# Generate 'replicate' column (to replace [stn_id]).
# Count number of samples in each 'site mean'; the order of the split
# factors matters to get the sequence in the correct order.
sitelist <- split(x = smp.data,
                  f = list(smp.data$site_number, smp.data$category, smp.data$sample_year),
                  drop = TRUE)
n.persite <- lapply(X = sitelist, FUN = nrow)
n.vect <- data.frame(do.call(rbind, n.persite))  # samples per site-mean level

# Assign A-D letters to define replicates, added as a new column.
rep.letters <- c("A", "B", "C", "D")
smp.data$rep <- rep.letters[sequence(n.vect[, 1])]

# Clean up
rm(sitelist); rm(n.persite); rm(n.vect); rm(rep.letters)

# Reorder columns and remove unneeded data.
smp.form <- select(smp.data,
                   sample_year, category, site_number, rep,     # basic metadata
                   smp.mass, pct.det, avg.mdl,                  # sample data
                   sum.zero, sum.halfdl, sum.km, sum.ref.cons)  # four sum-PCB variants

# Convert pct column to percentages.
smp.form <- mutate(smp.form, pct.det = round(100 * pct.det, 1))

# Round off PCB sum values.
# BUG FIX: sum.km was previously assigned round(sum.halfdl, 1) (copy/paste
# error), overwriting the Kaplan-Meier sums with half-DL sums.
smp.form <- mutate(smp.form,
                   sum.zero     = round(sum.zero, 1),
                   sum.halfdl   = round(sum.halfdl, 1),
                   sum.km       = round(sum.km, 1),
                   sum.ref.cons = round(sum.ref.cons, 1))
#! Wait and round the mdl values in excel

# Change K-M sums to NA for non-detect samples.
smp.form$sum.km[smp.form$sum.zero == 0] <- NA

# Rearrange so that sediment comes first.
smp.form$category <- factor(smp.form$category,
                            levels = c("Sediment", "Araneid", "Tetragnathid"))
smp.form <- arrange(smp.form, sample_year, category, site_number)

# Print output
write.csv(smp.form, paste(DirOut, "TabVals_SampleData.csv", sep = ""), row.names = FALSE)

#### END SCRIPT ####
####################################################################### # seriation - Infrastructure for seriation # Copyright (C) 2011 Michael Hahsler, Christian Buchta and Kurt Hornik # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # helper ndim <- function(x) length(dim(x)) find_order <- function(x, order, ...) { if (is.logical(order)) { if(order) order <- seriate(x, ...) else order <- seriate(x, method = "identity", ...) } if (is.character(order)) order <- seriate(x, method = order, ...) if (!inherits(order, "ser_permutation")) order <- ser_permutation(order) # for debugging #print(order) order } #' Permute the Order in Various Objects #' #' Provides the generic function and methods for permuting the order of various #' objects including vectors, lists, dendrograms (also \code{hclust} objects), #' the order of observations in a \code{dist} object, the rows and columns of a #' matrix or data.frame, and all dimensions of an array given a suitable #' [ser_permutation] object. #' #' The permutation vectors in [ser_permutation] are suitable if the number #' of permutation vectors matches the number of dimensions of \code{x} and if #' the length of each permutation vector has the same length as the #' corresponding dimension of \code{x}. 
#'
#' For 1-dimensional/1-mode data (list, vector, \code{dist}), \code{order} can
#' also be a single permutation vector of class [ser_permutation_vector]
#' or data which can be automatically coerced to this class (e.g. a numeric
#' vector).
#'
#' For \code{dendrogram} and \code{hclust}, subtrees are rotated to represent
#' the order best possible. If the order is not achieved perfectly then the
#' user is warned. This behavior can be changed with the extra parameter
#' \code{incompatible} which can take the values \code{"warn"} (default),
#' \code{"stop"} or \code{"ignore"}.
#'
#' @family permutation
#'
#' @param x an object (a list, a vector, a \code{dist} object, a matrix, an
#' array or any other object which provides \code{dim} and standard subsetting
#' with \code{"["}).
#' @param order an object of class [ser_permutation] which contains
#' suitable permutation vectors for \code{x}. Alternatively, a character string with the
#' name of a seriation method appropriate for `x` can be specified (see [seriate()]).
#' This will perform seriation and permute `x`. The value `TRUE` will permute using the
#' default seriation method.
#' @param margin specifies the dimensions to be permuted as a vector with dimension indices.
#' If `NULL`, \code{order} needs to contain a permutation for all dimensions.
#' If a single margin is specified, then \code{order} can also contain
#' a single permutation vector. Permutation vectors in \code{order} for
#' dimensions not in \code{margin} are ignored.
#' @param ... if `order` is the name of a seriation method, then additional arguments are
#' passed on to [seriate()].
#' @returns A permuted object of the same class as `x`.
#' @author Michael Hahsler #' @keywords manip #' @examples #' # List data types for permute #' methods("permute") #' #' # Permute matrix #' m <- matrix(rnorm(10), 5, 2, dimnames = list(1:5, LETTERS[1:2])) #' m #' #' # Permute rows and columns #' o <- ser_permutation(5:1, 2:1) #' o #' #' permute(m, o) #' #' ## permute only columns #' permute(m, o, margin = 2) #' #' ## permute using PCA seriation #' permute(m, "PCA") #' #' ## permute only rows using PCA #' permute(m, "PCA", margin = 1) #' #' # Permute data.frames #' df <- as.data.frame(m) #' permute(df, o) #' #' # Permute objects in a dist object #' d <- dist(m) #' d #' #' permute(d, c(3, 2, 1, 4, 5)) #' #' permute(d, "Spectral") #' #' # Permute a list #' l <- list(a = 1:5, b = letters[1:3], c = 0) #' l #' #' permute(l, c(2, 3, 1)) #' #' # Permute a dendrogram #' hc <- hclust(d) #' plot(hc) #' plot(permute(hc, 5:1)) #' @export permute <- function(x, order, ...) UseMethod("permute") #' @export permute.default <- function(x, order, ...) .permute_kd(x, order, ...) #' @rdname permute #' @export permute.array <- function(x, order, margin = NULL, ...) .permute_kd(x, order, margin = margin, ...) #' @rdname permute #' @export permute.matrix <- function(x, order, margin = NULL, ...) .permute_kd(x, order, margin = margin, ...) #' @rdname permute #' @export permute.data.frame <- function(x, order, margin = NULL, ...) .permute_kd(x, order, margin = margin, ...) #' @rdname permute #' @export permute.table <- function(x, order, margin = NULL, ...) .permute_kd(x, order, margin = margin, ...) #' @rdname permute #' @export permute.numeric <- function(x, order, ...) .permute_1d(x, order, ...) #' @rdname permute #' @export permute.character <- function(x, order, ...) .permute_1d(x, order, ...) #' @rdname permute #' @export permute.list <- function(x, order, ...) .permute_1d(x, order, ...) # special cases #' @rdname permute #' @export permute.dist <- function(x, order, ...) { order <- find_order(x, order, ...) 
if (.is_identity_permutation(order[[1]])) return(x) .check_dist_perm(x, order) .rearrange_dist(x, get_order(order, 1)) } #' @rdname permute #' @export permute.dendrogram <- function(x, order, ...) { order <- find_order(x, order, ...) # modeled after rotate in dendextend. Copied here to reduce the heavy dependency count of dendextend. # x <- dendextend::rotate(x, order = match(get_order(order), get_order(x))) rot <- function (x, order, ...) { if (length(get_order(order)) != stats::nobs(x)) stop("Length of order and number of leaves in dendrogram do not agree!") if (missing(order)) { warning("'order' parameter is missing, returning the tree as it was.") return(x) } labels_x <- labels(x) order_x <- order.dendrogram(x) number_of_leaves <- length(order_x) if (!is.numeric(order)) { order <- as.character(order) if (length(intersect(order, labels_x)) != number_of_leaves) { stop( "'order' is neither numeric nor a vector with ALL of the labels (in the order you want them to be)" ) } order <- match(order, labels_x) } weights <- seq_len(number_of_leaves) weights_for_order <- numeric(number_of_leaves) weights_for_order[order_x[order]] <- weights reorder(x, weights_for_order, mean, ...) } x <- rot(x, order = match(get_order(order), get_order(x))) if (any(get_order(x) != get_order(order))) warning("Dendrogram cannot be perfectly reordered! Using best approximation.") x } #' @rdname permute #' @export permute.hclust <- function(x, order, ...) 
{ nd <- stats::as.hclust(permute(stats::as.dendrogram(x), order, ...)) x$merge <- nd$merge x$height <- nd$height x$order <- nd$order x } # helper .check_dist_perm <- function(x, order) { if (inherits(order, "ser_permutation") && length(order) != 1L) stop("dimensions do not match") if (attr(x, "Size") != length(get_order(order, 1))) stop("some permutation vectors do not fit dimension of data") # check dist if (isTRUE(attr(x, "Diag")) || isTRUE(attr(x, "Upper"))) stop("'dist' with diagonal or upper triangle matrix not implemented") } .check_matrix_perm <- function(x, order) { if (ndim(x) != length(order)) stop("dimensions do not match") if (any(dim(x) != sapply(order, length))) stop("some permutation vectors do not fit dimension of data") } .permute_kd <- function(x, order, margin = NULL, ...) { # DEPRECATED: Compatibility with old permutation for data.frame if (is.data.frame(x) && is.null(margin) && !is.character(order) && ( inherits(order, "ser_permutation") && length(order) == 1 || inherits(order, "ser_permutation_vector") || is.integer(order) )) { warning( "permute for data.frames with a single seriation order is now deprecated. Specify the margin as follows: 'permute(x, order, margin = 1)'" ) margin <- 1 } if (is.null(margin)) margin <- seq(ndim(x)) else { margin <- as.integer(margin) if (!all(margin %in% seq(ndim(x)))) stop("all margins need to specify a valid dimension in x") } order <- find_order(x, order, margin = margin, ...) if (length(order) != ndim(x) && length(order) != length(margin)) stop( "order needs to contain either orders for all dimensions of x or just orders for the selected margin." 
    )

  # set margins not to be permuted to identity and copy the rest
  o <- seriate(x, method = "identity")
  if (length(order) < ndim(x))
    ### we only have order for specified margins
    for(i in seq(length(order)))
      o[[margin[i]]] <- order[[i]]
  else
    for (i in margin)
      o[[i]] <- order[[i]]

  # expand identity manual permutations (if any)
  for (i in which(sapply(o, .is_identity_permutation)))
    o[[i]] <- ser_permutation_vector(seq(dim(x)[i]))

  # check
  .check_matrix_perm(x, o)

  perm <- lapply(o, get_order)

  do.call("[", c(list(x), perm, drop = FALSE))
}

.permute_1d <- function(x, order, ...) {
  if (is.logical(order)) {
    if(order)
      stop("No default seriation method for vectors available. Specify the order.")
    else
      return(x)
  }

  order <- ser_permutation(order)

  if (length(order) != 1)
    stop("dimensions do not match!")

  perm <- get_order(order, 1)

  if (length(x) != length(perm))
    stop("The permutation vectors do not fit the length of x!")

  x[perm]
}

# if we used proxy we would say:
#.rearrange_dist <- function (x, order) x[[order]]

# Note: order can be a subset
.rearrange_dist <- function (x, order) {
  # make C call
  mode(x) <- "double"

  # as.dist seems to make Size numeric and not integer!
  attr(x, "Size") <- as.integer(attr(x, "Size"))

  mode(order) <- "integer"

  d <- .Call("reorder_dist", x, order)

  labels <- if (is.null(labels(x)))
    NULL
  else
    labels(x)[order]

  structure(
    d,
    class = "dist",
    Size = length(order),
    Labels = labels,
    Diag = FALSE,
    Upper = FALSE,
    method = attr(x, "method")
  )
}
/R/permute.R
no_license
cran/seriation
R
false
false
10,629
r
####################################################################### # seriation - Infrastructure for seriation # Copyright (C) 2011 Michael Hahsler, Christian Buchta and Kurt Hornik # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # helper ndim <- function(x) length(dim(x)) find_order <- function(x, order, ...) { if (is.logical(order)) { if(order) order <- seriate(x, ...) else order <- seriate(x, method = "identity", ...) } if (is.character(order)) order <- seriate(x, method = order, ...) if (!inherits(order, "ser_permutation")) order <- ser_permutation(order) # for debugging #print(order) order } #' Permute the Order in Various Objects #' #' Provides the generic function and methods for permuting the order of various #' objects including vectors, lists, dendrograms (also \code{hclust} objects), #' the order of observations in a \code{dist} object, the rows and columns of a #' matrix or data.frame, and all dimensions of an array given a suitable #' [ser_permutation] object. #' #' The permutation vectors in [ser_permutation] are suitable if the number #' of permutation vectors matches the number of dimensions of \code{x} and if #' the length of each permutation vector has the same length as the #' corresponding dimension of \code{x}. 
#'
#' For 1-dimensional/1-mode data (list, vector, \code{dist}), \code{order} can
#' also be a single permutation vector of class [ser_permutation_vector]
#' or data which can be automatically coerced to this class (e.g. a numeric
#' vector).
#'
#' For \code{dendrogram} and \code{hclust}, subtrees are rotated to represent
#' the order best possible. If the order is not achieved perfectly then the
#' user is warned. This behavior can be changed with the extra parameter
#' \code{incompatible} which can take the values \code{"warn"} (default),
#' \code{"stop"} or \code{"ignore"}.
#'
#' @family permutation
#'
#' @param x an object (a list, a vector, a \code{dist} object, a matrix, an
#' array or any other object which provides \code{dim} and standard subsetting
#' with \code{"["}).
#' @param order an object of class [ser_permutation] which contains
#' suitable permutation vectors for \code{x}. Alternatively, a character string with the
#' name of a seriation method appropriate for `x` can be specified (see [seriate()]).
#' This will perform seriation and permute `x`. The value `TRUE` will permute using the
#' default seriation method.
#' @param margin specifies the dimensions to be permuted as a vector with dimension indices.
#' If `NULL`, \code{order} needs to contain a permutation for all dimensions.
#' If a single margin is specified, then \code{order} can also contain
#' a single permutation vector. Permutation vectors in \code{order} for
#' dimensions not in \code{margin} are ignored.
#' @param ... if `order` is the name of a seriation method, then additional arguments are
#' passed on to [seriate()].
#' @returns A permuted object of the same class as `x`.
#' @author Michael Hahsler #' @keywords manip #' @examples #' # List data types for permute #' methods("permute") #' #' # Permute matrix #' m <- matrix(rnorm(10), 5, 2, dimnames = list(1:5, LETTERS[1:2])) #' m #' #' # Permute rows and columns #' o <- ser_permutation(5:1, 2:1) #' o #' #' permute(m, o) #' #' ## permute only columns #' permute(m, o, margin = 2) #' #' ## permute using PCA seriation #' permute(m, "PCA") #' #' ## permute only rows using PCA #' permute(m, "PCA", margin = 1) #' #' # Permute data.frames #' df <- as.data.frame(m) #' permute(df, o) #' #' # Permute objects in a dist object #' d <- dist(m) #' d #' #' permute(d, c(3, 2, 1, 4, 5)) #' #' permute(d, "Spectral") #' #' # Permute a list #' l <- list(a = 1:5, b = letters[1:3], c = 0) #' l #' #' permute(l, c(2, 3, 1)) #' #' # Permute a dendrogram #' hc <- hclust(d) #' plot(hc) #' plot(permute(hc, 5:1)) #' @export permute <- function(x, order, ...) UseMethod("permute") #' @export permute.default <- function(x, order, ...) .permute_kd(x, order, ...) #' @rdname permute #' @export permute.array <- function(x, order, margin = NULL, ...) .permute_kd(x, order, margin = margin, ...) #' @rdname permute #' @export permute.matrix <- function(x, order, margin = NULL, ...) .permute_kd(x, order, margin = margin, ...) #' @rdname permute #' @export permute.data.frame <- function(x, order, margin = NULL, ...) .permute_kd(x, order, margin = margin, ...) #' @rdname permute #' @export permute.table <- function(x, order, margin = NULL, ...) .permute_kd(x, order, margin = margin, ...) #' @rdname permute #' @export permute.numeric <- function(x, order, ...) .permute_1d(x, order, ...) #' @rdname permute #' @export permute.character <- function(x, order, ...) .permute_1d(x, order, ...) #' @rdname permute #' @export permute.list <- function(x, order, ...) .permute_1d(x, order, ...) # special cases #' @rdname permute #' @export permute.dist <- function(x, order, ...) { order <- find_order(x, order, ...) 
if (.is_identity_permutation(order[[1]])) return(x) .check_dist_perm(x, order) .rearrange_dist(x, get_order(order, 1)) } #' @rdname permute #' @export permute.dendrogram <- function(x, order, ...) { order <- find_order(x, order, ...) # modeled after rotate in dendextend. Copied here to reduce the heavy dependency count of dendextend. # x <- dendextend::rotate(x, order = match(get_order(order), get_order(x))) rot <- function (x, order, ...) { if (length(get_order(order)) != stats::nobs(x)) stop("Length of order and number of leaves in dendrogram do not agree!") if (missing(order)) { warning("'order' parameter is missing, returning the tree as it was.") return(x) } labels_x <- labels(x) order_x <- order.dendrogram(x) number_of_leaves <- length(order_x) if (!is.numeric(order)) { order <- as.character(order) if (length(intersect(order, labels_x)) != number_of_leaves) { stop( "'order' is neither numeric nor a vector with ALL of the labels (in the order you want them to be)" ) } order <- match(order, labels_x) } weights <- seq_len(number_of_leaves) weights_for_order <- numeric(number_of_leaves) weights_for_order[order_x[order]] <- weights reorder(x, weights_for_order, mean, ...) } x <- rot(x, order = match(get_order(order), get_order(x))) if (any(get_order(x) != get_order(order))) warning("Dendrogram cannot be perfectly reordered! Using best approximation.") x } #' @rdname permute #' @export permute.hclust <- function(x, order, ...) 
{ nd <- stats::as.hclust(permute(stats::as.dendrogram(x), order, ...)) x$merge <- nd$merge x$height <- nd$height x$order <- nd$order x } # helper .check_dist_perm <- function(x, order) { if (inherits(order, "ser_permutation") && length(order) != 1L) stop("dimensions do not match") if (attr(x, "Size") != length(get_order(order, 1))) stop("some permutation vectors do not fit dimension of data") # check dist if (isTRUE(attr(x, "Diag")) || isTRUE(attr(x, "Upper"))) stop("'dist' with diagonal or upper triangle matrix not implemented") } .check_matrix_perm <- function(x, order) { if (ndim(x) != length(order)) stop("dimensions do not match") if (any(dim(x) != sapply(order, length))) stop("some permutation vectors do not fit dimension of data") } .permute_kd <- function(x, order, margin = NULL, ...) { # DEPRECATED: Compatibility with old permutation for data.frame if (is.data.frame(x) && is.null(margin) && !is.character(order) && ( inherits(order, "ser_permutation") && length(order) == 1 || inherits(order, "ser_permutation_vector") || is.integer(order) )) { warning( "permute for data.frames with a single seriation order is now deprecated. Specify the margin as follows: 'permute(x, order, margin = 1)'" ) margin <- 1 } if (is.null(margin)) margin <- seq(ndim(x)) else { margin <- as.integer(margin) if (!all(margin %in% seq(ndim(x)))) stop("all margins need to specify a valid dimension in x") } order <- find_order(x, order, margin = margin, ...) if (length(order) != ndim(x) && length(order) != length(margin)) stop( "order needs to contain either orders for all dimensions of x or just orders for the selected margin." 
    )

  # set margins not to be permuted to identity and copy the rest
  o <- seriate(x, method = "identity")
  if (length(order) < ndim(x))
    ### we only have order for specified margins
    for(i in seq(length(order)))
      o[[margin[i]]] <- order[[i]]
  else
    for (i in margin)
      o[[i]] <- order[[i]]

  # expand identity manual permutations (if any)
  for (i in which(sapply(o, .is_identity_permutation)))
    o[[i]] <- ser_permutation_vector(seq(dim(x)[i]))

  # check
  .check_matrix_perm(x, o)

  perm <- lapply(o, get_order)

  do.call("[", c(list(x), perm, drop = FALSE))
}

.permute_1d <- function(x, order, ...) {
  if (is.logical(order)) {
    if(order)
      stop("No default seriation method for vectors available. Specify the order.")
    else
      return(x)
  }

  order <- ser_permutation(order)

  if (length(order) != 1)
    stop("dimensions do not match!")

  perm <- get_order(order, 1)

  if (length(x) != length(perm))
    stop("The permutation vectors do not fit the length of x!")

  x[perm]
}

# if we used proxy we would say:
#.rearrange_dist <- function (x, order) x[[order]]

# Note: order can be a subset
.rearrange_dist <- function (x, order) {
  # make C call
  mode(x) <- "double"

  # as.dist seems to make Size numeric and not integer!
  attr(x, "Size") <- as.integer(attr(x, "Size"))

  mode(order) <- "integer"

  d <- .Call("reorder_dist", x, order)

  labels <- if (is.null(labels(x)))
    NULL
  else
    labels(x)[order]

  structure(
    d,
    class = "dist",
    Size = length(order),
    Labels = labels,
    Diag = FALSE,
    Upper = FALSE,
    method = attr(x, "method")
  )
}
# Data for Feb 1, 2007 and Feb 2, 2007 was extracted from
# household_power_consumption.txt using two grep commands:
#
# grep "^1/2/2007" household_power_consumption.txt > 20070201
# grep "^2/2/2007" household_power_consumption.txt > 20070202
#
# The header line was extracted from household_power_consumption.txt
# by issuing the command "head household_power_consumption.txt" from
# the console, cutting the first line from the console output,
# and dropping it into its own file with the command
#
# echo Date;Time;Global_active_power;Global_reactive_power;Voltage;Global_intensity;Sub_metering_1;Sub_metering_2;Sub_metering_3 > header
#
# Finally, the input file for this R script was created by
# concatenating all three files with the command:
#
# cat header 20070201 20070202 > combo
#
setwd("C:/Coursera/Exploratory Data Analysis/Assignments/ExData_Plotting1")
a = read.table("combo", header=T, sep=";")
hist(a$Global_active_power,col="red",xlab="Global Active Power (kilowatts)",main="Global Active Power")
/plot1.R
no_license
spamsickle/ExData_Plotting1
R
false
false
1,020
r
# Data for Feb 1, 2007 and Feb 2, 2007 was extracted from
# household_power_consumption.txt using two grep commands:
#
# grep "^1/2/2007" household_power_consumption.txt > 20070201
# grep "^2/2/2007" household_power_consumption.txt > 20070202
#
# The header line was extracted from household_power_consumption.txt
# by issuing the command "head household_power_consumption.txt" from
# the console, cutting the first line from the console output,
# and dropping it into its own file with the command
#
# echo Date;Time;Global_active_power;Global_reactive_power;Voltage;Global_intensity;Sub_metering_1;Sub_metering_2;Sub_metering_3 > header
#
# Finally, the input file for this R script was created by
# concatenating all three files with the command:
#
# cat header 20070201 20070202 > combo
#
setwd("C:/Coursera/Exploratory Data Analysis/Assignments/ExData_Plotting1")
a = read.table("combo", header=T, sep=";")
hist(a$Global_active_power,col="red",xlab="Global Active Power (kilowatts)",main="Global Active Power")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tclustreg.R \name{tclustreg} \alias{tclustreg} \title{Computes robust linear grouping analysis} \usage{ tclustreg( y, x, k, alphaLik, alphaX, restrfactor = 12, intercept = TRUE, plot = FALSE, nsamp, refsteps = 10, reftol = 1e-13, equalweights = FALSE, mixt = 0, wtrim = 0, we, msg = TRUE, RandNumbForNini, trace = FALSE, ... ) } \arguments{ \item{y}{Response variable. A vector with \code{n} elements that contains the response variable.} \item{x}{An n x p data matrix (n observations and p variables). Rows of x represent observations, and columns represent variables. Missing values (NA's) and infinite values (Inf's) are allowed, since observations (rows) with missing or infinite values will automatically be excluded from the computations.} \item{k}{Number of groups.} \item{alphaLik}{Trimming level, a scalar between 0 and 0.5 or an integer specifying the number of observations which have to be trimmed. If \code{alphaLik=0}, there is no trimming. More in detail, if \code{0 < alphaLik < 1} clustering is based on \code{h = floor(n * (1 - alphaLik))} observations. If \code{alphaLik} is an integer greater than 1 clustering is based on \code{h = n - floor(alphaLik)}. More in detail, likelihood contributions are sorted and the units associated with the smallest \code{n - h} contributions are trimmed.} \item{alphaX}{Second-level trimming or constrained weighted model for \code{x}.} \item{restrfactor}{Restriction factor for regression residuals and covariance matrices of the explanatory variables. Scalar or vector with two elements. If \code{restrfactor} is a scalar it controls the differences among group scatters of the residuals. The value 1 is the strongest restriction. If \code{restrfactor} is a vector with two elements the first element controls the differences among group scatters of the residuals and the second the differences among covariance matrices of the explanatory variables. 
Note that \code{restrfactor[2]} is used just if \code{alphaX=1},
that is if constrained weighted model for \code{x} is assumed.}

\item{intercept}{whether to use a constant term (default is \code{intercept=TRUE})}

\item{plot}{If \code{plot=FALSE} (default) or \code{plot=0} no plot is produced.
If \code{plot=TRUE} a plot with the final allocation is shown (using the spmplot function).
If \code{X} is 2-dimensional, the lines associated to the groups are shown too.}

\item{nsamp}{If a scalar, it contains the number of subsamples which will be extracted.
If \code{nsamp = 0} all subsets will be extracted.
Remark - if the number of all possible subset is greater than 300 the default is
to extract all subsets, otherwise just 300.
If \code{nsamp} is a matrix it contains in the rows the indexes of the subsets
which have to be extracted. \code{nsamp} in this case can be conveniently generated
by function \code{subsets()}.
\code{nsamp} must have \code{k * p} columns. The first \code{p} columns are used to
estimate the regression coefficient of group 1, ..., the last \code{p} columns are
used to estimate the regression coefficient of group \code{k}.}

\item{refsteps}{Number of refining iterations in each subsample. Default is
\code{refsteps=10}. \code{refsteps = 0} means "raw-subsampling" without iterations.}

\item{reftol}{Tolerance of the refining steps. The default value is 1e-13}

\item{equalweights}{A logical specifying whether cluster weights in the concentration
and assignment steps shall be considered.
If \code{equalweights=TRUE} we are (ideally) assuming equally sized groups, else if
\code{equalweights = FALSE} (default) we allow for different group weights.
Please, check in the given references which functions are maximized in both cases.}

\item{mixt}{Specifies whether mixture modelling or crisp assignment approach to model
based clustering must be used.
In the case of mixture modelling parameter mixt also controls which is the criterion to find the untrimmed units in each step of the maximization. If \code{mixt>=1} mixture modelling is assumed else crisp assignment. The default value is \code{mixt=0}, i.e. crisp assignment. Please see for details the provided references. The parameter \code{mixt} also controls the criterion to select the units to trim. If \code{mixt = 2} the \code{h} units are those which give the largest contribution to the likelihood, else if \code{mixt=1} the criterion to select the \code{h} units is exactly the same as the one which is used in crisp assignment.} \item{wtrim}{How to apply the weights on the observations - a flag taking values in c(0, 1, 2, 3, 4). \itemize{ \item If \code{wtrim==0} (no weights), the algorithm reduces to the standard \code{tclustreg} algorithm. \item If \code{wtrim==1}, trimming is done by weighting the observations using values specified in vector \code{we}. In this case, vector \code{we} must be supplied by the user. \item If \code{wtrim==2}, trimming is again done by weighting the observations using values specified in vector \code{we}. In this case, vector \code{we} is computed from the data as a function of the density estimate pdfe. Specifically, the weight of each observation is the probability of retaining the observation, computed as \deqn{pretain_{ig} = 1-pdfe_{ig}/max_{ig}(pdfe_{ig})} \item If \code{wtrim==3}, trimming is again done by weighting the observations using values specified in vector \code{we}. In this case, each element wei of vector \code{we} is a Bernoulli random variable with probability of success \eqn{pdfe_{ig}}. In the clustering framework this is done under the constraint that no group is empty. \item If \code{wtrim==4}, trimming is done with the tandem approach of Cerioli and Perrotta (2014). }} \item{we}{Weights. 
A vector of size n-by-1 containing application-specific weights Default is a vector of ones.} \item{msg}{Controls whether to display or not messages on the screen If \code{msg==TRUE} (default) messages are displayed on the screen. If \code{msg=2}, detailed messages are displayed, for example the information at iteration level.} \item{RandNumbForNini}{pre-extracted random numbers to initialize proportions. Matrix of size k-by-nrow(nsamp) containing the random numbers which are used to initialize the proportions of the groups. This option is effective only if \code{nsamp} is a matrix which contains pre-extracted subsamples. The purpose of this option is to enable the user to replicate the results when the function \code{tclustreg()} is called using a parfor instruction (as it happens for example in routine IC, where \code{tclustreg()} is called through a parfor for different values of the restriction factor). The default is that \code{RandNumbForNini} is empty - then uniform random numbers are used.} \item{trace}{Whether to print intermediate results. Default is \code{trace=FALSE}.} \item{...}{potential further arguments passed to lower level functions.} } \value{ An S3 object of class \code{\link{tclustreg.object}} } \description{ Performs robust linear grouping analysis. } \examples{ \dontrun{ ## The X data have been introduced by Gordaliza, Garcia-Escudero & Mayo-Iscar (2013). ## The dataset presents two parallel components without contamination. 
data(X)
y1 = X[, ncol(X)]
X1 = X[,-ncol(X), drop=FALSE]

(out <- tclustreg(y1, X1, k=2, alphaLik=0.05, alphaX=0.01, restrfactor=5,
    plot=TRUE, trace=TRUE))

(out <- tclustreg(y1, X1, k=2, alphaLik=0.05, alphaX=0.01, restrfactor=2,
    mixt=2, plot=TRUE, trace=TRUE))

## Examples with fishery data
data(fishery)
X <- fishery

## some jittering is necessary because duplicated units are not treated:
## this needs to be addressed
X <- X + 10^(-8) * abs(matrix(rnorm(nrow(X)*ncol(X)), ncol=2))
y1 <- X[, ncol(X)]
X1 <- X[, -ncol(X), drop=FALSE]

(out <- tclustreg(y1, X1, k=3, restrfact=50, alphaLik=0.04, alphaX=0.01, trace=TRUE))

## Example 2:
## Define some arbitrary weights for the units
we <- sqrt(X1)/sum(sqrt(X1))

## tclustreg required parameters
k <- 2; restrfact <- 50; alpha1 <- 0.04; alpha2 <- 0.01

## Now tclust is run on each combination of mixt and wtrim options

cat("\nmixt=0; wtrim=0", "\nStandard tclustreg, with classification likelihood
    and without thinning\n")
(out <- tclustreg(y1, X1, k=k, restrfact=restrfact, alphaLik=alpha1,
    alphaX=alpha2, mixt=0, wtrim=0, trace=TRUE))

cat("\nmixt=2; wtrim=0", "\nMixture likelihood, no thinning\n")
(out <- tclustreg(y1, X1, k=k, restrfact=restrfact, alphaLik=alpha1,
    alphaX=alpha2, mixt=2, wtrim=0, trace=TRUE))

cat("\nmixt=0; wtrim=1", "\nClassification likelihood, thinning based on user weights\n")
(out <- tclustreg(y1, X1, k=k, restrfact=restrfact, alphaLik=alpha1,
    alphaX=alpha2, mixt=0, we=we, wtrim=1, trace=TRUE))

cat("\nmixt=2; wtrim=1", "\nMixture likelihood, thinning based on user weights\n")
(out <- tclustreg(y1, X1, k=k, restrfact=restrfact, alphaLik=alpha1,
    alphaX=alpha2, mixt=2, we=we, wtrim=1, trace=TRUE))

cat("\nmixt=0; wtrim=2", "\nClassification likelihood, thinning based on retention
    probabilities\n")
(out <- tclustreg(y1, X1, k=k, restrfact=restrfact, alphaLik=alpha1,
    alphaX=alpha2, mixt=0, wtrim=2, trace=TRUE))

cat("\nmixt=2; wtrim=2", "\nMixture likelihood, thinning based on retention
probabilities\n") (out <- tclustreg(y1, X1, k=k, restrfact=restrfact, alphaLik=alpha1, alphaX=alpha2, mixt=2, wtrim=2, trace=TRUE)) cat("\nmixt=0; wtrim=3", "\nClassification likelihood, thinning based on bernoulli weights\n") (out <- tclustreg(y1, X1, k=k, restrfact=restrfact, alphaLik=alpha1, alphaX=alpha2, mixt=0, wtrim=3, trace=TRUE)) cat("\nmixt=2; wtrim=3", "\nMixture likelihood, thinning based on bernoulli weights\n") (out <- tclustreg(y1, X1, k=k, restrfact=restrfact, alphaLik=alpha1, alphaX=alpha2, mixt=2, wtrim=3, trace=TRUE)) cat("\nmixt=0; wtrim=4", "\nClassification likelihood, tandem thinning based on bernoulli weights\n") (out <- tclustreg(y1, X1, k=k, restrfact=restrfact, alphaLik=alpha1, alphaX=alpha2, mixt=0, wtrim=4, trace=TRUE)) cat("\nmixt=2; wtrim=4", "\nMixture likelihood, tandem thinning based on bernoulli weights\n") (out <- tclustreg(y1, X1, k=k, restrfact=restrfact, alphaLik=alpha1, alphaX=alpha2, mixt=2, wtrim=4, trace=TRUE)) } } \references{ Mayo-Iscar A. (2016). The joint role of trimming and constraints in robust estimation for mixtures of gaussian factor analyzers, Computational Statistics and Data Analysis", Vol. 99, pp. 131-147. Garcia-Escudero, L.A., Gordaliza, A., Greselin, F., Ingrassia, S. and Mayo-Iscar, A. (2017), Robust estimation of mixtures of regressions with random covariates, via trimming and constraints, Statistics and Computing, Vol. 27, pp. 377-402. Garcia-Escudero, L.A., Gordaliza A., Mayo-Iscar A., and San Martin R. (2010). Robust clusterwise linear regression through trimming, Computational Statistics and Data Analysis, Vol. 54, pp.3057-3069. Cerioli, A. and Perrotta, D. (2014). Robust Clustering Around Regression Lines with High Density Regions. Advances in Data Analysis and Classification, Vol. 8, pp. 5-26. Torti F., Perrotta D., Riani, M. and Cerioli A. (2019). Assessing Robust Methodologies for Clustering Linear Regression Data, Advances in Data Analysis and Classification, Vol. 13, pp 227-257. 
} \author{ FSDA team, \email{valentin.todorov@chello.at} }
/man/tclustreg.Rd
no_license
cran/fsdaR
R
false
true
12,123
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tclustreg.R \name{tclustreg} \alias{tclustreg} \title{Computes robust linear grouping analysis} \usage{ tclustreg( y, x, k, alphaLik, alphaX, restrfactor = 12, intercept = TRUE, plot = FALSE, nsamp, refsteps = 10, reftol = 1e-13, equalweights = FALSE, mixt = 0, wtrim = 0, we, msg = TRUE, RandNumbForNini, trace = FALSE, ... ) } \arguments{ \item{y}{Response variable. A vector with \code{n} elements that contains the response variable.} \item{x}{An n x p data matrix (n observations and p variables). Rows of x represent observations, and columns represent variables. Missing values (NA's) and infinite values (Inf's) are allowed, since observations (rows) with missing or infinite values will automatically be excluded from the computations.} \item{k}{Number of groups.} \item{alphaLik}{Trimming level, a scalar between 0 and 0.5 or an integer specifying the number of observations which have to be trimmed. If \code{alphaLik=0}, there is no trimming. More in detail, if \code{0 < alphaLik < 1} clustering is based on \code{h = floor(n * (1 - alphaLik))} observations. If \code{alphaLik} is an integer greater than 1 clustering is based on \code{h = n - floor(alphaLik)}. More in detail, likelihood contributions are sorted and the units associated with the smallest \code{n - h} contributions are trimmed.} \item{alphaX}{Second-level trimming or constrained weighted model for \code{x}.} \item{restrfactor}{Restriction factor for regression residuals and covariance matrices of the explanatory variables. Scalar or vector with two elements. If \code{restrfactor} is a scalar it controls the differences among group scatters of the residuals. The value 1 is the strongest restriction. If \code{restrfactor} is a vector with two elements the first element controls the differences among group scatters of the residuals and the second the differences among covariance matrices of the explanatory variables. 
Note that \code{restrfactor[2]} is used just if \code{alphaX=1}, that is if constrained weighted model for \code{x} is assumed.} \item{intercept}{wheather to use constant term (default is \code{intercept=TRUE}} \item{plot}{If \code{plot=FALSE} (default) or \code{plot=0} no plot is produced. If \code{plot=TRUE} a plot with the final allocation is shown (using the spmplot function). If \code{X} is 2-dimensional, the lines associated to the groups are shown too.} \item{nsamp}{If a scalar, it contains the number of subsamples which will be extracted. If \code{nsamp = 0} all subsets will be extracted. Remark - if the number of all possible subset is greater than 300 the default is to extract all subsets, otherwise just 300. If \code{nsamp} is a matrix it contains in the rows the indexes of the subsets which have to be extracted. \code{nsamp} in this case can be conveniently generated by function \code{subsets()}. \code{nsamp} must have \code{k * p} columns. The first \code{p} columns are used to estimate the regression coefficient of group 1, ..., the last \code{p} columns are used to estimate the regression coefficient of group \code{k}.} \item{refsteps}{Number of refining iterations in each subsample. Default is \code{refsteps=10}. \code{refsteps = 0} means "raw-subsampling" without iterations.} \item{reftol}{Tolerance of the refining steps. The default value is 1e-14} \item{equalweights}{A logical specifying wheather cluster weights in the concentration and assignment steps shall be considered. If \code{equalweights=TRUE} we are (ideally) assuming equally sized groups, else if \code{equalweights = false} (default) we allow for different group weights. Please, check in the given references which functions are maximized in both cases.} \item{mixt}{Specifies whether mixture modelling or crisp assignment approach to model based clustering must be used. 
In the case of mixture modelling parameter mixt also controls which is the criterion to find the untrimmed units in each step of the maximization. If \code{mixt>=1} mixture modelling is assumed else crisp assignment. The default value is \code{mixt=0}, i.e. crisp assignment. Please see for details the provided references. The parameter \code{mixt} also controls the criterion to select the units to trim. If \code{mixt = 2} the \code{h} units are those which give the largest contribution to the likelihood, else if \code{mixt=1} the criterion to select the \code{h} units is exactly the same as the one which is used in crisp assignment.} \item{wtrim}{How to apply the weights on the observations - a flag taking values in c(0, 1, 2, 3, 4). \itemize{ \item If \code{wtrim==0} (no weights), the algorithm reduces to the standard \code{tclustreg} algorithm. \item If \code{wtrim==1}, trimming is done by weighting the observations using values specified in vector \code{we}. In this case, vector \code{we} must be supplied by the user. \item If \code{wtrim==2}, trimming is again done by weighting the observations using values specified in vector \code{we}. In this case, vector \code{we} is computed from the data as a function of the density estimate pdfe. Specifically, the weight of each observation is the probability of retaining the observation, computed as \deqn{pretain_{ig} = 1-pdfe_{ig}/max_{ig}(pdfe_{ig})} \item If \code{wtrim==3}, trimming is again done by weighting the observations using values specified in vector \code{we}. In this case, each element wei of vector \code{we} is a Bernoulli random variable with probability of success \eqn{pdfe_{ig}}. In the clustering framework this is done under the constraint that no group is empty. \item If \code{wtrim==4}, trimming is done with the tandem approach of Cerioli and Perrotta (2014). }} \item{we}{Weights. 
A vector of size n-by-1 containing application-specific weights Default is a vector of ones.} \item{msg}{Controls whether to display or not messages on the screen If \code{msg==TRUE} (default) messages are displayed on the screen. If \code{msg=2}, detailed messages are displayed, for example the information at iteration level.} \item{RandNumbForNini}{pre-extracted random numbers to initialize proportions. Matrix of size k-by-nrow(nsamp) containing the random numbers which are used to initialize the proportions of the groups. This option is effective only if \code{nsamp} is a matrix which contains pre-extracted subsamples. The purpose of this option is to enable the user to replicate the results when the function \code{tclustreg()} is called using a parfor instruction (as it happens for example in routine IC, where \code{tclustreg()} is called through a parfor for different values of the restriction factor). The default is that \code{RandNumbForNini} is empty - then uniform random numbers are used.} \item{trace}{Whether to print intermediate results. Default is \code{trace=FALSE}.} \item{...}{potential further arguments passed to lower level functions.} } \value{ An S3 object of class \code{\link{tclustreg.object}} } \description{ Performs robust linear grouping analysis. } \examples{ \dontrun{ ## The X data have been introduced by Gordaliza, Garcia-Escudero & Mayo-Iscar (2013). ## The dataset presents two parallel components without contamination. 
data(X) y1 = X[, ncol(X)] X1 = X[,-ncol(X), drop=FALSE] (out <- tclustreg(y1, X1, k=2, alphaLik=0.05, alphaX=0.01, restrfactor=5, plot=TRUE, trace=TRUE)) (out <- tclustreg(y1, X1, k=2, alphaLik=0.05, alphaX=0.01, restrfactor=2, mixt=2, plot=TRUE, trace=TRUE)) ## Examples with fishery data data(fishery) X <- fishery ## some jittering is necessary because duplicated units are not treated: ## this needs to be addressed X <- X + 10^(-8) * abs(matrix(rnorm(nrow(X)*ncol(X)), ncol=2)) y1 <- X[, ncol(X)] X1 <- X[, -ncol(X), drop=FALSE] (out <- tclustreg(y1, X1, k=3, restrfact=50, alphaLik=0.04, alphaX=0.01, trace=TRUE)) ## Example 2: ## Define some arbitrary weightssome arbitrary weights for the units we <- sqrt(X1)/sum(sqrt(X1)) ## tclustreg required parameters k <- 2; restrfact <- 50; alpha1 <- 0.04; alpha2 <- 0.01 ## Now tclust is run on each combination of mixt and wtrim options cat("\nmixt=0; wtrim=0", "\nStandard tclustreg, with classification likelihood and without thinning\n") (out <- tclustreg(y1, X1, k=k, restrfact=restrfact, alphaLik=alpha1, alphaX=alpha2, mixt=0, wtrim=0, trace=TRUE)) cat("\nmixt=2; wtrim=0", "\nMixture likelihood, no thinning\n") (out <- tclustreg(y1, X1, k=k, restrfact=restrfact, alphaLik=alpha1, alphaX=alpha2, mixt=2, wtrim=0, trace=TRUE)) cat("\nmixt=0; wtrim=1", "\nClassification likelihood, thinning based on user weights\n") (out <- tclustreg(y1, X1, k=k, restrfact=restrfact, alphaLik=alpha1, alphaX=alpha2, mixt=0, we=we, wtrim=1, trace=TRUE)) cat("\nmixt=2; wtrim=1", "\nMixture likelihood, thinning based on user weights\n") (out <- tclustreg(y1, X1, k=k, restrfact=restrfact, alphaLik=alpha1, alphaX=alpha2, mixt=2, we=we, wtrim=1, trace=TRUE)) cat("\nmixt=0; wtrim=2", "\nClassification likelihood, thinning based on retention probabilities\n") (out <- tclustreg(y1, X1, k=k, restrfact=restrfact, alphaLik=alpha1, alphaX=alpha2, mixt=0, wtrim=2, trace=TRUE)) cat("\nmixt=2; wtrim=2", "\nMixture likelihood, thinning based on retention 
probabilities\n") (out <- tclustreg(y1, X1, k=k, restrfact=restrfact, alphaLik=alpha1, alphaX=alpha2, mixt=2, wtrim=2, trace=TRUE)) cat("\nmixt=0; wtrim=3", "\nClassification likelihood, thinning based on bernoulli weights\n") (out <- tclustreg(y1, X1, k=k, restrfact=restrfact, alphaLik=alpha1, alphaX=alpha2, mixt=0, wtrim=3, trace=TRUE)) cat("\nmixt=2; wtrim=3", "\nMixture likelihood, thinning based on bernoulli weights\n") (out <- tclustreg(y1, X1, k=k, restrfact=restrfact, alphaLik=alpha1, alphaX=alpha2, mixt=2, wtrim=3, trace=TRUE)) cat("\nmixt=0; wtrim=4", "\nClassification likelihood, tandem thinning based on bernoulli weights\n") (out <- tclustreg(y1, X1, k=k, restrfact=restrfact, alphaLik=alpha1, alphaX=alpha2, mixt=0, wtrim=4, trace=TRUE)) cat("\nmixt=2; wtrim=4", "\nMixture likelihood, tandem thinning based on bernoulli weights\n") (out <- tclustreg(y1, X1, k=k, restrfact=restrfact, alphaLik=alpha1, alphaX=alpha2, mixt=2, wtrim=4, trace=TRUE)) } } \references{ Mayo-Iscar A. (2016). The joint role of trimming and constraints in robust estimation for mixtures of gaussian factor analyzers, Computational Statistics and Data Analysis", Vol. 99, pp. 131-147. Garcia-Escudero, L.A., Gordaliza, A., Greselin, F., Ingrassia, S. and Mayo-Iscar, A. (2017), Robust estimation of mixtures of regressions with random covariates, via trimming and constraints, Statistics and Computing, Vol. 27, pp. 377-402. Garcia-Escudero, L.A., Gordaliza A., Mayo-Iscar A., and San Martin R. (2010). Robust clusterwise linear regression through trimming, Computational Statistics and Data Analysis, Vol. 54, pp.3057-3069. Cerioli, A. and Perrotta, D. (2014). Robust Clustering Around Regression Lines with High Density Regions. Advances in Data Analysis and Classification, Vol. 8, pp. 5-26. Torti F., Perrotta D., Riani, M. and Cerioli A. (2019). Assessing Robust Methodologies for Clustering Linear Regression Data, Advances in Data Analysis and Classification, Vol. 13, pp 227-257. 
} \author{ FSDA team, \email{valentin.todorov@chello.at} }
## Plot the aminocoumarin biosynthetic gene cluster of strain B06-3 alongside
## reference clusters (MIBiG BGCs and AF170880), highlighting the AHBA
## biosynthesis region and the AHBA-specific CAL domain.
## Writes figures/aminocoumarin_cluster.png.
library(tidyverse)
library(gggenes)
library(data.table)
library(RColorBrewer)

target_cluster <- "aminocoumarin"

## Load every antiSMASH GFF annotation for the target cluster into one table.
## NOTE(review): `pattern` is a regex, so the dots match any character --
## confirm the intent is files literally named "*.gff.1".
clusters <- list.files(path = sprintf("data/antismash/%s//", target_cluster),
                       pattern = ".gff.1", recursive = TRUE, full.names = TRUE)
clusters <- lapply(clusters, rtracklayer::readGFF)
clusters <- bind_rows(clusters, .id = "id")

## Normalize the B06-3 contig name.
clusters <- clusters %>%
  mutate(seqid = case_when(seqid == "c00001_tig0000.." ~ "B06-3",
                           TRUE ~ as.character(seqid)))

## Map reference accessions to compound names.
clusters <- clusters %>%
  mutate(molecule = case_when(seqid == "BGC0001287" ~ "chaxamycin",
                              seqid == "BGC0000833" ~ "coumermycinA1",
                              seqid == "BGC0000832" ~ "clorobiocin",
                              seqid == "BGC0000141" ~ "rubradirin",
                              seqid == "AF170880" ~ "novobiocin",
                              seqid == "B06-3" ~ "B06-3"))

## Trim the B06-3 record to the cluster region of interest.
clusters <- clusters %>% filter(!(seqid == "B06-3" & start > 100244 ))

## Strip smCOG prefixes/suffixes from the annotated gene functions.
clusters$gene_functions <- clusters$gene_functions %>%
  str_extract("(.*):(.*)\\(") %>%
  str_remove_all(pattern = "(.*)smcogs\\) |\\(")

# get high frequency gene function (annotated more than 3 times overall)
freq_g_func <- clusters %>%
  group_by(gene_functions) %>%
  count(gene_functions) %>%
  filter(n > 3)

## Keep only the high-frequency functions for the legend; everything else
## becomes NA and is drawn in dimgrey.
clusters <- clusters %>%
  mutate(g_func_simple = case_when(
    gene_functions %in% freq_g_func$gene_functions ~ as.character(gene_functions)))
g_func_colors <- colorRampPalette(brewer.pal(12, "Set3"))(nrow(freq_g_func))

## Alternative (disabled) coarse-grained simplification of gene functions:
# clusters$g_func_simple = clusters$gene_functions %>% str_extract(pattern = '\\)(.*)') %>% str_remove_all(pattern = 'SMC\\w+:|\\) |\\((.*)')
# clusters = clusters %>% mutate(g_func_simple = case_when(
#   g_func_simple %like% "regulator" ~ "regulator",
#   g_func_simple %like% "other" ~ "other",
#   g_func_simple %like% "transporter" ~ "transporter",
#   g_func_simple %like% "dehydrogenase" ~ "dehydrogenase",
#   g_func_simple %like% "p450|P450" ~ "p450",
#   g_func_simple %like% "tra_KS|hyb_KS|PKS_AT" ~ "PKS",
#   g_func_simple %like% "halogenase" ~ "halogenase",
#   g_func_simple %like% "methyltransferase" ~ "methyltransferase",
#   g_func_simple %like% "DegT/DnrJ/EryC1/StrS aminotransferase" ~ "aminotransferase",
#   g_func_simple %like% "malonyl CoA-acyl carrier protein transacylase" ~ "malonyl CoA-ACP transacylase",
#   g_func_simple %like% "Beta-ketoacyl synthase" ~ "Beta-ketoacyl synthase",
#   TRUE ~ "other"
# ))
# clusters = clusters %>% mutate(g_func_simple = na_if(g_func_simple,"other"))

## Base gene map: one row of arrows per sequence, colored by gene function.
p <- ggplot(clusters %>% filter(type...3 == "CDS")) +
  geom_gene_arrow(aes(xmin = start, xmax = end, y = seqid, fill = g_func_simple),
                  size = 0.6) +
  theme_genes() +
  theme(legend.position = "bottom") +
  # scale_x_discrete(expand = c(0,0),
  #                  breaks = scales::trans_breaks(identity, identity, n = 5)) +
  scale_fill_discrete(type = g_func_colors, na.value = "dimgrey") +
  guides(fill = guide_legend(ncol = 3, title = "Gene function",
                             title.position = "top", title.hjust = 0.5,
                             title.theme = element_text(face = "bold")))

## add AHBA synthesis genes: mark the antiSMASH "region" feature on B06-3.
aminocoumarin_regions <- clusters %>%
  filter(product == "aminocoumarin" & type...3 == "region" & seqid == "B06-3") %>%
  select(seqid, start, end, id, product)
aminocoumarin_regions$molecule <- factor("B06-3",
                                         levels = sort(unique(clusters$molecule)))
aminocoumarin_regions$id <- as.numeric(aminocoumarin_regions$id)
p <- p +
  geom_segment(data = aminocoumarin_regions,
               aes(y = id + 0.2, x = start, xend = end, yend = id + 0.2),
               color = "darkgreen", size = 0.7) +
  ## BUGFIX: label at the segment midpoint, (start + end) / 2. The original
  ## wrote "start + end/2", which by operator precedence placed the label
  ## beyond the segment (cf. the (start+end)/2 used for the CAL labels below).
  geom_text(data = aminocoumarin_regions,
            aes(y = id + 0.4, x = (start + end) / 2,
                label = "AHBA biosynthesis genes"),
            color = "darkgreen")

# AHBA domain: the CAL domain with Minowa-predicted AHBA specificity.
CAL_AHBA <- clusters %>%
  filter(specificity == "Minowa=AHBA") %>%
  select(seqid, start, end, locus_tag, aSDomain, specificity, id)
## Parent CDS coordinates for the domain-bearing genes (matched by locus_tag
## or by gene name).
CAL_AHBA_cds <- clusters %>%
  filter((locus_tag %in% as.character(CAL_AHBA$locus_tag) |
            (gene %in% as.character(CAL_AHBA$locus_tag))) & type...3 == "CDS") %>%
  select(seqid, gstart = start, gend = end, id)
CAL_AHBA <- merge(CAL_AHBA, CAL_AHBA_cds) %>% unique()
CAL_AHBA$id <- as.numeric(CAL_AHBA$id)
CAL_AHBA <- CAL_AHBA %>% mutate(aSDomain = "CAL domain\nAHBA specific")
p <- p +
  geom_subgene_arrow(data = CAL_AHBA,
                     aes(xmin = gstart, xmax = gend,
                         xsubmin = start, xsubmax = end, y = id),
                     fill = "darkgreen") +
  ## Blank repel label keeps a leader segment pointing at the domain.
  ggrepel::geom_text_repel(data = CAL_AHBA,
                           aes(label = " ", x = (start + end) / 2, y = id + 0.2),
                           color = "darkgreen", fontface = "bold", size = 3.5,
                           min.segment.length = 0) +
  geom_text(data = CAL_AHBA,
            aes(label = aSDomain, x = (start + end) / 2, y = id + 0.5),
            color = "darkgreen")

## Final cosmetics and export.
p <- p +
  scale_y_discrete(labels = unique(clusters$molecule), expand = c(0.1, 0.3)) +
  theme(axis.text.y = element_text(size = 14, face = "bold", color = "black"),
        panel.grid.major.y = element_line(color = "black"),
        legend.background = element_rect(color = "black")) +
  labs(x = "Scale (bp)", y = NULL)
ggsave(p, filename = "figures/aminocoumarin_cluster.png",
       width = 15, height = 6, dpi = 300)
/src/plot_cluster_aminocoumarin.R
no_license
rajwanir/flongle_actinomycetes_paper
R
false
false
5,367
r
## Plot the aminocoumarin biosynthetic gene cluster of strain B06-3 alongside
## reference clusters (MIBiG BGCs and AF170880), highlighting the AHBA
## biosynthesis region and the AHBA-specific CAL domain.
## Writes figures/aminocoumarin_cluster.png.
library(tidyverse)
library(gggenes)
library(data.table)
library(RColorBrewer)

target_cluster <- "aminocoumarin"

## Load every antiSMASH GFF annotation for the target cluster into one table.
## NOTE(review): `pattern` is a regex, so the dots match any character --
## confirm the intent is files literally named "*.gff.1".
clusters <- list.files(path = sprintf("data/antismash/%s//", target_cluster),
                       pattern = ".gff.1", recursive = TRUE, full.names = TRUE)
clusters <- lapply(clusters, rtracklayer::readGFF)
clusters <- bind_rows(clusters, .id = "id")

## Normalize the B06-3 contig name.
clusters <- clusters %>%
  mutate(seqid = case_when(seqid == "c00001_tig0000.." ~ "B06-3",
                           TRUE ~ as.character(seqid)))

## Map reference accessions to compound names.
clusters <- clusters %>%
  mutate(molecule = case_when(seqid == "BGC0001287" ~ "chaxamycin",
                              seqid == "BGC0000833" ~ "coumermycinA1",
                              seqid == "BGC0000832" ~ "clorobiocin",
                              seqid == "BGC0000141" ~ "rubradirin",
                              seqid == "AF170880" ~ "novobiocin",
                              seqid == "B06-3" ~ "B06-3"))

## Trim the B06-3 record to the cluster region of interest.
clusters <- clusters %>% filter(!(seqid == "B06-3" & start > 100244 ))

## Strip smCOG prefixes/suffixes from the annotated gene functions.
clusters$gene_functions <- clusters$gene_functions %>%
  str_extract("(.*):(.*)\\(") %>%
  str_remove_all(pattern = "(.*)smcogs\\) |\\(")

# get high frequency gene function (annotated more than 3 times overall)
freq_g_func <- clusters %>%
  group_by(gene_functions) %>%
  count(gene_functions) %>%
  filter(n > 3)

## Keep only the high-frequency functions for the legend; everything else
## becomes NA and is drawn in dimgrey.
clusters <- clusters %>%
  mutate(g_func_simple = case_when(
    gene_functions %in% freq_g_func$gene_functions ~ as.character(gene_functions)))
g_func_colors <- colorRampPalette(brewer.pal(12, "Set3"))(nrow(freq_g_func))

## Alternative (disabled) coarse-grained simplification of gene functions:
# clusters$g_func_simple = clusters$gene_functions %>% str_extract(pattern = '\\)(.*)') %>% str_remove_all(pattern = 'SMC\\w+:|\\) |\\((.*)')
# clusters = clusters %>% mutate(g_func_simple = case_when(
#   g_func_simple %like% "regulator" ~ "regulator",
#   g_func_simple %like% "other" ~ "other",
#   g_func_simple %like% "transporter" ~ "transporter",
#   g_func_simple %like% "dehydrogenase" ~ "dehydrogenase",
#   g_func_simple %like% "p450|P450" ~ "p450",
#   g_func_simple %like% "tra_KS|hyb_KS|PKS_AT" ~ "PKS",
#   g_func_simple %like% "halogenase" ~ "halogenase",
#   g_func_simple %like% "methyltransferase" ~ "methyltransferase",
#   g_func_simple %like% "DegT/DnrJ/EryC1/StrS aminotransferase" ~ "aminotransferase",
#   g_func_simple %like% "malonyl CoA-acyl carrier protein transacylase" ~ "malonyl CoA-ACP transacylase",
#   g_func_simple %like% "Beta-ketoacyl synthase" ~ "Beta-ketoacyl synthase",
#   TRUE ~ "other"
# ))
# clusters = clusters %>% mutate(g_func_simple = na_if(g_func_simple,"other"))

## Base gene map: one row of arrows per sequence, colored by gene function.
p <- ggplot(clusters %>% filter(type...3 == "CDS")) +
  geom_gene_arrow(aes(xmin = start, xmax = end, y = seqid, fill = g_func_simple),
                  size = 0.6) +
  theme_genes() +
  theme(legend.position = "bottom") +
  # scale_x_discrete(expand = c(0,0),
  #                  breaks = scales::trans_breaks(identity, identity, n = 5)) +
  scale_fill_discrete(type = g_func_colors, na.value = "dimgrey") +
  guides(fill = guide_legend(ncol = 3, title = "Gene function",
                             title.position = "top", title.hjust = 0.5,
                             title.theme = element_text(face = "bold")))

## add AHBA synthesis genes: mark the antiSMASH "region" feature on B06-3.
aminocoumarin_regions <- clusters %>%
  filter(product == "aminocoumarin" & type...3 == "region" & seqid == "B06-3") %>%
  select(seqid, start, end, id, product)
aminocoumarin_regions$molecule <- factor("B06-3",
                                         levels = sort(unique(clusters$molecule)))
aminocoumarin_regions$id <- as.numeric(aminocoumarin_regions$id)
p <- p +
  geom_segment(data = aminocoumarin_regions,
               aes(y = id + 0.2, x = start, xend = end, yend = id + 0.2),
               color = "darkgreen", size = 0.7) +
  ## BUGFIX: label at the segment midpoint, (start + end) / 2. The original
  ## wrote "start + end/2", which by operator precedence placed the label
  ## beyond the segment (cf. the (start+end)/2 used for the CAL labels below).
  geom_text(data = aminocoumarin_regions,
            aes(y = id + 0.4, x = (start + end) / 2,
                label = "AHBA biosynthesis genes"),
            color = "darkgreen")

# AHBA domain: the CAL domain with Minowa-predicted AHBA specificity.
CAL_AHBA <- clusters %>%
  filter(specificity == "Minowa=AHBA") %>%
  select(seqid, start, end, locus_tag, aSDomain, specificity, id)
## Parent CDS coordinates for the domain-bearing genes (matched by locus_tag
## or by gene name).
CAL_AHBA_cds <- clusters %>%
  filter((locus_tag %in% as.character(CAL_AHBA$locus_tag) |
            (gene %in% as.character(CAL_AHBA$locus_tag))) & type...3 == "CDS") %>%
  select(seqid, gstart = start, gend = end, id)
CAL_AHBA <- merge(CAL_AHBA, CAL_AHBA_cds) %>% unique()
CAL_AHBA$id <- as.numeric(CAL_AHBA$id)
CAL_AHBA <- CAL_AHBA %>% mutate(aSDomain = "CAL domain\nAHBA specific")
p <- p +
  geom_subgene_arrow(data = CAL_AHBA,
                     aes(xmin = gstart, xmax = gend,
                         xsubmin = start, xsubmax = end, y = id),
                     fill = "darkgreen") +
  ## Blank repel label keeps a leader segment pointing at the domain.
  ggrepel::geom_text_repel(data = CAL_AHBA,
                           aes(label = " ", x = (start + end) / 2, y = id + 0.2),
                           color = "darkgreen", fontface = "bold", size = 3.5,
                           min.segment.length = 0) +
  geom_text(data = CAL_AHBA,
            aes(label = aSDomain, x = (start + end) / 2, y = id + 0.5),
            color = "darkgreen")

## Final cosmetics and export.
p <- p +
  scale_y_discrete(labels = unique(clusters$molecule), expand = c(0.1, 0.3)) +
  theme(axis.text.y = element_text(size = 14, face = "bold", color = "black"),
        panel.grid.major.y = element_line(color = "black"),
        legend.background = element_rect(color = "black")) +
  labs(x = "Scale (bp)", y = NULL)
ggsave(p, filename = "figures/aminocoumarin_cluster.png",
       width = 15, height = 6, dpi = 300)
## Fit per-sub-metering nu-SVR models (e1071, radial kernel) on the daily
## household power dataset, combine the four sub-metering predictions into an
## estimate of global active power, report train/test RMSE, and plot the
## combined prediction over the observed data.
##
## Expects `datasetbyday` in the calling environment and sources the fitting
## code from regression/SVR/nu_radial.R, which reads the svm_* globals and
## leaves its fitted model in `r_e1071_svr_nu_rad`.
library(e1071)

dataset <- datasetbyday

## Root mean squared error of a residual vector.
rmse <- function(error) {
  sqrt(mean(error^2))
}

## Print a visual separator between method outputs.
endmethod <- function() {
  print("---------------------")
}

## Configuration read by the sourced regression scripts.
k_cross_valid_svm_setup <- 1
print_mse <- FALSE

ren_dataset <- dataset  # keep the original so it can be restored at the end
#dataset$DiaHora <- (dataset$Dia * 24) + dataset$Hora
names(dataset) <- c("Dia", "Global", "Sub1", "Sub2", "Sub3", "SubG")
dataset$X <- dataset$Dia
plot_X_axis <- "Day"

## Train/test split by day index. The second pair of assignments deliberately
## overrides the first, widening the training window to days 1..1251.
dataset_training <- dataset[1:1071,]
dataset_test <- dataset[1072:1251,]
dataset_training <- dataset[1:1251,]
dataset_test <- dataset[1252:1431,]
dataset_training$X <- dataset_training$Dia
dataset_test$X <- dataset_test$Dia

## --- SubG (global minus sub-meterings) model -------------------------------
dataset_training$Y <- dataset_training$SubG
dataset_test$Y <- dataset_test$SubG
dataset$Y <- dataset$SubG
plot_Y_axis <- "Global - SubMeterings"
svm_formula <- SubG ~ Dia
svm_cost <- 1.0
svm_nu <- 0.4
svm_kernel <- "radial"
svm_gamma <- 4
svm_title_plot <- "SubGlobal Regression"
source('regression/SVR/nu_radial.R')
r_SubG <- r_e1071_svr_nu_rad

## --- Sub-metering 3 model --------------------------------------------------
dataset_training$Y <- dataset_training$Sub3
dataset_test$Y <- dataset_test$Sub3
dataset$Y <- dataset$Sub3
plot_Y_axis <- "SubMetering 3"
svm_formula <- Sub3 ~ Dia
svm_cost <- 1.0
svm_nu <- 0.5
svm_kernel <- "radial"
svm_gamma <- 4
svm_title_plot <- "Sub3 Regression"
source('regression/SVR/nu_radial.R')
r_Sub3 <- r_e1071_svr_nu_rad

## --- Sub-metering 2 model --------------------------------------------------
dataset_training$Y <- dataset_training$Sub2
dataset_test$Y <- dataset_test$Sub2
dataset$Y <- dataset$Sub2
plot_Y_axis <- "SubMetering 2"
svm_formula <- Sub2 ~ Dia
svm_cost <- 3.0
svm_nu <- 0.7
svm_kernel <- "radial"
svm_gamma <- 3
svm_title_plot <- "Sub2 Regression"
source('regression/SVR/nu_radial.R')
r_Sub2 <- r_e1071_svr_nu_rad
#source("regression/linear.R")
#r_Sub2 <- r_linear

## --- Sub-metering 1 model --------------------------------------------------
dataset_training$Y <- dataset_training$Sub1
dataset_test$Y <- dataset_test$Sub1
dataset$Y <- dataset$Sub1
plot_Y_axis <- "SubMetering 1"
svm_formula <- Sub1 ~ Dia
svm_cost <- 1.0
svm_nu <- 0.5
svm_kernel <- "radial"
svm_gamma <- 3
svm_title_plot <- "Sub1 Regression"
source('regression/SVR/nu_radial.R')
r_Sub1 <- r_e1071_svr_nu_rad
#source("regression/linear.R")
#r_Sub1 <- r_linear

print("-> nu_ALL")

## Training-set RMSE of the combined prediction. The sub-metering predictions
## are summed and rescaled by 60/1000 to match the units of Global
## (presumably Wh-per-minute -> kW; confirm against the dataset docs).
datasetpredtrain <- dataset_training
datasetpredtrain$Sub1 <- predict(r_Sub1, dataset_training)
datasetpredtrain$Sub2 <- predict(r_Sub2, dataset_training)
datasetpredtrain$Sub3 <- predict(r_Sub3, dataset_training)
datasetpredtrain$SubG <- predict(r_SubG, dataset_training)
## Vectorized: replaces the original element-by-element for loop.
predictedY <- (datasetpredtrain$Sub1 + datasetpredtrain$Sub2 +
                 datasetpredtrain$Sub3 + datasetpredtrain$SubG) * (60/1000)
residual_error <- dataset_training$Global - predictedY
print(rmse(residual_error))

## Test-set RMSE of the combined prediction.
datasetpredtest <- dataset_test
datasetpredtest$Sub1 <- predict(r_Sub1, dataset_test)
datasetpredtest$Sub2 <- predict(r_Sub2, dataset_test)
datasetpredtest$Sub3 <- predict(r_Sub3, dataset_test)
datasetpredtest$SubG <- predict(r_SubG, dataset_test)
predictedY <- (datasetpredtest$Sub1 + datasetpredtest$Sub2 +
                 datasetpredtest$Sub3 + datasetpredtest$SubG) * (60/1000)
residual_error <- dataset_test$Global - predictedY
print(rmse(residual_error))

## Combined prediction over the full dataset, for plotting.
datasetpred <- dataset
datasetpred$Sub1 <- predict(r_Sub1, dataset)
datasetpred$Sub2 <- predict(r_Sub2, dataset)
datasetpred$Sub3 <- predict(r_Sub3, dataset)
datasetpred$SubG <- predict(r_SubG, dataset)
predictedAll <- (datasetpred$Sub1 + datasetpred$Sub2 +
                   datasetpred$Sub3 + datasetpred$SubG) * (60/1000)

plot_Y_axis <- "Global Active Power"
## NOTE: the original wrote plot(x <- ..., y <- ...), leaking globals x and y
## as a side effect of assignment inside the call; plain positional args here.
plot(dataset_training$Dia, dataset_training$Global, pch = '*',
     xlim = c(0, nrow(dataset)), xlab = plot_X_axis, ylab = plot_Y_axis)
title(main = "SVM with sub-meterings")
## BUGFIX: the original called predict(r_e1071_svr_all, datasetpred), but no
## object named r_e1071_svr_all is ever created in this script (only
## r_Sub1..r_SubG and r_e1071_svr_nu_rad), so this line errored at runtime.
## Plot the combined sub-metering prediction instead.
lines(dataset$Dia, predictedAll, col = "red")

dataset <- ren_dataset

# Cleaning environment
rm(dataset_training)
rm(dataset_test)
rm(ren_dataset)
rm(k_cross_valid_svm_setup)
rm(print_mse)
rm(svm_formula)
rm(rmse)
rm(endmethod)
rm(plot_X_axis)
rm(plot_Y_axis)
rm(svm_cost)
rm(svm_nu)
rm(svm_kernel)
rm(svm_gamma)
rm(predictedAll)
/regressional.R
no_license
lquatrin/ihepcds-MLRegression
R
false
false
4,067
r
## Fit per-sub-metering nu-SVR models (e1071, radial kernel) on the daily
## household power dataset, combine the four sub-metering predictions into an
## estimate of global active power, report train/test RMSE, and plot the
## combined prediction over the observed data.
##
## Expects `datasetbyday` in the calling environment and sources the fitting
## code from regression/SVR/nu_radial.R, which reads the svm_* globals and
## leaves its fitted model in `r_e1071_svr_nu_rad`.
library(e1071)

dataset <- datasetbyday

## Root mean squared error of a residual vector.
rmse <- function(error) {
  sqrt(mean(error^2))
}

## Print a visual separator between method outputs.
endmethod <- function() {
  print("---------------------")
}

## Configuration read by the sourced regression scripts.
k_cross_valid_svm_setup <- 1
print_mse <- FALSE

ren_dataset <- dataset  # keep the original so it can be restored at the end
#dataset$DiaHora <- (dataset$Dia * 24) + dataset$Hora
names(dataset) <- c("Dia", "Global", "Sub1", "Sub2", "Sub3", "SubG")
dataset$X <- dataset$Dia
plot_X_axis <- "Day"

## Train/test split by day index. The second pair of assignments deliberately
## overrides the first, widening the training window to days 1..1251.
dataset_training <- dataset[1:1071,]
dataset_test <- dataset[1072:1251,]
dataset_training <- dataset[1:1251,]
dataset_test <- dataset[1252:1431,]
dataset_training$X <- dataset_training$Dia
dataset_test$X <- dataset_test$Dia

## --- SubG (global minus sub-meterings) model -------------------------------
dataset_training$Y <- dataset_training$SubG
dataset_test$Y <- dataset_test$SubG
dataset$Y <- dataset$SubG
plot_Y_axis <- "Global - SubMeterings"
svm_formula <- SubG ~ Dia
svm_cost <- 1.0
svm_nu <- 0.4
svm_kernel <- "radial"
svm_gamma <- 4
svm_title_plot <- "SubGlobal Regression"
source('regression/SVR/nu_radial.R')
r_SubG <- r_e1071_svr_nu_rad

## --- Sub-metering 3 model --------------------------------------------------
dataset_training$Y <- dataset_training$Sub3
dataset_test$Y <- dataset_test$Sub3
dataset$Y <- dataset$Sub3
plot_Y_axis <- "SubMetering 3"
svm_formula <- Sub3 ~ Dia
svm_cost <- 1.0
svm_nu <- 0.5
svm_kernel <- "radial"
svm_gamma <- 4
svm_title_plot <- "Sub3 Regression"
source('regression/SVR/nu_radial.R')
r_Sub3 <- r_e1071_svr_nu_rad

## --- Sub-metering 2 model --------------------------------------------------
dataset_training$Y <- dataset_training$Sub2
dataset_test$Y <- dataset_test$Sub2
dataset$Y <- dataset$Sub2
plot_Y_axis <- "SubMetering 2"
svm_formula <- Sub2 ~ Dia
svm_cost <- 3.0
svm_nu <- 0.7
svm_kernel <- "radial"
svm_gamma <- 3
svm_title_plot <- "Sub2 Regression"
source('regression/SVR/nu_radial.R')
r_Sub2 <- r_e1071_svr_nu_rad
#source("regression/linear.R")
#r_Sub2 <- r_linear

## --- Sub-metering 1 model --------------------------------------------------
dataset_training$Y <- dataset_training$Sub1
dataset_test$Y <- dataset_test$Sub1
dataset$Y <- dataset$Sub1
plot_Y_axis <- "SubMetering 1"
svm_formula <- Sub1 ~ Dia
svm_cost <- 1.0
svm_nu <- 0.5
svm_kernel <- "radial"
svm_gamma <- 3
svm_title_plot <- "Sub1 Regression"
source('regression/SVR/nu_radial.R')
r_Sub1 <- r_e1071_svr_nu_rad
#source("regression/linear.R")
#r_Sub1 <- r_linear

print("-> nu_ALL")

## Training-set RMSE of the combined prediction. The sub-metering predictions
## are summed and rescaled by 60/1000 to match the units of Global
## (presumably Wh-per-minute -> kW; confirm against the dataset docs).
datasetpredtrain <- dataset_training
datasetpredtrain$Sub1 <- predict(r_Sub1, dataset_training)
datasetpredtrain$Sub2 <- predict(r_Sub2, dataset_training)
datasetpredtrain$Sub3 <- predict(r_Sub3, dataset_training)
datasetpredtrain$SubG <- predict(r_SubG, dataset_training)
## Vectorized: replaces the original element-by-element for loop.
predictedY <- (datasetpredtrain$Sub1 + datasetpredtrain$Sub2 +
                 datasetpredtrain$Sub3 + datasetpredtrain$SubG) * (60/1000)
residual_error <- dataset_training$Global - predictedY
print(rmse(residual_error))

## Test-set RMSE of the combined prediction.
datasetpredtest <- dataset_test
datasetpredtest$Sub1 <- predict(r_Sub1, dataset_test)
datasetpredtest$Sub2 <- predict(r_Sub2, dataset_test)
datasetpredtest$Sub3 <- predict(r_Sub3, dataset_test)
datasetpredtest$SubG <- predict(r_SubG, dataset_test)
predictedY <- (datasetpredtest$Sub1 + datasetpredtest$Sub2 +
                 datasetpredtest$Sub3 + datasetpredtest$SubG) * (60/1000)
residual_error <- dataset_test$Global - predictedY
print(rmse(residual_error))

## Combined prediction over the full dataset, for plotting.
datasetpred <- dataset
datasetpred$Sub1 <- predict(r_Sub1, dataset)
datasetpred$Sub2 <- predict(r_Sub2, dataset)
datasetpred$Sub3 <- predict(r_Sub3, dataset)
datasetpred$SubG <- predict(r_SubG, dataset)
predictedAll <- (datasetpred$Sub1 + datasetpred$Sub2 +
                   datasetpred$Sub3 + datasetpred$SubG) * (60/1000)

plot_Y_axis <- "Global Active Power"
## NOTE: the original wrote plot(x <- ..., y <- ...), leaking globals x and y
## as a side effect of assignment inside the call; plain positional args here.
plot(dataset_training$Dia, dataset_training$Global, pch = '*',
     xlim = c(0, nrow(dataset)), xlab = plot_X_axis, ylab = plot_Y_axis)
title(main = "SVM with sub-meterings")
## BUGFIX: the original called predict(r_e1071_svr_all, datasetpred), but no
## object named r_e1071_svr_all is ever created in this script (only
## r_Sub1..r_SubG and r_e1071_svr_nu_rad), so this line errored at runtime.
## Plot the combined sub-metering prediction instead.
lines(dataset$Dia, predictedAll, col = "red")

dataset <- ren_dataset

# Cleaning environment
rm(dataset_training)
rm(dataset_test)
rm(ren_dataset)
rm(k_cross_valid_svm_setup)
rm(print_mse)
rm(svm_formula)
rm(rmse)
rm(endmethod)
rm(plot_X_axis)
rm(plot_Y_axis)
rm(svm_cost)
rm(svm_nu)
rm(svm_kernel)
rm(svm_gamma)
rm(predictedAll)
##############################################################
## USE LCA TO GROUP STUDENTS BASED ON THE PATHS THEY FOLLOWED
## THROUGHOUT THE COURSE (WEEKS 2-13)
##############################################################
## Fits latent class analysis (poLCA) models over the students' weekly
## cluster assignments, selects the 6-class solution, writes the class
## assignments to disk, and then traces the weekly strategies of students
## who followed strategy F in weeks 11/12.
library(poLCA)   # library(), not require(): fail loudly if the package is missing
library(knitr)

clusters.w213 <- read.csv(file = "Intermediate_files/original_weekly_clusters_w2_to_w13_(feb2016).csv")

## LCA model: one latent variable over the 12 weekly cluster indicators.
f.fullcourse <- cbind(cl.w2, cl.w3, cl.w4, cl.w5, cl.w6, cl.w7,
                      cl.w8, cl.w9, cl.w10, cl.w11, cl.w12, cl.w13) ~ 1

## Compare model fit for 3..7 latent classes. Results are collected in a
## preallocated list (avoids growing a data frame inside the loop).
set.seed(3003)
eval.list <- vector("list", 5)
for (i in 3:7) {
  lc <- poLCA(f.fullcourse, data = clusters.w213, nrep = 50,
              nclass = i, verbose = FALSE, na.rm = FALSE)
  eval.list[[i - 2]] <- data.frame(nclass = i, AIC = lc$aic, BIC = lc$bic,
                                   LogLike = lc$llik, ChiSquare = lc$Chisq)
}
eval.metrics <- do.call(rbind, eval.list)
kable(x = eval.metrics, format = "rst")

set.seed(3003)
## examine the model with 5 classes
# lc5.fullcourse <- poLCA(f.fullcourse, data = clusters.w213, nrep = 50,
#                         nclass = 5, graphs = T, na.rm = F)
# # order the latent classes based on their size ($P gives the size each latent class)
# probs.start <- poLCA.reorder(lc5.fullcourse$probs.start, order(lc5.fullcourse$P,decreasing=T))
# lc5.fullcourse <- poLCA(f.fullcourse, data = clusters.w213, graphs = T,
#                         nclass = 5, probs.start = probs.start, na.rm = F)

## examine the model with 4 classes
# lc4.fullcourse <- poLCA(f.fullcourse, data = clusters.w213, nrep = 50,
#                         nclass = 4, graphs = T, na.rm = F)
# probs.start <- poLCA.reorder(lc4.fullcourse$probs.start, order(lc4.fullcourse$P,decreasing=T))
# lc4.fullcourse <- poLCA(f.fullcourse, data = clusters.w213, graphs = T,
#                         nclass = 4, probs.start = probs.start, na.rm = F)

## examine the model with 6 classes
lc6.fullcourse <- poLCA(f.fullcourse, data = clusters.w213, nrep = 50,
                        nclass = 6, na.rm = FALSE)
## Order the latent classes by decreasing size and refit so class numbering
## is stable ($P gives the estimated share of each latent class).
probs.start <- poLCA.reorder(lc6.fullcourse$probs.start,
                             order(lc6.fullcourse$P, decreasing = TRUE))
lc6.fullcourse <- poLCA(f.fullcourse, data = clusters.w213, graphs = TRUE,
                        nclass = 6, probs.start = probs.start, na.rm = FALSE)

## add the predicted class to each observation
clusters.w213.pred <- clusters.w213
# clusters.w213.pred$lca5 <- as.factor(lc5.fullcourse$predclass)
# clusters.w213.pred$lca4 <- as.factor(lc4.fullcourse$predclass)
clusters.w213.pred$lca6 <- as.factor(lc6.fullcourse$predclass)
str(clusters.w213.pred)

## write the features + the LCA class values to a file
write.csv(x = clusters.w213.pred,
          file = "results/lca_w2_to_w13_6classes(April2016).csv",
          row.names = FALSE, quote = FALSE)

###########################################################
## TRACE STUDENTS' STRATEGIES (WEEKLY CLUSTERS) THROUGH  ##
## THE OBTAINED LCA-BASED TRAJECTORIES                   ##
###########################################################
weekly.strategies <- read.csv(file = "results/lca_w2_to_w13_6classes(April2016).csv")
str(weekly.strategies)

## Recode the numeric weekly cluster ids into strategy labels. The label sets
## differ per week because the clustering was done independently each week.
weekly.strategies$cl.w2 <- factor(weekly.strategies$cl.w2, levels = 1:5,
                                  labels = c('A','D','E','B1','C1'))
weekly.strategies$cl.w3 <- factor(weekly.strategies$cl.w3, levels = 1:4,
                                  labels = c('A','E','D','B2'))
weekly.strategies$cl.w4 <- factor(weekly.strategies$cl.w4, levels = 1:5,
                                  labels = c('E','C2','B1','A','B2'))
weekly.strategies$cl.w5 <- factor(weekly.strategies$cl.w5, levels = 1:4,
                                  labels = c('D','B1','E','A'))
weekly.strategies$cl.w6 <- factor(weekly.strategies$cl.w6, levels = 1:5,
                                  labels = c('C2','C1','E','D','A'))
weekly.strategies$cl.w7 <- factor(weekly.strategies$cl.w7, levels = 1:4,
                                  labels = c('E','B1','D','C1'))
weekly.strategies$cl.w8 <- factor(weekly.strategies$cl.w8, levels = 1:5,
                                  labels = c('B1','E','B2','A','D'))
weekly.strategies$cl.w9 <- factor(weekly.strategies$cl.w9, levels = 1:4,
                                  labels = c('D','A','E','B1'))
weekly.strategies$cl.w10 <- factor(weekly.strategies$cl.w10, levels = 1:5,
                                   labels = c('B1','A','D','B2','E'))
weekly.strategies$cl.w11 <- factor(weekly.strategies$cl.w11, levels = 1:4,
                                   labels = c('D','B1','F','E'))
weekly.strategies$cl.w12 <- factor(weekly.strategies$cl.w12, levels = 1:4,
                                   labels = c('E','F','B1','B2'))
weekly.strategies$cl.w13 <- factor(weekly.strategies$cl.w13, levels = 1:5,
                                   labels = c('A','E','D','C1','C2'))
weekly.strategies$lca6 <- factor(weekly.strategies$lca6)
str(weekly.strategies)

## select students who had strategy F in weeks 11 and 12
stud.F.w11 <- weekly.strategies$user_id[!is.na(weekly.strategies$cl.w11) &
                                          weekly.strategies$cl.w11 == 'F']
stud.F.w12 <- weekly.strategies$user_id[!is.na(weekly.strategies$cl.w12) &
                                          weekly.strategies$cl.w12 == 'F']

## check how many students had strategy F in both weeks (11 and 12)
length(intersect(stud.F.w11, stud.F.w12))  ## 24 students

## find the distribution of strategies in the surrounding weeks for those
## students who had strategy F in week 11
strat.w9to13.studF.in.w11 <- subset(weekly.strategies, user_id %in% stud.F.w11,
                                    select = c(cl.w9, cl.w10, cl.w12, cl.w13))
summary(strat.w9to13.studF.in.w11)

## ... and for those students who had strategy F in week 12
strat.w9to13.studF.in.w12 <- subset(weekly.strategies, user_id %in% stud.F.w12,
                                    select = c(cl.w9, cl.w10, cl.w11, cl.w13))
summary(strat.w9to13.studF.in.w12)

## distribution of trajectories for students who had strategy F in week 11
table(subset(weekly.strategies, user_id %in% stud.F.w11, select = lca6))
## distribution of trajectories for students who had strategy F in week 12
## (the original comment here said week 11 -- copy-paste error, fixed)
summary(subset(weekly.strategies, user_id %in% stud.F.w12, select = lca6))
/USYD_14_d2u/LCA_with_weekly_clusters.R
no_license
abelardopardo/Flipped_course_analysis
R
false
false
6,305
r
##############################################################
## USE LCA TO GROUP STUDENTS BASED ON THE PATHS THEY FOLLOWED
## THROUGHOUT THE COURSE (WEEKS 2-13)
##############################################################

## Weekly cluster assignments for weeks 2-13, one row per student.
clusters.w213 <- read.csv(file = "Intermediate_files/original_weekly_clusters_w2_to_w13_(feb2016).csv")

## LCA model formula: the 12 weekly cluster variables are the manifest
## variables; no covariates (hence "~ 1").
f.fullcourse <- cbind(cl.w2, cl.w3, cl.w4, cl.w5, cl.w6, cl.w7,
                      cl.w8, cl.w9, cl.w10, cl.w11, cl.w12, cl.w13) ~ 1

## Fit LCA models with 3 to 7 latent classes and collect the fit metrics
## (AIC, BIC, log-likelihood, chi-square) used to choose the class count.
eval.metrics <- data.frame()
require(poLCA)
set.seed(3003)  # fixed seed so the nrep = 50 random restarts are reproducible
for(i in 3:7) {
  lc <- poLCA(f.fullcourse, data = clusters.w213, nrep = 50,
              nclass = i, verbose = F, na.rm = F)
  metrics <- c(i, lc$aic, lc$bic, lc$llik, lc$Chisq)
  eval.metrics <- as.data.frame( rbind(eval.metrics, metrics) )
}
colnames(eval.metrics) <- c('nclass', 'AIC', 'BIC', 'LogLike', 'ChiSquare')

## Render the model-selection table in reStructuredText format.
require(knitr)
kable(x = eval.metrics, format = "rst")

require(poLCA)
set.seed(3003)

## examine the model with 5 classes
# lc5.fullcourse <- poLCA(f.fullcourse, data = clusters.w213, nrep = 50,
#                         nclass = 5, graphs = T, na.rm = F)
# # order the latent classes based on their size ($P gives the size of each latent class)
# probs.start <- poLCA.reorder(lc5.fullcourse$probs.start, order(lc5.fullcourse$P,decreasing=T))
# lc5.fullcourse <- poLCA(f.fullcourse, data = clusters.w213, graphs = T,
#                         nclass = 5, probs.start = probs.start, na.rm = F)

## examine the model with 4 classes
# lc4.fullcourse <- poLCA(f.fullcourse, data = clusters.w213, nrep = 50,
#                         nclass = 4, graphs = T, na.rm = F)
# probs.start <- poLCA.reorder(lc4.fullcourse$probs.start, order(lc4.fullcourse$P,decreasing=T))
# lc4.fullcourse <- poLCA(f.fullcourse, data = clusters.w213, graphs = T,
#                         nclass = 4, probs.start = probs.start, na.rm = F)

## examine the model with 6 classes (the model retained below)
lc6.fullcourse <- poLCA(f.fullcourse, data = clusters.w213, nrep = 50,
                        nclass = 6, na.rm = F)
## Re-order the latent classes by size (largest first) and refit from the
## reordered starting probabilities so that class numbering is stable.
probs.start <- poLCA.reorder(lc6.fullcourse$probs.start, order(lc6.fullcourse$P,decreasing=T))
lc6.fullcourse <- poLCA(f.fullcourse, data = clusters.w213, graphs = T,
                        nclass = 6, probs.start = probs.start, na.rm = F)

## add the predicted class to each observation
clusters.w213.pred <- clusters.w213
# clusters.w213.pred$lca5 <- as.factor(lc5.fullcourse$predclass)
# clusters.w213.pred$lca4 <- as.factor(lc4.fullcourse$predclass)
clusters.w213.pred$lca6 <- as.factor(lc6.fullcourse$predclass)
str(clusters.w213.pred)

## write the features + the LCA class values to a file
write.csv(x = clusters.w213.pred, file = "results/lca_w2_to_w13_6classes(April2016).csv",
          row.names = F, quote = F)

###########################################################
## TRACE STUDENTS' STRATEGIES (WEEKLY CLUSTERS) THROUGH  ##
## THE OBTAINED LCA-BASED TRAJECTORIES                   ##
###########################################################

weekly.strategies <- read.csv(file = "results/lca_w2_to_w13_6classes(April2016).csv")
str(weekly.strategies)

## Relabel the numeric weekly cluster codes with strategy names
## (A, B1, B2, C1, C2, D, E, F); the code-to-label mapping differs by week.
weekly.strategies$cl.w2 <- factor(weekly.strategies$cl.w2, levels = c(1:5),
                                  labels = c('A','D','E','B1','C1'))
weekly.strategies$cl.w3 <- factor(weekly.strategies$cl.w3, levels = c(1:4),
                                  labels = c('A','E','D','B2'))
weekly.strategies$cl.w4 <- factor(weekly.strategies$cl.w4, levels = c(1:5),
                                  labels = c('E','C2','B1','A','B2'))
weekly.strategies$cl.w5 <- factor(weekly.strategies$cl.w5, levels = c(1:4),
                                  labels = c('D','B1','E','A'))
weekly.strategies$cl.w6 <- factor(weekly.strategies$cl.w6, levels = c(1:5),
                                  labels = c('C2','C1','E','D','A'))
weekly.strategies$cl.w7 <- factor(weekly.strategies$cl.w7, levels = c(1:4),
                                  labels = c('E','B1','D','C1'))
weekly.strategies$cl.w8 <- factor(weekly.strategies$cl.w8, levels = c(1:5),
                                  labels = c('B1','E','B2','A','D'))
weekly.strategies$cl.w9 <- factor(weekly.strategies$cl.w9, levels = c(1:4),
                                  labels = c('D','A','E','B1'))
weekly.strategies$cl.w10 <- factor(weekly.strategies$cl.w10, levels = c(1:5),
                                   labels = c('B1','A','D','B2','E'))
weekly.strategies$cl.w11 <- factor(weekly.strategies$cl.w11, levels = c(1:4),
                                   labels = c('D','B1','F','E'))
weekly.strategies$cl.w12 <- factor(weekly.strategies$cl.w12, levels = c(1:4),
                                   labels = c('E','F','B1','B2'))
weekly.strategies$cl.w13 <- factor(weekly.strategies$cl.w13, levels = c(1:5),
                                   labels = c('A','E','D','C1','C2'))
weekly.strategies$lca6 <- factor(weekly.strategies$lca6)
str(weekly.strategies)

## select students who had strategy F in weeks 11 and 12
stud.F.w11 <- weekly.strategies$user_id[is.na(weekly.strategies$cl.w11) == F &
                                          weekly.strategies$cl.w11 == 'F']
stud.F.w12 <- weekly.strategies$user_id[is.na(weekly.strategies$cl.w12) == F &
                                          weekly.strategies$cl.w12 == 'F']

## check how many students had strategy F in both weeks (11 and 12)
length(intersect(stud.F.w11, stud.F.w12))  ## 24 students

## find the distribution of strategies in weeks 9, 10, 12 and 13 for those
## students who had strategy F in week 11
strat.w9to13.studF.in.w11 <- subset(weekly.strategies, user_id %in% stud.F.w11,
                                    select = c(cl.w9, cl.w10, cl.w12, cl.w13))
summary(strat.w9to13.studF.in.w11)

## find the distribution of strategies in weeks 9, 10, 11 and 13 for those
## students who had strategy F in week 12
strat.w9to13.studF.in.w12 <- subset(weekly.strategies, user_id %in% stud.F.w12,
                                    select = c(cl.w9, cl.w10, cl.w11, cl.w13))
summary(strat.w9to13.studF.in.w12)

## distribution of trajectories for students who had strategy F in week 11
table(subset(weekly.strategies, user_id %in% stud.F.w11, select = lca6))

## distribution of trajectories for students who had strategy F in week 12
summary(subset(weekly.strategies, user_id %in% stud.F.w12, select = lca6))
## FAST TOOL Targets COP FY18
## A.Chafetz, USAID
## Purpose: targets to populate FAST Tool
## Date: Dec 22, 2017

## DEPENDENCIES
#   ICPI Fact View PSNUxIM w/FY18 Targets
#   COP MATRIX REPORT from FACTSInfo

library(tidyverse)

## IMPORT --------------------------------------------------------------------
## Read the fact view; force the three target columns to double so that
## mixed/blank cells do not get guessed as character.
df_targets <- read_tsv("~/ICPI/Data/ICPI_FactView_PSNU_IM_20171115_v1_2.txt",
                       col_types = cols(FY2016_TARGETS = "d",
                                        FY2017_TARGETS = "d",
                                        FY2018_TARGETS = "d")) %>%
  rename_all(tolower)

## CLEAN/SUBSET DATA ---------------------------------------------------------
# fix issues with different mech & partner names (function at bottom of script)
df_targets <- cleanup_mechs(df_targets, "~/GitHub/DataPack/RawData/")
# subset
df_targets <- df_targets %>%
  # just indicators of interest (total numerators only) & limit variables
  filter(indicator %in% c("TX_NEW", "TX_CURR", "HTS_TST", "HTS_TST_POS",
                          "PMTCT_STAT", "PMTCT_ART", "OVC_SERV", "VMMC_CIRC"),
         disaggregate == "Total Numerator") %>%
  # summarize at prioritization/im level
  group_by(operatingunit, fy17snuprioritization, primepartner, mechanismid,
           implementingmechanismname, indicator) %>%
  summarise_at(vars(fy2016_targets, fy2017_targets, fy2018_targets),
               funs(sum(., na.rm=TRUE))) %>%
  ungroup() %>%
  # change 0s to NA
  mutate_at(vars(fy2016_targets, fy2017_targets, fy2018_targets),
            funs(ifelse(.==0, NA, .)))

## EXPORT DATA ---------------------------------------------------------------
write_csv(df_targets, "~/GitHub/DataPack/TempOutput/PEPFAR_FAST_Targets_2017.12.22.csv",
          na="")

## CLEANUP MECHANISMS FUNCTION ------------------------------------------------
## Replace mechanism and prime-partner names in `df_to_clean` with the most
## recent official names from the FACTS Info "Standard COP Matrix Report".
##   df_to_clean       - data frame with a `mechanismid` column
##   report_folder_path - folder containing the COP Matrix Report .xls
##   report_start_year  - calendar year of the report's first name column
## Returns df_to_clean with updated names (interface unchanged).
cleanup_mechs <- function(df_to_clean, report_folder_path, report_start_year = 2014) {
  # needed packages
  require(tidyverse, quietly = FALSE)
  require(readxl, quietly = FALSE)

  # import official mech and partner names; source: FACTS Info
  df_names <- read_excel(Sys.glob(file.path(report_folder_path,
                                            "*Standard COP Matrix Report*.xls")),
                         skip = 1)

  # rename variable stubs
  names(df_names) <- gsub("Prime Partner", "primepartner", names(df_names))
  names(df_names) <- gsub("Mechanism Name", "implementingmechanismname", names(df_names))

  # figure out the latest name for IM and partner (should both be from the same year)
  df_names <- df_names %>%
    # rename variables that don't fit the stub pattern
    rename(operatingunit = `Operating Unit`,
           mechanismid = `Mechanism Identifier`,
           primepartner__0 = primepartner,
           implementingmechanismname__0 = implementingmechanismname) %>%
    # reshape long
    gather(type, name, -operatingunit, -mechanismid) %>%
    # split out type and year (e.g. type = primepartner__1 --> type = primepartner, year = 1)
    separate(type, c("type", "year"), sep="__") %>%
    # add year (assumes the report's first name column is report_start_year, i.e. 2014)
    mutate(year = as.numeric(year) + report_start_year) %>%
    # drop lines/years with missing names
    filter(!is.na(name)) %>%
    # group to figure out the latest year with names; keep only that year's names
    # (one observation per mechanism)
    group_by(operatingunit, mechanismid, type) %>%
    filter(year==max(year)) %>%
    ungroup() %>%
    # reshape wide so primepartner and implementingmechanismname are two
    # separate columns matching the fact view dataset
    spread(type, name) %>%
    # convert mechanism id to string for merging back onto the main df
    mutate(mechanismid = as.character(mechanismid)) %>%
    # keep only names with mechid, renaming with _F to identify as from FACTS
    select(mechanismid, implementingmechanismname, primepartner) %>%
    rename(implementingmechanismname_F = implementingmechanismname,
           primepartner_F = primepartner)

  # match mechanism id type for compatible merge
  df_to_clean <- mutate(df_to_clean, mechanismid = as.character(mechanismid))

  # merge in official names
  df_to_clean <- left_join(df_to_clean, df_names, by="mechanismid")

  # replace prime partner and mech names with official names where available
  df_to_clean <- df_to_clean %>%
    mutate(implementingmechanismname = ifelse(is.na(implementingmechanismname_F),
                                              implementingmechanismname,
                                              implementingmechanismname_F),
           primepartner = ifelse(is.na(primepartner_F),
                                 primepartner,
                                 primepartner_F)) %>%
    select(-ends_with("_F"))
}
/Other/FAST_Tool_targets.R
no_license
cadelson/ICPI_Projects
R
false
false
4,668
r
## FAST TOOL Targets COP FY18
## A.Chafetz, USAID
## Purpose: targets to populate FAST Tool
## Date: Dec 22, 2017

## DEPENDENCIES
#   ICPI Fact View PSNUxIM w/FY18 Targets
#   COP MATRIX REPORT from FACTSInfo

library(tidyverse)

## IMPORT --------------------------------------------------------------------
## Read the fact view; force the three target columns to double so that
## mixed/blank cells do not get guessed as character.
df_targets <- read_tsv("~/ICPI/Data/ICPI_FactView_PSNU_IM_20171115_v1_2.txt",
                       col_types = cols(FY2016_TARGETS = "d",
                                        FY2017_TARGETS = "d",
                                        FY2018_TARGETS = "d")) %>%
  rename_all(tolower)

## CLEAN/SUBSET DATA ---------------------------------------------------------
# fix issues with different mech & partner names (function at bottom of script)
df_targets <- cleanup_mechs(df_targets, "~/GitHub/DataPack/RawData/")
# subset
df_targets <- df_targets %>%
  # just indicators of interest (total numerators only) & limit variables
  filter(indicator %in% c("TX_NEW", "TX_CURR", "HTS_TST", "HTS_TST_POS",
                          "PMTCT_STAT", "PMTCT_ART", "OVC_SERV", "VMMC_CIRC"),
         disaggregate == "Total Numerator") %>%
  # summarize at prioritization/im level
  group_by(operatingunit, fy17snuprioritization, primepartner, mechanismid,
           implementingmechanismname, indicator) %>%
  summarise_at(vars(fy2016_targets, fy2017_targets, fy2018_targets),
               funs(sum(., na.rm=TRUE))) %>%
  ungroup() %>%
  # change 0s to NA
  mutate_at(vars(fy2016_targets, fy2017_targets, fy2018_targets),
            funs(ifelse(.==0, NA, .)))

## EXPORT DATA ---------------------------------------------------------------
write_csv(df_targets, "~/GitHub/DataPack/TempOutput/PEPFAR_FAST_Targets_2017.12.22.csv",
          na="")

## CLEANUP MECHANISMS FUNCTION ------------------------------------------------
## Replace mechanism and prime-partner names in `df_to_clean` with the most
## recent official names from the FACTS Info "Standard COP Matrix Report".
##   df_to_clean       - data frame with a `mechanismid` column
##   report_folder_path - folder containing the COP Matrix Report .xls
##   report_start_year  - calendar year of the report's first name column
## Returns df_to_clean with updated names (interface unchanged).
cleanup_mechs <- function(df_to_clean, report_folder_path, report_start_year = 2014) {
  # needed packages
  require(tidyverse, quietly = FALSE)
  require(readxl, quietly = FALSE)

  # import official mech and partner names; source: FACTS Info
  df_names <- read_excel(Sys.glob(file.path(report_folder_path,
                                            "*Standard COP Matrix Report*.xls")),
                         skip = 1)

  # rename variable stubs
  names(df_names) <- gsub("Prime Partner", "primepartner", names(df_names))
  names(df_names) <- gsub("Mechanism Name", "implementingmechanismname", names(df_names))

  # figure out the latest name for IM and partner (should both be from the same year)
  df_names <- df_names %>%
    # rename variables that don't fit the stub pattern
    rename(operatingunit = `Operating Unit`,
           mechanismid = `Mechanism Identifier`,
           primepartner__0 = primepartner,
           implementingmechanismname__0 = implementingmechanismname) %>%
    # reshape long
    gather(type, name, -operatingunit, -mechanismid) %>%
    # split out type and year (e.g. type = primepartner__1 --> type = primepartner, year = 1)
    separate(type, c("type", "year"), sep="__") %>%
    # add year (assumes the report's first name column is report_start_year, i.e. 2014)
    mutate(year = as.numeric(year) + report_start_year) %>%
    # drop lines/years with missing names
    filter(!is.na(name)) %>%
    # group to figure out the latest year with names; keep only that year's names
    # (one observation per mechanism)
    group_by(operatingunit, mechanismid, type) %>%
    filter(year==max(year)) %>%
    ungroup() %>%
    # reshape wide so primepartner and implementingmechanismname are two
    # separate columns matching the fact view dataset
    spread(type, name) %>%
    # convert mechanism id to string for merging back onto the main df
    mutate(mechanismid = as.character(mechanismid)) %>%
    # keep only names with mechid, renaming with _F to identify as from FACTS
    select(mechanismid, implementingmechanismname, primepartner) %>%
    rename(implementingmechanismname_F = implementingmechanismname,
           primepartner_F = primepartner)

  # match mechanism id type for compatible merge
  df_to_clean <- mutate(df_to_clean, mechanismid = as.character(mechanismid))

  # merge in official names
  df_to_clean <- left_join(df_to_clean, df_names, by="mechanismid")

  # replace prime partner and mech names with official names where available
  df_to_clean <- df_to_clean %>%
    mutate(implementingmechanismname = ifelse(is.na(implementingmechanismname_F),
                                              implementingmechanismname,
                                              implementingmechanismname_F),
           primepartner = ifelse(is.na(primepartner_F),
                                 primepartner,
                                 primepartner_F)) %>%
    select(-ends_with("_F"))
}
## testthat tests for inner_outer_join() on pv-containers
## (lists with a `param` data frame and an optional `value` list).
context('Join')

## Parameter-only containers (no `value` element).
p0 <- list(param = data.frame(p0 = 3.2))
p1 <- list(param = data.frame(p1 = c(4)))
p2 <- list(param = data.frame(p2 = c(TRUE, FALSE)))
p3 <- list(param = data.frame(p3a = 'foo', p3b = c(5, 7)))

## Containers carrying both `param` rows and one `value` entry per row.
pv1 <- list(param = data.frame(p4 = TRUE),
            value = list(list(v1 = 3, v2 = 5)))
pv2 <- list(param = data.frame(p5 = c(-1, -2)),
            value = list(list(v3 = 'A', v4 = 'B'),
                         list(v3 = 'C', v4 = 'D')))

test_that('single', {
  ## Joining a single container should act as the identity.
  j <- inner_outer_join(p0)
  expect(is.pvcontainer(j))
  expect_equal(j, p0)
})

test_that('two', {
  ## Joining a 1-row and a 2-row container yields 2 rows with the
  ## columns of both inputs.
  j <- inner_outer_join(p0, p3)
  expect(is.pvcontainer(j))
  expect_equal(nrow(j$param), 2)
  expect_equal(colnames(j$param), c('p0', 'p3a', 'p3b'))
})

test_that('with value', {
  ## Value entries of joined rows are expected to be concatenated.
  ## NOTE(review): expect() is usually given a failure message as its
  ## second argument; expect_true() may be clearer here -- confirm.
  j <- inner_outer_join(pv1, pv2)
  expect(is.pvcontainer(j))
  expect_equal(nrow(j$param), 2)
  expect_equal(colnames(j$param), c('p4', 'p5'))
  expect_equal(names(j$value[[1]]), c('v1', 'v2', 'v3', 'v4'))
})
/tests/testthat/test_join.R
permissive
kostrzewa/paramvalf
R
false
false
1,115
r
## testthat tests for inner_outer_join() on pv-containers
## (lists with a `param` data frame and an optional `value` list).
context('Join')

## Parameter-only containers (no `value` element).
p0 <- list(param = data.frame(p0 = 3.2))
p1 <- list(param = data.frame(p1 = c(4)))
p2 <- list(param = data.frame(p2 = c(TRUE, FALSE)))
p3 <- list(param = data.frame(p3a = 'foo', p3b = c(5, 7)))

## Containers carrying both `param` rows and one `value` entry per row.
pv1 <- list(param = data.frame(p4 = TRUE),
            value = list(list(v1 = 3, v2 = 5)))
pv2 <- list(param = data.frame(p5 = c(-1, -2)),
            value = list(list(v3 = 'A', v4 = 'B'),
                         list(v3 = 'C', v4 = 'D')))

test_that('single', {
  ## Joining a single container should act as the identity.
  j <- inner_outer_join(p0)
  expect(is.pvcontainer(j))
  expect_equal(j, p0)
})

test_that('two', {
  ## Joining a 1-row and a 2-row container yields 2 rows with the
  ## columns of both inputs.
  j <- inner_outer_join(p0, p3)
  expect(is.pvcontainer(j))
  expect_equal(nrow(j$param), 2)
  expect_equal(colnames(j$param), c('p0', 'p3a', 'p3b'))
})

test_that('with value', {
  ## Value entries of joined rows are expected to be concatenated.
  ## NOTE(review): expect() is usually given a failure message as its
  ## second argument; expect_true() may be clearer here -- confirm.
  j <- inner_outer_join(pv1, pv2)
  expect(is.pvcontainer(j))
  expect_equal(nrow(j$param), 2)
  expect_equal(colnames(j$param), c('p4', 'p5'))
  expect_equal(names(j$value[[1]]), c('v1', 'v2', 'v3', 'v4'))
})
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/personalize_operations.R \name{personalize_get_solution_metrics} \alias{personalize_get_solution_metrics} \title{Gets the metrics for the specified solution version} \usage{ personalize_get_solution_metrics(solutionVersionArn) } \arguments{ \item{solutionVersionArn}{[required] The Amazon Resource Name (ARN) of the solution version for which to get metrics.} } \description{ Gets the metrics for the specified solution version. } \section{Request syntax}{ \preformatted{svc$get_solution_metrics( solutionVersionArn = "string" ) } } \keyword{internal}
/paws/man/personalize_get_solution_metrics.Rd
permissive
johnnytommy/paws
R
false
true
633
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/personalize_operations.R \name{personalize_get_solution_metrics} \alias{personalize_get_solution_metrics} \title{Gets the metrics for the specified solution version} \usage{ personalize_get_solution_metrics(solutionVersionArn) } \arguments{ \item{solutionVersionArn}{[required] The Amazon Resource Name (ARN) of the solution version for which to get metrics.} } \description{ Gets the metrics for the specified solution version. } \section{Request syntax}{ \preformatted{svc$get_solution_metrics( solutionVersionArn = "string" ) } } \keyword{internal}
#' @name comma
#' @title Shortcut to add comma formatting
#' @author Gene Leynes
#'
#' @param x Number or vector of numbers
#' @param nDigits Number of trailing digits. Default is 2
#'
#' @description
#' Simple shortcut to make numbers into text with commas and some
#' trailing decimal digits. Just a shortcut to formatC.
#' Zero and NA values are handled gracefully: they are treated as having
#' a one-digit integer part, so e.g. \code{comma(0)} returns \code{"0"}
#' instead of failing on \code{log10(0) == -Inf}.
#'
#' @examples
#' comma(c(8987.35672, 8962.97978, 8998.06814, 8960.67199, 8997.08384))
#' # [1] "8,987.36" "8,962.98" "8,998.07" "8,960.67" "8,997.08"
#' comma(c(8987.35672, 8962.97978, 8998.06814, 8960.67199, 8997.08384), 0)
#' # [1] "8,987" "8,963" "8,998" "8,961" "8,997"
#'
#'
comma <- function(x, nDigits = 2) {
	## Digits in the integer part of each element. Guard x == 0 and NA:
	## log10(0) is -Inf (and log10(NA) is NA), which previously produced an
	## invalid `digits` value for formatC(); treat those as one-digit.
	intDigits <- ifelse(is.na(x) | x == 0, 0, trunc(log(abs(x), 10)))
	## Total significant digits = integer-part digits + 1 + decimals wanted.
	nDigits2 <- intDigits + 1 + nDigits
	## mapply() because formatC() does not vectorize over `digits`.
	mapply(formatC, x = x, digits = nDigits2, big.mark = ',')
}

if(FALSE){
	vec <- c(8987.35672, 8962.97978, 8998.06814, 8960.67199, 8997.08384,
			 9040.95974, 8984.53862, 9017.97732, 8943.0501, 9003.01559)
	comma(vec)
	comma(vec, 0)
	result1 <- c("8,987.36", "8,962.98", "8,998.07", "8,960.67", "8,997.08",
				 "9,040.96", "8,984.54", "9,017.98", "8,943.05", "9,003.02")
	result2 <- c("8,987", "8,963", "8,998", "8,961", "8,997",
				 "9,041", "8,985", "9,018", "8,943", "9,003")
	checkIdentical(comma(vec), result1, "comma test 1 failed")
	checkIdentical(comma(vec, 0), result2, "comma test 2 failed")
}
/R/comma.R
no_license
geneorama/geneorama
R
false
false
1,361
r
#' @name comma
#' @title Shortcut to add comma formatting
#' @author Gene Leynes
#'
#' @param x Number or vector of numbers
#' @param nDigits Number of trailing digits. Default is 2
#'
#' @description
#' Simple shortcut to make numbers into text with commas and some
#' trailing decimal digits. Just a shortcut to formatC.
#' Zero and NA values are handled gracefully: they are treated as having
#' a one-digit integer part, so e.g. \code{comma(0)} returns \code{"0"}
#' instead of failing on \code{log10(0) == -Inf}.
#'
#' @examples
#' comma(c(8987.35672, 8962.97978, 8998.06814, 8960.67199, 8997.08384))
#' # [1] "8,987.36" "8,962.98" "8,998.07" "8,960.67" "8,997.08"
#' comma(c(8987.35672, 8962.97978, 8998.06814, 8960.67199, 8997.08384), 0)
#' # [1] "8,987" "8,963" "8,998" "8,961" "8,997"
#'
#'
comma <- function(x, nDigits = 2) {
	## Digits in the integer part of each element. Guard x == 0 and NA:
	## log10(0) is -Inf (and log10(NA) is NA), which previously produced an
	## invalid `digits` value for formatC(); treat those as one-digit.
	intDigits <- ifelse(is.na(x) | x == 0, 0, trunc(log(abs(x), 10)))
	## Total significant digits = integer-part digits + 1 + decimals wanted.
	nDigits2 <- intDigits + 1 + nDigits
	## mapply() because formatC() does not vectorize over `digits`.
	mapply(formatC, x = x, digits = nDigits2, big.mark = ',')
}

if(FALSE){
	vec <- c(8987.35672, 8962.97978, 8998.06814, 8960.67199, 8997.08384,
			 9040.95974, 8984.53862, 9017.97732, 8943.0501, 9003.01559)
	comma(vec)
	comma(vec, 0)
	result1 <- c("8,987.36", "8,962.98", "8,998.07", "8,960.67", "8,997.08",
				 "9,040.96", "8,984.54", "9,017.98", "8,943.05", "9,003.02")
	result2 <- c("8,987", "8,963", "8,998", "8,961", "8,997",
				 "9,041", "8,985", "9,018", "8,943", "9,003")
	checkIdentical(comma(vec), result1, "comma test 1 failed")
	checkIdentical(comma(vec, 0), result2, "comma test 2 failed")
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotResult.R
\name{plotResult}
\alias{plotResult}
\title{This function has been created to plot PROCESSED data results.
It is very Module-specific.}
\usage{
plotResult(data, toPubblish, pathToSave)
}
\arguments{
\item{toPubblish}{}

\item{pathToSave}{}
}
\description{
This function has been created to plot PROCESSED data results.
It is very Module-specific.
}
/man/plotResult.Rd
no_license
ncwanner/Production_Validation
R
false
true
437
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotResult.R
\name{plotResult}
\alias{plotResult}
\title{This function has been created to plot PROCESSED data results.
It is very Module-specific.}
\usage{
plotResult(data, toPubblish, pathToSave)
}
\arguments{
\item{toPubblish}{}

\item{pathToSave}{}
}
\description{
This function has been created to plot PROCESSED data results.
It is very Module-specific.
}
# Package-level roxygen directives for biClassify: register the compiled
# code and the Rcpp import, and export the two user-facing functions.
# The trailing NULL gives roxygen2 an object to attach the tags to.
#' @useDynLib biClassify
#' @importFrom Rcpp evalCpp
#' @export KOS
#' @export SelectParams
NULL
/biClassify/R/biClassifyArmadillo.R
no_license
akhikolla/InformationHouse
R
false
false
101
r
# Package-level roxygen directives for biClassify: register the compiled
# code and the Rcpp import, and export the two user-facing functions.
# The trailing NULL gives roxygen2 an object to attach the tags to.
#' @useDynLib biClassify
#' @importFrom Rcpp evalCpp
#' @export KOS
#' @export SelectParams
NULL
################################# ### FRE7241 Homework #3 due Oct 27, 2015 ################################# # Max score 100pts # Please write in this file the R code needed to perform the tasks below, # rename it to your_name_hw4.R # and upload the file to NYU Classes ############## Part I # Summary: Simulate a trading strategy based on two VWAPs. # download the file "etf_data.Rdata" from NYU Classes, and load() it. # "etf_data.Rdata" contains an environment called "env_data", # with OHLC time series data for ETFs, including "VTI". library(quantmod) load(file="C:/Develop/data/etf_data.Rdata") # 1. (5pts) Define two integer windows (lookback periods) called # "win_short=10" and "win_long=100". # Calculate two vectors of VWAPs called "vwap_short" and "vwap_long", # for the "VTI" OHLC data. # You must use function v_wap() from the previous homework, win_short <- 10 win_long <- 100 ### write your code here # Calculate a numeric vector called "indi_cator", that is # equal to 1 when "vwap_short > vwap_long" and equal to -1 # when "vwap_short < vwap_long", # The sign of "indi_cator" will determine the strategy's risk # positions, either long risk or short risk. # You can use function sign(), ### write your code here ############## Part II # 2. (10pts) Calculate a boolean vector that is TRUE only on dates # right after the VWAPs have crossed, and call it "cross_es". # For example, if yesterday "vwap_short < vwap_long" and today # "vwap_short > vwap_long" (or vice versa), then today "cross_es" # should be TRUE, and otherwise it should be FALSE. # hint: the diff() of "indi_cator" is not zero right after the # VWAPs have crossed, and otherwise it's zero. # you can use the functions sign(), diff(), and is.na(), # and the logical operator "!=", # set any NAs to FALSE, ### write your code here # The strategy should perform trades after "cross_es" becomes TRUE, # but with a one period lag, to reflect that in practice it's # impossible to trade immediately. 
# Calculate a vector of integer indices corresponding to trade
# dates, and call it "trade_dates".
# hint: first calculate the indices corresponding to the periods when
# "cross_es" is TRUE, and add "1" to them, to reflect the one period lag.
# you can use function which(),

### write your code here


# The strategy invests in a fixed number of shares called "num_shares".
num_shares <- 100

# The strategy either owns "num_shares" number of shares (long position),
# or sells the same number of shares short (short position).
# Thus the strategy consists of consecutive time intervals of long risk
# and short risk positions, depending on the sign of "indi_cator".
# When "indi_cator" becomes positive then the strategy buys shares and
# flips to a long risk position, and vice versa.


############## Part III
# 3. (20pts) The strategy should be simulated over a number of periods
# of time called "n_periods", which should be equal to the number of
# rows in the OHLC time series data,
# you can use function nrow(),

### write your code here


# Calculate a numeric vector called "pos_ition", that is equal to the
# number of shares owned by the strategy at any given period of time,
# either positive (long risk position) or negative (short risk position).
# The strategy should start with a position of zero.
# The strategy position should be reset on "trade_dates", depending on
# the sign of "indi_cator".
# The strategy position should remain unchanged between the "trade_dates".
# you can use functions numeric() and na.locf(),

### write your code here


# Lag the vector "pos_ition" by one period and call it "lag_position".
# The first value of "lag_position" should be zero.
# you can use function c() combined with subsetting "[]", ### write your code here # you can inspect the vectors by merging and subsetting them: foo <- merge(Ad(env_data$VTI), vwap_short, vwap_long, cross_es, pos_ition) colnames(foo) <- c("price", "vwap_short", "vwap_long", "cross_es", "pos_ition") foo[(which(cross_es)[2]-3):(which(cross_es)[2]+3), ] # you should get this: # price vwap_short vwap_long cross_es pos_ition # 2007-02-27 59.09827 60.62776 60.13491 0 100 # 2007-02-28 59.56461 60.42235 60.10604 0 100 # 2007-03-01 59.39503 60.17368 60.06567 0 100 # 2007-03-02 58.54714 59.98786 60.03648 1 100 # 2007-03-05 57.86882 59.65299 59.95280 0 -100 # 2007-03-06 58.84390 59.47699 59.92486 0 -100 # 2007-03-07 58.79727 59.32994 59.90830 0 -100 # Calculate a vector of adjusted close prices from the OHLC data, # and call it "price_s". # Calculate a vector of lagged "price_s", and call it "lag_prices". # Calculate a vector of open prices from the OHLC data, # and call it "open_prices". # you can use functions Ad(), Op(), and c(), ### write your code here ############## Part IV # 4. (20pts) Calculate a vector of periodic (daily) profits and # losses and call it "pn_l". # The periodic (day over day) profit or loss (pnl) for a period # without any trade, is equal to the position in the previous # period, times the difference between this period's closing # price minus the previous period's closing price. # The periodic pnl for a period with a trade, is equal to the # sum of two terms. # The first term is equal to the position in the previous # period, times the difference between this period's opening # price minus the previous period's closing price. # The first term represents the realized pnl after trading # out of the previous position. # The second term is equal to the current (new) position times # the difference between this period's closing minus opening # prices. # The second term represents the unrealized pnl of the new # position on the day of the trade. 
# you can use the vectors "price_s", "lag_prices", "open_prices", # "pos_ition", "lag_position" and "trade_dates", ### write your code here # you can inspect the vectors by merging and subsetting them: foo <- merge(Ad(env_data$VTI), lag_prices, open_prices, pos_ition, lag_position, pn_l) colnames(foo) <- c("price", "lag_prices", "open_prices", "pos_ition", "lag_position", "pn_l") foo[(which(cross_es)[2]-3):(which(cross_es)[2]+3), ] # you should get this: # price lag_prices open_prices pos_ition lag_position pn_l # 2007-02-27 59.09827 61.37487 60.68807 100 100 -227.66000 # 2007-02-28 59.56461 59.09827 59.36535 100 100 46.63400 # 2007-03-01 59.39503 59.56461 58.48778 100 100 -16.95800 # 2007-03-02 58.54714 59.39503 59.16186 100 100 -84.78900 # 2007-03-05 57.86882 58.54714 57.99600 -100 100 -42.39516 # 2007-03-06 58.84390 57.86882 58.37756 -100 -100 -97.50800 # 2007-03-07 58.79727 58.84390 58.92021 -100 -100 4.66300 # Calculate the Sharpe ratio of the strategy returns "pn_l", # given by the sum() of "pn_l" divided by the sd() of "pn_l". ### write your code here # Create an xts series from "pos_ition" and call it "xts_position". # you can use functions xts() and index(), ### write your code here # Create an xts series from the cumulative sum of "pn_l" # and call it "xts_pnl". # you can use functions xts(), cumsum(), and index(), ### write your code here ############## Part V # 5. (20pts) Plot the time series of prices and the strategy pnl # in two panels. # open plot graphics device using function x11(), # set plot parameters using function par() with argument "mfrow", ### write your code here # Plot in the top panel the adjusted close prices of the OHLC data, # add "vwap_long" to the plot, # add background shading of areas corresponding to long positions # in "lightgreen" and short positions in "lightgrey". 
# hint: call chart_Series() once, and then call add_TA() three times, # wrap the first three calls in invisible() to prevent plotting, # except for the last add_TA() call. # you must use functions Ad(), chart_Series(), add_TA() # (with "on" parameter), and invisible(), # you can use the xts series "xts_position" for shading, # You can adapt code from the "time_series_univariate" pdf and R files. # Be sure to download the most recent version. ### write your code here # Plot in the bottom panel "xts_pnl", # add background shading of areas as before. # you must use functions chart_Series(), add_TA() # (with "on" parameter), and invisible(), ### write your code here ############## Part VI # 6. (10pts) Create a function called run_vwap() which performs # a simulation of a trading strategy based on two VWAPs (as above), # and returns the Sharpe ratio of the strategy returns, # run_vwap() should accept three arguments: # "win_short" and "win_long" - two integer lookback windows, # "da_ta" - OHLC time series data, # hint: combine all the code from the previous parts. ### write your code here # call run_vwap() as follows, to verify it works correctly, run_vwap(win_short=40, win_long=350, da_ta=env_data$VTI) ############## Part VII # 7. (10pts) # Create a named vector of integer values for "win_short" from=30, to=100, by=10 # called "short_windows", with the following values: # sh30 sh40 sh50 sh60 sh70 sh80 sh90 sh100 # 30 40 50 60 70 80 90 100 # Create a named vector of integer values for "win_long" from=200, to=400, by=25 # called "long_windows", with the following values: # lo200 lo225 lo250 lo275 lo300 lo325 lo350 lo375 lo400 # 200 225 250 275 300 325 350 375 400 # you can use functions seq(), paste0(), names(), and structure(), ### write your code here # perform two nested sapply() loops calling the function run_vwap(), # first over "short_windows", second over "long_windows". 
# To get full credit you must pass the arguments "da_ta=env_data$VTI" # into run_vwap() through the dots argument of the sapply() functions, # you can use an anonymous function, # the output should ba a named matrix called "mat_rix" as follows # (transpose will also get full credit): # sh30 sh40 sh50 sh60 sh70 sh80 sh90 sh100 # lo200 84.06524 91.24971 93.77047 92.01054 87.79107 76.23158 57.91753 74.07076 # lo225 72.06119 97.77155 92.31084 88.33317 76.08169 75.71970 41.36928 38.56718 # lo250 86.73914 88.73794 91.26042 95.83383 82.29601 67.69120 61.48578 43.94475 # lo275 84.42591 95.18654 92.79157 81.29927 86.75097 55.87239 55.67067 60.19943 # lo300 90.90760 94.00829 103.63414 108.20157 84.86396 64.79328 49.14296 64.55424 # lo325 92.71081 105.49712 99.21752 108.44899 72.44130 66.29816 65.10376 73.93077 # lo350 95.65559 113.38591 112.83352 87.72392 85.38218 70.43943 66.78933 79.28444 # lo375 89.53747 121.43885 105.97034 94.08813 88.17621 71.61272 71.60992 64.63382 # lo400 91.10256 115.04823 95.78384 99.42304 89.05842 78.38954 67.25607 82.20783 ### write your code here ############## Part VIII # 8. (5pts) Draw an interactive 3d surface plot of "mat_rix". # you can use function persp3d(), # You can adapt code from the "plotting" pdf and R files. # Be sure to download the most recent version. library(rgl) # load rgl ### write your code here
/lecture_slides/FRE7241_HW4.R
no_license
fdoperezi/lecture_slides
R
false
false
11,332
r
################################# ### FRE7241 Homework #3 due Oct 27, 2015 ################################# # Max score 100pts # Please write in this file the R code needed to perform the tasks below, # rename it to your_name_hw4.R # and upload the file to NYU Classes ############## Part I # Summary: Simulate a trading strategy based on two VWAPs. # download the file "etf_data.Rdata" from NYU Classes, and load() it. # "etf_data.Rdata" contains an environment called "env_data", # with OHLC time series data for ETFs, including "VTI". library(quantmod) load(file="C:/Develop/data/etf_data.Rdata") # 1. (5pts) Define two integer windows (lookback periods) called # "win_short=10" and "win_long=100". # Calculate two vectors of VWAPs called "vwap_short" and "vwap_long", # for the "VTI" OHLC data. # You must use function v_wap() from the previous homework, win_short <- 10 win_long <- 100 ### write your code here # Calculate a numeric vector called "indi_cator", that is # equal to 1 when "vwap_short > vwap_long" and equal to -1 # when "vwap_short < vwap_long", # The sign of "indi_cator" will determine the strategy's risk # positions, either long risk or short risk. # You can use function sign(), ### write your code here ############## Part II # 2. (10pts) Calculate a boolean vector that is TRUE only on dates # right after the VWAPs have crossed, and call it "cross_es". # For example, if yesterday "vwap_short < vwap_long" and today # "vwap_short > vwap_long" (or vice versa), then today "cross_es" # should be TRUE, and otherwise it should be FALSE. # hint: the diff() of "indi_cator" is not zero right after the # VWAPs have crossed, and otherwise it's zero. # you can use the functions sign(), diff(), and is.na(), # and the logical operator "!=", # set any NAs to FALSE, ### write your code here # The strategy should perform trades after "cross_es" becomes TRUE, # but with a one period lag, to reflect that in practice it's # impossible to trade immediately. 
# Calculate a vector of integer indices corresponding to trade # dates, and call it "trade_dates". # hint: first calculate the indices corresponding to the periods when # "cross_es" is TRUE, and add "1" to them, to reflect the one period lag. # you can use function which(), ### write your code here # The strategy invests in a fixed number of shares called "num_shares". num_shares <- 100 # The strategy either owns "num_shares" number of shares (long position), # or sells the same number of shares short (short position). # Thus the strategy consists of consecutive time intervals of long risk # and short risk positions, depending on the sign of "indi_cator". # When "indi_cator" becomes positive then the strategy buys shares and # flips to a long risk position, and vice versa. ############## Part III # 3. (20pts) The strategy should be simulated over a number of periods # of time called "n_periods", which should be equal to the number of # rows in the OHLC time series data, # you can use function nrow(), ### write your code here # Calculate a numeric vector called "pos_ition", that is equal to the # number of shares owned by the strategy at any given period of time, # either positive (long risk position) or negative (short risk position). # The strategy should start with a position of zero. # The strategy position should be reset on "trade_dates", depending on # the sign of "indi_cator". # The strategy position should remain unchamged between the "trade_dates". # you can use functions numeric() and na.locf(), ### write your code here # Lag the vector "pos_ition" by one period and call it "lag_position". # The first value of "lag_position" should be zero. 
# you can use function c() combined with subsetting "[]", ### write your code here # you can inspect the vectors by merging and subsetting them: foo <- merge(Ad(env_data$VTI), vwap_short, vwap_long, cross_es, pos_ition) colnames(foo) <- c("price", "vwap_short", "vwap_long", "cross_es", "pos_ition") foo[(which(cross_es)[2]-3):(which(cross_es)[2]+3), ] # you should get this: # price vwap_short vwap_long cross_es pos_ition # 2007-02-27 59.09827 60.62776 60.13491 0 100 # 2007-02-28 59.56461 60.42235 60.10604 0 100 # 2007-03-01 59.39503 60.17368 60.06567 0 100 # 2007-03-02 58.54714 59.98786 60.03648 1 100 # 2007-03-05 57.86882 59.65299 59.95280 0 -100 # 2007-03-06 58.84390 59.47699 59.92486 0 -100 # 2007-03-07 58.79727 59.32994 59.90830 0 -100 # Calculate a vector of adjusted close prices from the OHLC data, # and call it "price_s". # Calculate a vector of lagged "price_s", and call it "lag_prices". # Calculate a vector of open prices from the OHLC data, # and call it "open_prices". # you can use functions Ad(), Op(), and c(), ### write your code here ############## Part IV # 4. (20pts) Calculate a vector of periodic (daily) profits and # losses and call it "pn_l". # The periodic (day over day) profit or loss (pnl) for a period # without any trade, is equal to the position in the previous # period, times the difference between this period's closing # price minus the previous period's closing price. # The periodic pnl for a period with a trade, is equal to the # sum of two terms. # The first term is equal to the position in the previous # period, times the difference between this period's opening # price minus the previous period's closing price. # The first term represents the realized pnl after trading # out of the previous position. # The second term is equal to the current (new) position times # the difference between this period's closing minus opening # prices. # The second term represents the unrealized pnl of the new # position on the day of the trade. 
# you can use the vectors "price_s", "lag_prices", "open_prices", # "pos_ition", "lag_position" and "trade_dates", ### write your code here # you can inspect the vectors by merging and subsetting them: foo <- merge(Ad(env_data$VTI), lag_prices, open_prices, pos_ition, lag_position, pn_l) colnames(foo) <- c("price", "lag_prices", "open_prices", "pos_ition", "lag_position", "pn_l") foo[(which(cross_es)[2]-3):(which(cross_es)[2]+3), ] # you should get this: # price lag_prices open_prices pos_ition lag_position pn_l # 2007-02-27 59.09827 61.37487 60.68807 100 100 -227.66000 # 2007-02-28 59.56461 59.09827 59.36535 100 100 46.63400 # 2007-03-01 59.39503 59.56461 58.48778 100 100 -16.95800 # 2007-03-02 58.54714 59.39503 59.16186 100 100 -84.78900 # 2007-03-05 57.86882 58.54714 57.99600 -100 100 -42.39516 # 2007-03-06 58.84390 57.86882 58.37756 -100 -100 -97.50800 # 2007-03-07 58.79727 58.84390 58.92021 -100 -100 4.66300 # Calculate the Sharpe ratio of the strategy returns "pn_l", # given by the sum() of "pn_l" divided by the sd() of "pn_l". ### write your code here # Create an xts series from "pos_ition" and call it "xts_position". # you can use functions xts() and index(), ### write your code here # Create an xts series from the cumulative sum of "pn_l" # and call it "xts_pnl". # you can use functions xts(), cumsum(), and index(), ### write your code here ############## Part V # 5. (20pts) Plot the time series of prices and the strategy pnl # in two panels. # open plot graphics device using function x11(), # set plot parameters using function par() with argument "mfrow", ### write your code here # Plot in the top panel the adjusted close prices of the OHLC data, # add "vwap_long" to the plot, # add background shading of areas corresponding to long positions # in "lightgreen" and short positions in "lightgrey". 
# hint: call chart_Series() once, and then call add_TA() three times, # wrap the first three calls in invisible() to prevent plotting, # except for the last add_TA() call. # you must use functions Ad(), chart_Series(), add_TA() # (with "on" parameter), and invisible(), # you can use the xts series "xts_position" for shading, # You can adapt code from the "time_series_univariate" pdf and R files. # Be sure to download the most recent version. ### write your code here # Plot in the bottom panel "xts_pnl", # add background shading of areas as before. # you must use functions chart_Series(), add_TA() # (with "on" parameter), and invisible(), ### write your code here ############## Part VI # 6. (10pts) Create a function called run_vwap() which performs # a simulation of a trading strategy based on two VWAPs (as above), # and returns the Sharpe ratio of the strategy returns, # run_vwap() should accept three arguments: # "win_short" and "win_long" - two integer lookback windows, # "da_ta" - OHLC time series data, # hint: combine all the code from the previous parts. ### write your code here # call run_vwap() as follows, to verify it works correctly, run_vwap(win_short=40, win_long=350, da_ta=env_data$VTI) ############## Part VII # 7. (10pts) # Create a named vector of integer values for "win_short" from=30, to=100, by=10 # called "short_windows", with the following values: # sh30 sh40 sh50 sh60 sh70 sh80 sh90 sh100 # 30 40 50 60 70 80 90 100 # Create a named vector of integer values for "win_long" from=200, to=400, by=25 # called "long_windows", with the following values: # lo200 lo225 lo250 lo275 lo300 lo325 lo350 lo375 lo400 # 200 225 250 275 300 325 350 375 400 # you can use functions seq(), paste0(), names(), and structure(), ### write your code here # perform two nested sapply() loops calling the function run_vwap(), # first over "short_windows", second over "long_windows". 
# To get full credit you must pass the arguments "da_ta=env_data$VTI" # into run_vwap() through the dots argument of the sapply() functions, # you can use an anonymous function, # the output should ba a named matrix called "mat_rix" as follows # (transpose will also get full credit): # sh30 sh40 sh50 sh60 sh70 sh80 sh90 sh100 # lo200 84.06524 91.24971 93.77047 92.01054 87.79107 76.23158 57.91753 74.07076 # lo225 72.06119 97.77155 92.31084 88.33317 76.08169 75.71970 41.36928 38.56718 # lo250 86.73914 88.73794 91.26042 95.83383 82.29601 67.69120 61.48578 43.94475 # lo275 84.42591 95.18654 92.79157 81.29927 86.75097 55.87239 55.67067 60.19943 # lo300 90.90760 94.00829 103.63414 108.20157 84.86396 64.79328 49.14296 64.55424 # lo325 92.71081 105.49712 99.21752 108.44899 72.44130 66.29816 65.10376 73.93077 # lo350 95.65559 113.38591 112.83352 87.72392 85.38218 70.43943 66.78933 79.28444 # lo375 89.53747 121.43885 105.97034 94.08813 88.17621 71.61272 71.60992 64.63382 # lo400 91.10256 115.04823 95.78384 99.42304 89.05842 78.38954 67.25607 82.20783 ### write your code here ############## Part VIII # 8. (5pts) Draw an interactive 3d surface plot of "mat_rix". # you can use function persp3d(), # You can adapt code from the "plotting" pdf and R files. # Be sure to download the most recent version. library(rgl) # load rgl ### write your code here
# Let's start looking at some baseball data and try to answer your
# questions using these data.
# First, do teams that hit more home runs score more runs?
# We know what the answer to this will be, but let's look at the data anyway.
# We will examine the data from 1961 to 2001.
# We end in 2001 because, remember, we are in 2002, getting ready to
# build a team.
# We start in 1961 because that year the league changed from 154 games
# to 162 games.
# The visualization of choice when exploring the relationship between
# two variables, such as home runs and runs, is a scatterplot.
# The following code shows how to make that scatterplot.
# We start by loading the Lahman library, which has all of these
# baseball statistics.
library(tidyverse)
library(Lahman)
library(dslabs)
data("Teams")

# And then we simply make a scatterplot using ggplot.
ds_theme_set()
Teams %>% filter(yearID %in% 1961:2001) %>%
  mutate(HR_per_game = HR/G, R_per_game = R/G) %>%
  ggplot(aes(HR_per_game, R_per_game)) +
  geom_point(alpha = 0.5)

# Here is a plot of runs per game versus home runs per game.
# The plot shows a very strong association: teams with more home runs
# tended to score more runs.

# Now, let's examine the relationship between stolen bases and wins.
Teams %>% filter(yearID %in% 1961:2001) %>%
  mutate(SB_per_game = SB/G, R_per_game = R/G) %>%
  ggplot(aes(SB_per_game, R_per_game)) +
  geom_point(alpha = 0.5)

# Here are runs per game plotted against stolen bases per game.
# Here, the relationship is not as clear.

# Finally, let's examine the relationship between bases on balls and runs.
# Here are runs per game against bases on balls per game.
Teams %>% filter(yearID %in% 1961:2001) %>%
  mutate(BB_per_game = BB/G, R_per_game = R/G) %>%
  ggplot(aes(BB_per_game, R_per_game)) +
  geom_point(alpha = 0.5)

# Although the relationship is not as strong as it was for home runs,
# we do see a fairly strong relationship here.
# We know that, by definition, home runs cause runs, because when you
# hit a home run, at least one run scores.
# Now, it is possible that home runs also cause problems.
# If you understand the game, you will agree with me that this could be
# the case.
# So it may appear that a base on balls is causing runs, when in fact
# it is home runs that are causing both.
# This is called confounding.
# An important concept you will learn about.
# Linear regression will help us parse all of this out and quantify the
# associations.
# This will help us determine which players to recruit.
# Specifically, we will try to predict things like how many more runs a
# team will score if we increase the number of bases on balls but keep
# home runs fixed.
# Regression will also help us answer this question.

# Exercises
Teams %>% filter(yearID %in% 1961:2001 ) %>%
  mutate(AB_per_game = AB/G, R_per_game = R/G) %>%
  ggplot(aes(AB_per_game, R_per_game)) +
  geom_point(alpha = 0.5)
/HC_BB_SB.R
no_license
wparedesgt/Regresiones
R
false
false
3,144
r
# Let's start looking at some baseball data and try to answer your
# questions using these data.
# First, do teams that hit more home runs score more runs?
# We know what the answer to this will be, but let's look at the data anyway.
# We will examine the data from 1961 to 2001.
# We end in 2001 because, remember, we are in 2002, getting ready to
# build a team.
# We start in 1961 because that year the league changed from 154 games
# to 162 games.
# The visualization of choice when exploring the relationship between
# two variables, such as home runs and runs, is a scatterplot.
# The following code shows how to make that scatterplot.
# We start by loading the Lahman library, which has all of these
# baseball statistics.
library(tidyverse)
library(Lahman)
library(dslabs)
data("Teams")

# And then we simply make a scatterplot using ggplot.
ds_theme_set()
Teams %>% filter(yearID %in% 1961:2001) %>%
  mutate(HR_per_game = HR/G, R_per_game = R/G) %>%
  ggplot(aes(HR_per_game, R_per_game)) +
  geom_point(alpha = 0.5)

# Here is a plot of runs per game versus home runs per game.
# The plot shows a very strong association: teams with more home runs
# tended to score more runs.

# Now, let's examine the relationship between stolen bases and wins.
Teams %>% filter(yearID %in% 1961:2001) %>%
  mutate(SB_per_game = SB/G, R_per_game = R/G) %>%
  ggplot(aes(SB_per_game, R_per_game)) +
  geom_point(alpha = 0.5)

# Here are runs per game plotted against stolen bases per game.
# Here, the relationship is not as clear.

# Finally, let's examine the relationship between bases on balls and runs.
# Here are runs per game against bases on balls per game.
Teams %>% filter(yearID %in% 1961:2001) %>%
  mutate(BB_per_game = BB/G, R_per_game = R/G) %>%
  ggplot(aes(BB_per_game, R_per_game)) +
  geom_point(alpha = 0.5)

# Although the relationship is not as strong as it was for home runs,
# we do see a fairly strong relationship here.
# We know that, by definition, home runs cause runs, because when you
# hit a home run, at least one run scores.
# Now, it is possible that home runs also cause problems.
# If you understand the game, you will agree with me that this could be
# the case.
# So it may appear that a base on balls is causing runs, when in fact
# it is home runs that are causing both.
# This is called confounding.
# An important concept you will learn about.
# Linear regression will help us parse all of this out and quantify the
# associations.
# This will help us determine which players to recruit.
# Specifically, we will try to predict things like how many more runs a
# team will score if we increase the number of bases on balls but keep
# home runs fixed.
# Regression will also help us answer this question.

# Exercises
Teams %>% filter(yearID %in% 1961:2001 ) %>%
  mutate(AB_per_game = AB/G, R_per_game = R/G) %>%
  ggplot(aes(AB_per_game, R_per_game)) +
  geom_point(alpha = 0.5)
## Reshape the two-country life-expectancy / fertility example from
## wide form into tidy form (one row per country-year, one column per
## variable), using the gather -> separate -> spread pipeline.

# FIX: read_csv(), gather(), separate(), spread(), select() and %>% were
# used without loading the packages that provide them, so the script
# failed when run standalone.
library(tidyverse)

# Locate the example CSV shipped with the dslabs package.
path <- system.file("extdata", package = "dslabs")
filename <- "life-expectancy-and-fertility-two-countries-example.csv"
filename <- file.path(path, filename)
raw_dat <- read_csv(filename)
select(raw_dat, 1:5)

# we gather all columns into one, call it key
dat <- raw_dat %>% gather(key, value, -country)
dat

# separate the column key so we have year and variable in 2 columns;
# extra = "merge" keeps "life_expectancy" intact after the first "_".
new_dat <- dat %>% separate(key, c("year", "variable_name"), "_", extra = "merge")
new_dat

# now we want to spread variable_name into 2 separate columns for fertility
# and life expectancy, and fill it with the value.
# NOTE: gather()/spread() are superseded by pivot_longer()/pivot_wider();
# kept here to preserve the original output exactly.
new_dat %>% spread(variable_name, value)
/PH125.6 Data Wrangling/R code/separate.R
no_license
alvarobarbera/HarvardX-PH125-DataScience
R
false
false
639
r
## Reshape the two-country life-expectancy / fertility example from
## wide form into tidy form (one row per country-year, one column per
## variable), using the gather -> separate -> spread pipeline.

# FIX: read_csv(), gather(), separate(), spread(), select() and %>% were
# used without loading the packages that provide them, so the script
# failed when run standalone.
library(tidyverse)

# Locate the example CSV shipped with the dslabs package.
path <- system.file("extdata", package = "dslabs")
filename <- "life-expectancy-and-fertility-two-countries-example.csv"
filename <- file.path(path, filename)
raw_dat <- read_csv(filename)
select(raw_dat, 1:5)

# we gather all columns into one, call it key
dat <- raw_dat %>% gather(key, value, -country)
dat

# separate the column key so we have year and variable in 2 columns;
# extra = "merge" keeps "life_expectancy" intact after the first "_".
new_dat <- dat %>% separate(key, c("year", "variable_name"), "_", extra = "merge")
new_dat

# now we want to spread variable_name into 2 separate columns for fertility
# and life expectancy, and fill it with the value.
# NOTE: gather()/spread() are superseded by pivot_longer()/pivot_wider();
# kept here to preserve the original output exactly.
new_dat %>% spread(variable_name, value)
## Mendelian-randomization simulation: compare an IVW estimator built
## from genome-wide-significant SNPs (P < 5E-08) with an MR-PRS
## two-stage regression estimator (SNPs with P < 1E-03), then plot the
## average estimates with 95% CIs.
## NOTE(review): large fully commented-out experimental code paths
## (MRLR, alternative thresholds, best-PRS) were removed; they
## referenced objects that were never defined and left an unmatched
## closing brace after the main loop.

# P-value thresholds considered for SNP selection (clumping+thresholding).
pthres <- c(5E-08, 1E-07, 5E-07, 1E-06, 5E-06, 1E-05, 5E-05,
            1E-04, 1E-03, 1E-02, 1E-01)

# Summary GWAS statistics: per-SNP estimates, SDs, and p-values for the
# exposure (alpha_*) and the outcome (gamma_*), one column per replicate.
load("/data/zhangh24/MR_MA/result/simulation/prs/summary_gwas_our.rdata")
alpha_est_mat <- result[[1]]
alpha_sd_mat <- result[[2]]
alpha_p_mat <- result[[3]]
gamma_est_mat <- result[[4]]
gamma_sd_mat <- result[[5]]
gamma_p_mat <- result[[6]]

# Mediator matrix; keep only the training samples.
load("/data/zhangh24/MR_MA/result/simulation/prs/M_mat.rdata")
n.train <- 100000
M_mat <- M_mat[1:n.train, ]

# Inverse-variance-weighted (IVW) causal-effect estimate from per-SNP
# Wald ratios. Returns c(estimate, variance, coverage indicator,
# CI lower, CI upper, p-value).
# NOTE(review): reads the true effect `beta_M` from the global
# environment to compute coverage; beta_M must be defined before this
# function is called.
IVW_c <- function(Gamma, var_Gamma, gamma, var_gamma) {
  ratio_vec <- Gamma / gamma
  # Delta-method variance of the per-SNP Wald ratio.
  ratio_var_vec <- var_Gamma / gamma^2 + var_gamma * Gamma^2 / gamma^4
  meta_result <- Meta(ratio_vec, ratio_var_vec)
  ratio_ivw <- meta_result[1]
  ratio_ivw_var <- meta_result[2]
  coef_low <- ratio_ivw - 1.96 * sqrt(ratio_ivw_var)
  coef_high <- ratio_ivw + 1.96 * sqrt(ratio_ivw_var)
  cover <- ifelse(beta_M >= coef_low & beta_M <= coef_high, 1, 0)
  p_value <- 2 * pnorm(-abs(ratio_ivw / sqrt(ratio_ivw_var)), lower.tail = TRUE)
  c(ratio_ivw, ratio_ivw_var, cover, coef_low, coef_high, p_value)
}

# Fixed-effect inverse-variance-weighted meta-analysis.
# Returns c(pooled estimate, pooled variance).
Meta <- function(coef_vec, var_vec) {
  meta_var <- 1 / sum(1 / var_vec)
  meta_coef <- meta_var * sum(coef_vec / var_vec)
  c(meta_coef, meta_var)
}

# PRS for the mediator (prs_m_mat) and outcome (prs_y_mat), one column
# per replicate.
load("/data/zhangh24/MR_MA/result/simulation/prs/prs_result_combined_twostage.rdata")
prs_m_mat <- prs_result[[1]]
prs_y_mat <- prs_result[[2]]

n.rep <- 1000
beta_M <- 0.15  # true causal effect, used for coverage

IVW_est <- rep(0, n.rep)
IVW_low_est <- rep(0, n.rep)
IVW_high_est <- rep(0, n.rep)
IVW_p <- rep(0, n.rep)
IVW_cover <- rep(0, n.rep)

MRPRS_est <- rep(0, n.rep)
MRPRS_low_est <- rep(0, n.rep)
MRPRS_high_est <- rep(0, n.rep)
MRPRS_cover <- rep(0, n.rep)
MRPRS_p <- rep(0, n.rep)

for (l in seq_len(n.rep)) {
  print(l)

  # IVW method restricted to genome-wide-significant SNPs.
  idx <- which(alpha_p_mat[, l] <= 5E-8)
  Gamma <- gamma_est_mat[idx, l]
  var_Gamma <- gamma_sd_mat[idx, l]^2
  alpha <- alpha_est_mat[idx, l]
  var_alpha <- alpha_sd_mat[idx, l]^2
  result_IVW <- IVW_c(Gamma, var_Gamma, alpha, var_alpha)
  IVW_est[l] <- result_IVW[1]
  IVW_low_est[l] <- result_IVW[4]
  IVW_high_est[l] <- result_IVW[5]
  IVW_cover[l] <- result_IVW[3]
  IVW_p[l] <- result_IVW[6]

  # MR-PRS: regress the outcome PRS on the mediator PRS built from SNPs
  # with P < 1E-03, with a weak-instrument correction (f_stat + 1) / f_stat.
  idx <- which(alpha_p_mat[, l] <= 1E-03)
  Q <- length(idx)
  alpha_prs <- alpha_est_mat[idx, l]
  sd_prs <- alpha_sd_mat[idx, l]
  sigma_m <- 1 - sum(alpha_prs^2 - sd_prs^2)
  # FIX: renamed F -> f_stat; `F` shadowed the logical constant FALSE.
  f_stat <- crossprod(prs_m_mat[, l]) / sigma_m / Q
  model <- lm(prs_y_mat[, l] ~ prs_m_mat[, l])
  MRPRS_est[l] <- coefficients(summary(model))[2, 1] * (f_stat + 1) / f_stat
  temp_ci <- confint(model)
  MRPRS_low_est[l] <- temp_ci[2, 1] * (f_stat + 1) / f_stat
  MRPRS_high_est[l] <- temp_ci[2, 2] * (f_stat + 1) / f_stat
  MRPRS_cover[l] <- ifelse(beta_M >= temp_ci[2, 1] & beta_M <= temp_ci[2, 2], 1, 0)
  MRPRS_p[l] <- coefficients(summary(model))[2, 4]
}

# FIX: the original summary referenced objects that were never created
# (MRLR_est, IVW_PRS_est, best_prs_est, ...) and built the data frame
# from an empty `method` vector; summarize the two methods actually
# computed above.
est <- c(mean(IVW_est), mean(MRPRS_est))
est_low <- c(mean(IVW_low_est), mean(MRPRS_low_est))
est_high <- c(mean(IVW_high_est), mean(MRPRS_high_est))
method <- c("IVW (P<5E-08)", "MR-PRS (P<1E-03)")

data <- data.frame(method, est, est_low, est_high)
data$method <- as.factor(data$method)
save(data, file = "/data/zhangh24/MR_MA/result/simulation/prs/data_for_plot.rdata")

library(ggplot2)
ggplot(data) +
  geom_bar(aes(x = method, y = est),
           stat = "identity", fill = "royalblue", alpha = 0.7) +
  # FIX: the error-bar aesthetic referenced a nonexistent column `name`.
  geom_errorbar(aes(x = method, ymin = est_low, ymax = est_high, width = 0.2),
                colour = "firebrick2", alpha = 0.9, size = 0.8) +
  coord_flip() +
  labs(x = 'Variables', y = expression("95% CI"),
       title = 'MR method comparison') +  # FIX: typo "comparasion"
  theme(plot.title = element_text(hjust = 0.5, size = 30),
        plot.subtitle = element_text(size = 20),
        axis.title.x = element_text(margin = margin(t = 10, r = 0, b = 0, l = 0),
                                    hjust = 0.5, size = 15),
        axis.title.y = element_text(margin = margin(t = 0, r = 10, b = 0, l = 0),
                                    vjust = 0.5, size = 15),
        axis.text.x = element_text(size = 15),
        axis.text.y = element_text(size = 15),
        legend.title = element_text(size = 15),
        legend.text = element_text(size = 15))
/code/simulation/PRS/MR_analysis.R
no_license
andrewhaoyu/MR_MA
R
false
false
7,962
r
## Mendelian-randomization simulation: compare an IVW estimator built
## from genome-wide-significant SNPs (P < 5E-08) with an MR-PRS
## two-stage regression estimator (SNPs with P < 1E-03), then plot the
## average estimates with 95% CIs.
## NOTE(review): large fully commented-out experimental code paths
## (MRLR, alternative thresholds, best-PRS) were removed; they
## referenced objects that were never defined and left an unmatched
## closing brace after the main loop.

# P-value thresholds considered for SNP selection (clumping+thresholding).
pthres <- c(5E-08, 1E-07, 5E-07, 1E-06, 5E-06, 1E-05, 5E-05,
            1E-04, 1E-03, 1E-02, 1E-01)

# Summary GWAS statistics: per-SNP estimates, SDs, and p-values for the
# exposure (alpha_*) and the outcome (gamma_*), one column per replicate.
load("/data/zhangh24/MR_MA/result/simulation/prs/summary_gwas_our.rdata")
alpha_est_mat <- result[[1]]
alpha_sd_mat <- result[[2]]
alpha_p_mat <- result[[3]]
gamma_est_mat <- result[[4]]
gamma_sd_mat <- result[[5]]
gamma_p_mat <- result[[6]]

# Mediator matrix; keep only the training samples.
load("/data/zhangh24/MR_MA/result/simulation/prs/M_mat.rdata")
n.train <- 100000
M_mat <- M_mat[1:n.train, ]

# Inverse-variance-weighted (IVW) causal-effect estimate from per-SNP
# Wald ratios. Returns c(estimate, variance, coverage indicator,
# CI lower, CI upper, p-value).
# NOTE(review): reads the true effect `beta_M` from the global
# environment to compute coverage; beta_M must be defined before this
# function is called.
IVW_c <- function(Gamma, var_Gamma, gamma, var_gamma) {
  ratio_vec <- Gamma / gamma
  # Delta-method variance of the per-SNP Wald ratio.
  ratio_var_vec <- var_Gamma / gamma^2 + var_gamma * Gamma^2 / gamma^4
  meta_result <- Meta(ratio_vec, ratio_var_vec)
  ratio_ivw <- meta_result[1]
  ratio_ivw_var <- meta_result[2]
  coef_low <- ratio_ivw - 1.96 * sqrt(ratio_ivw_var)
  coef_high <- ratio_ivw + 1.96 * sqrt(ratio_ivw_var)
  cover <- ifelse(beta_M >= coef_low & beta_M <= coef_high, 1, 0)
  p_value <- 2 * pnorm(-abs(ratio_ivw / sqrt(ratio_ivw_var)), lower.tail = TRUE)
  c(ratio_ivw, ratio_ivw_var, cover, coef_low, coef_high, p_value)
}

# Fixed-effect inverse-variance-weighted meta-analysis.
# Returns c(pooled estimate, pooled variance).
Meta <- function(coef_vec, var_vec) {
  meta_var <- 1 / sum(1 / var_vec)
  meta_coef <- meta_var * sum(coef_vec / var_vec)
  c(meta_coef, meta_var)
}

# PRS for the mediator (prs_m_mat) and outcome (prs_y_mat), one column
# per replicate.
load("/data/zhangh24/MR_MA/result/simulation/prs/prs_result_combined_twostage.rdata")
prs_m_mat <- prs_result[[1]]
prs_y_mat <- prs_result[[2]]

n.rep <- 1000
beta_M <- 0.15  # true causal effect, used for coverage

IVW_est <- rep(0, n.rep)
IVW_low_est <- rep(0, n.rep)
IVW_high_est <- rep(0, n.rep)
IVW_p <- rep(0, n.rep)
IVW_cover <- rep(0, n.rep)

MRPRS_est <- rep(0, n.rep)
MRPRS_low_est <- rep(0, n.rep)
MRPRS_high_est <- rep(0, n.rep)
MRPRS_cover <- rep(0, n.rep)
MRPRS_p <- rep(0, n.rep)

for (l in seq_len(n.rep)) {
  print(l)

  # IVW method restricted to genome-wide-significant SNPs.
  idx <- which(alpha_p_mat[, l] <= 5E-8)
  Gamma <- gamma_est_mat[idx, l]
  var_Gamma <- gamma_sd_mat[idx, l]^2
  alpha <- alpha_est_mat[idx, l]
  var_alpha <- alpha_sd_mat[idx, l]^2
  result_IVW <- IVW_c(Gamma, var_Gamma, alpha, var_alpha)
  IVW_est[l] <- result_IVW[1]
  IVW_low_est[l] <- result_IVW[4]
  IVW_high_est[l] <- result_IVW[5]
  IVW_cover[l] <- result_IVW[3]
  IVW_p[l] <- result_IVW[6]

  # MR-PRS: regress the outcome PRS on the mediator PRS built from SNPs
  # with P < 1E-03, with a weak-instrument correction (f_stat + 1) / f_stat.
  idx <- which(alpha_p_mat[, l] <= 1E-03)
  Q <- length(idx)
  alpha_prs <- alpha_est_mat[idx, l]
  sd_prs <- alpha_sd_mat[idx, l]
  sigma_m <- 1 - sum(alpha_prs^2 - sd_prs^2)
  # FIX: renamed F -> f_stat; `F` shadowed the logical constant FALSE.
  f_stat <- crossprod(prs_m_mat[, l]) / sigma_m / Q
  model <- lm(prs_y_mat[, l] ~ prs_m_mat[, l])
  MRPRS_est[l] <- coefficients(summary(model))[2, 1] * (f_stat + 1) / f_stat
  temp_ci <- confint(model)
  MRPRS_low_est[l] <- temp_ci[2, 1] * (f_stat + 1) / f_stat
  MRPRS_high_est[l] <- temp_ci[2, 2] * (f_stat + 1) / f_stat
  MRPRS_cover[l] <- ifelse(beta_M >= temp_ci[2, 1] & beta_M <= temp_ci[2, 2], 1, 0)
  MRPRS_p[l] <- coefficients(summary(model))[2, 4]
}

# FIX: the original summary referenced objects that were never created
# (MRLR_est, IVW_PRS_est, best_prs_est, ...) and built the data frame
# from an empty `method` vector; summarize the two methods actually
# computed above.
est <- c(mean(IVW_est), mean(MRPRS_est))
est_low <- c(mean(IVW_low_est), mean(MRPRS_low_est))
est_high <- c(mean(IVW_high_est), mean(MRPRS_high_est))
method <- c("IVW (P<5E-08)", "MR-PRS (P<1E-03)")

data <- data.frame(method, est, est_low, est_high)
data$method <- as.factor(data$method)
save(data, file = "/data/zhangh24/MR_MA/result/simulation/prs/data_for_plot.rdata")

library(ggplot2)
ggplot(data) +
  geom_bar(aes(x = method, y = est),
           stat = "identity", fill = "royalblue", alpha = 0.7) +
  # FIX: the error-bar aesthetic referenced a nonexistent column `name`.
  geom_errorbar(aes(x = method, ymin = est_low, ymax = est_high, width = 0.2),
                colour = "firebrick2", alpha = 0.9, size = 0.8) +
  coord_flip() +
  labs(x = 'Variables', y = expression("95% CI"),
       title = 'MR method comparison') +  # FIX: typo "comparasion"
  theme(plot.title = element_text(hjust = 0.5, size = 30),
        plot.subtitle = element_text(size = 20),
        axis.title.x = element_text(margin = margin(t = 10, r = 0, b = 0, l = 0),
                                    hjust = 0.5, size = 15),
        axis.title.y = element_text(margin = margin(t = 0, r = 10, b = 0, l = 0),
                                    vjust = 0.5, size = 15),
        axis.text.x = element_text(size = 15),
        axis.text.y = element_text(size = 15),
        legend.title = element_text(size = 15),
        legend.text = element_text(size = 15))
## Cache the inverse of a matrix.
## Matrix inversion is costly, so makeCacheMatrix() wraps a matrix in an
## object that can cache its inverse, and cacheSolve() computes the inverse,
## reusing the cached value when the matrix has not changed.

## Build a caching wrapper around matrix `x`.
## Returns a list of accessors:
##   set(y)        - replace the matrix and invalidate the cached inverse
##   get()         - return the current matrix
##   setInverse(i) - store a computed inverse in the cache
##   getInverse()  - return the cached inverse, or NULL if not computed yet
makeCacheMatrix <- function(x = matrix()) {
    m <- NULL  # cached inverse; NULL means "not computed yet"
    set <- function(y) {
        x <<- y
        m <<- NULL  # matrix changed, so the old inverse is stale
    }
    get <- function() x
    setInverse <- function(inverse) m <<- inverse
    getInverse <- function() m
    list(set = set, get = get,
         setInverse = setInverse,
         getInverse = getInverse)
}

## Return the inverse of the matrix wrapped by a makeCacheMatrix object `x`.
## If the inverse has already been computed (and the matrix unchanged), the
## cached copy is returned with a message; otherwise it is computed with
## solve() and cached. Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
    ## Return a matrix that is the inverse of 'x'
    m <- x$getInverse()
    if(!is.null(m)) {
        message("getting cached data")
        return(m)
    }
    data <- x$get()
    # BUG FIX: the original called the undefined function inverse();
    # solve() is the base R function that inverts a square matrix.
    m <- solve(data, ...)
    x$setInverse(m)
    m
}
/cachematrix.R
no_license
Paulominandy/ProgrammingAssignment2
R
false
false
742
r
## Put comments here that give an overall description of what your ## functions do ## Write a short comment describing this function makeCacheMatrix <- function(x = matrix()) { m <- NULL set <- function(y) { x <<- y m <<- NULL } get <- function() x setInverse <- function(inverse) m <<- inverse getInverse <- function() m list(set = set, get = get, setInverse = setInverse, getInverse = getInverse) } ## Write a short comment describing this function cacheSolve <- function(x, ...) { ## Return a matrix that is the inverse of 'x' m <- x$getInverse() if(!is.null(m)) { message("getting cached data") return(m) } data <- x$get() m <- inverse(data, ...) x$setInverse(m) m }
# Package startup hook, run by R when the namespace is loaded.
# Registers a custom Shiny input handler named "echarts4rParse" that
# round-trips the incoming value through JSON (toJSON with auto_unbox,
# then fromJSON) so the input arrives in a normalised R structure.
# force = TRUE overwrites any handler previously registered under the
# same name (e.g. on package reload).
.onLoad <- function(libname, pkgname) {
  parse_echarts_input <- function(data, ...) {
    jsonlite::fromJSON(jsonlite::toJSON(data, auto_unbox = TRUE))
  }
  shiny::registerInputHandler("echarts4rParse", parse_echarts_input,
                              force = TRUE)
}
/R/zzz.R
no_license
elefank/echarts4r
R
false
false
196
r
.onLoad <- function(libname, pkgname) { shiny::registerInputHandler("echarts4rParse", function(data, ...) { jsonlite::fromJSON(jsonlite::toJSON(data, auto_unbox = TRUE)) }, force = TRUE) }
## single sample MPI
## RUnit test: run the full CAMERA annotation pipeline on a single bundled
## mzData file (peak picking -> FWHM grouping -> isotope detection -> adduct
## search) and check the number of distinct annotation groups found.
## The commented-out Rmpi guard and nSlaves arguments are intentionally
## disabled (see inline note about BioC build machines).
test.anno_single <- function() {
# if(require("Rmpi", quietly=TRUE)){
  file <- system.file('mzdata/MM14.mzdata', package = "CAMERA")
  # centWave peak picking on the reference file
  xs <- xcmsSet(file, method="centWave", ppm=30, peakwidth=c(5,10))
  an <- xsAnnotate(xs) # ,nSlaves=2) Disabled because of unconfigured RMPI on BioC Build machines
  anF <- groupFWHM(an)
  anI <- findIsotopes(anF)
  # Positive-mode primary adduct rules shipped with the package
  file <- system.file('rules/primary_adducts_pos.csv', package = "CAMERA")
  rules <- read.csv(file)
  anFA <- findAdducts(anI,polarity="positive",rules=rules)
  # Expected: exactly 30 distinct annotation IDs for this reference file
  checkEqualsNumeric(length(unique(anFA@annoID[,1])),30)
  cleanParallel(an)
# }
}

## RUnit test: same pipeline on the multi-sample faahKO data set, run for
## three sample selections (single sample, explicit subset, all samples)
## with the expected number of annotation groups pinned for each case.
test.anno_multi <- function() {
  library(faahKO)
# if(require("Rmpi", quietly=TRUE)){
  filepath <- system.file("cdf", package = "faahKO")
  xsg <- group(faahko)
  file <- system.file('rules/primary_adducts_pos.csv', package = "CAMERA")
  rules <- read.csv(file)
  # Case 1: annotate using sample 1 only -> expect 20 groups
  xsa <- xsAnnotate(xsg, sample=1) # ,nSlaves=2) Disabled because of unconfigured RMPI on BioC Build machines
  xsaF <- groupFWHM(xsa, sigma=6, perfwhm=0.6)
  xsaC <- groupCorr(xsaF)
  xsaFI <- findIsotopes(xsaC)
  xsaFA <- findAdducts(xsaFI, polarity="positive",rules=rules)
  checkEqualsNumeric(length(unique(xsaFA@annoID[,1])),28)
  cleanParallel(xsa)
  # Case 2: annotate using samples 1..8 -> expect 28 groups
  xsa <- xsAnnotate(xsg, sample=c(1:8)) # ,nSlaves=2) Disabled because of unconfigured RMPI on BioC Build machines
  xsaF <- groupFWHM(xsa, sigma=6, perfwhm=0.6)
  xsaC <- groupCorr(xsaF)
  xsaFI <- findIsotopes(xsaC)
  xsaFA <- findAdducts(xsaFI, polarity="positive",rules=rules)
  checkEqualsNumeric(length(unique(xsaFA@annoID[,1])),28)
  cleanParallel(xsa)
  # Case 3: sample=NA (automatic selection) -> expect 16 groups
  xsa <- xsAnnotate(xsg, sample=NA) # ,nSlaves=2) Disabled because of unconfigured RMPI on BioC Build machines
  xsaF <- groupFWHM(xsa, sigma=6, perfwhm=0.6)
  xsaC <- groupCorr(xsaF)
  xsaFI <- findIsotopes(xsaC)
  xsaFA <- findAdducts(xsaFI, polarity="positive",rules=rules)
  checkEqualsNumeric(length(unique(xsaFA@annoID[,1])),16)
  cleanParallel(xsa)
# }
}
/inst/unitTests/test_findAdductsMPI.R
no_license
samsonjm/CAMERA
R
false
false
2,110
r
## single sample MPI test.anno_single <- function() { # if(require("Rmpi", quietly=TRUE)){ file <- system.file('mzdata/MM14.mzdata', package = "CAMERA") xs <- xcmsSet(file, method="centWave", ppm=30, peakwidth=c(5,10)) an <- xsAnnotate(xs) # ,nSlaves=2) Disabled because of unconfigured RMPI on BioC Build machines anF <- groupFWHM(an) anI <- findIsotopes(anF) file <- system.file('rules/primary_adducts_pos.csv', package = "CAMERA") rules <- read.csv(file) anFA <- findAdducts(anI,polarity="positive",rules=rules) checkEqualsNumeric(length(unique(anFA@annoID[,1])),30) cleanParallel(an) # } } test.anno_multi <- function() { library(faahKO) # if(require("Rmpi", quietly=TRUE)){ filepath <- system.file("cdf", package = "faahKO") xsg <- group(faahko) file <- system.file('rules/primary_adducts_pos.csv', package = "CAMERA") rules <- read.csv(file) xsa <- xsAnnotate(xsg, sample=1) # ,nSlaves=2) Disabled because of unconfigured RMPI on BioC Build machines xsaF <- groupFWHM(xsa, sigma=6, perfwhm=0.6) xsaC <- groupCorr(xsaF) xsaFI <- findIsotopes(xsaC) xsaFA <- findAdducts(xsaFI, polarity="positive",rules=rules) checkEqualsNumeric(length(unique(xsaFA@annoID[,1])),20) cleanParallel(xsa) xsa <- xsAnnotate(xsg, sample=c(1:8)) # ,nSlaves=2) Disabled because of unconfigured RMPI on BioC Build machines xsaF <- groupFWHM(xsa, sigma=6, perfwhm=0.6) xsaC <- groupCorr(xsaF) xsaFI <- findIsotopes(xsaC) xsaFA <- findAdducts(xsaFI, polarity="positive",rules=rules) checkEqualsNumeric(length(unique(xsaFA@annoID[,1])),28) cleanParallel(xsa) xsa <- xsAnnotate(xsg, sample=NA) # ,nSlaves=2) Disabled because of unconfigured RMPI on BioC Build machines xsaF <- groupFWHM(xsa, sigma=6, perfwhm=0.6) xsaC <- groupCorr(xsaF) xsaFI <- findIsotopes(xsaC) xsaFA <- findAdducts(xsaFI, polarity="positive",rules=rules) checkEqualsNumeric(length(unique(xsaFA@annoID[,1])),16) cleanParallel(xsa) # } }
# Intro.R — small demo functions from the R Programming course.
#
# FIXES relative to the original file:
#  * the first myfunction body was closed with ']' instead of '}'
#    (a syntax error that prevented the file from parsing);
#  * a trailing empty redefinition of myfunction() silently shadowed
#    the real definition — it has been removed.

# Draw 100 standard-normal samples and return their mean (a single
# numeric value, approximately 0).
myfunction <- function () {
  x <- rnorm (100)
  mean(x)
}

# Add independent standard-normal noise to each element of `y`.
# Returns a numeric vector the same length as `y`.
second <- function (y){
  y + rnorm (length(y))
}
/Programmjng-R/Intro.R
no_license
farazrehman/datasciencecoursera
R
false
false
149
r
myfunction <- function () { x <- rnorm (100) mean(x) ] second <- function (y){ y + rnorm (length(y)) } myfunction <- function () { }
# 04.sanity_check.R — date-consistency checks for the CHIP-Avatar cohort.
# Inputs (defined elsewhere in the project, not in this file): `path`,
# `radiation`, `treatment`, `germline_patient_data`, `Demo_linkage`.
# Output: a summary table and one CSV per failed check under
# "<path>/sanity check output/".

# Build a single MRN <-> avatar_id lookup from both Avatar releases plus
# the demographics linkage, keeping one row per avatar_id.
mrn1 <- read_csv(paste0(path, "/M2GEN/Garrick_raw data/10R20000134_2020-05-05_avatar_v2_clinical-with-events/MRN.csv")) %>%
  rename(avatar_id = "AvatarKey")
mrn2 <- read_csv(paste0(path, "/M2GEN/Garrick_raw data/10R20000134_2020-05-05_avatar_v4_clinical-with-events/MRN.csv")) %>%
  rename(avatar_id = "AvatarKey")
mrn <- bind_rows(mrn1, mrn2, Demo_linkage) %>%
  distinct() %>%
  mutate(MRN = as.character(MRN)) %>%
  arrange(MRN) %>%
  distinct(avatar_id, .keep_all = TRUE)

# Radiation
# Flag radiation courses whose start date falls after the stop date.
# NOTE(review): rows where start == stop match neither branch and get NA.
radiation <- radiation %>%
  mutate(radiation_check = case_when(
    rad_start_date < rad_stop_date ~ "OK",
    rad_start_date > rad_stop_date ~ "not good"
  ))
table(radiation$radiation_check)
wrong_date <- (radiation[which(radiation$radiation_check == "not good"),]) %>%
  left_join(., mrn, by = c("avatar_id"))
write.csv(wrong_date, paste0(path, "/sanity check output/radiation start date after stop date.csv"))

# Treatment
# Same start-before-stop check for drug administrations.
treatment <- treatment %>%
  mutate(treatment_check = case_when(
    drug_start_date < drug_stop_date ~ "OK",
    drug_start_date > drug_stop_date ~ "not good"
  ))
table(treatment$treatment_check)
wrong_date <- (treatment[which(treatment$treatment_check == "not good"),]) %>%
  left_join(., mrn, by = c("avatar_id"))
write.csv(wrong_date, paste0(path, "/sanity check output/drugs start date after stop date.csv"))

# Per-patient chronology checks. Each mutate adds one "OK"/"not good"
# flag (NA when neither condition applies, e.g. missing dates).
sanity_check <- germline_patient_data %>%
  # diagnosis dates must be in increasing order
  mutate(diag_check = case_when(
    date_of_diagnosis_1 < date_of_diagnosis_2 & date_of_diagnosis_2 < date_of_diagnosis_3 & date_of_diagnosis_3 < date_of_diagnosis_4 ~ "OK",
    date_of_diagnosis_1 < date_of_diagnosis_2 & date_of_diagnosis_2 < date_of_diagnosis_3 ~ "OK",
    date_of_diagnosis_1 < date_of_diagnosis_2 ~ "OK",
    date_of_diagnosis_1 > date_of_diagnosis_2 & date_of_diagnosis_2 > date_of_diagnosis_3 & date_of_diagnosis_3 > date_of_diagnosis_4 ~ "not good",
    date_of_diagnosis_1 > date_of_diagnosis_2 & date_of_diagnosis_2 > date_of_diagnosis_3 ~ "not good",
    date_of_diagnosis_1 > date_of_diagnosis_2 ~ "not good"
  )) %>%
  # radiation start dates must be in increasing order
  mutate(rad_check = case_when(
    rad_start_date_1 < rad_start_date_2 & rad_start_date_2 < rad_start_date_3 & rad_start_date_3 < rad_start_date_4 ~ "OK",
    rad_start_date_1 < rad_start_date_2 & rad_start_date_2 < rad_start_date_3 ~ "OK",
    rad_start_date_1 < rad_start_date_2 ~ "OK",
    rad_start_date_1 > rad_start_date_2 & rad_start_date_2 > rad_start_date_3 & rad_start_date_3 >= rad_start_date_4 ~ "not good",
    rad_start_date_1 > rad_start_date_2 & rad_start_date_2 >= rad_start_date_3 ~ "not good",
    rad_start_date_1 >= rad_start_date_2 ~ "not good"
  )) %>%
  # stem-cell transplant (BMT) dates must be in increasing order
  mutate(sct_check = case_when(
    date_of_bmt_1 < date_of_bmt_2 & date_of_bmt_2 < date_of_bmt_3 ~ "OK",
    date_of_bmt_1 < date_of_bmt_2 ~ "OK",
    date_of_bmt_2 >= date_of_bmt_3 | date_of_bmt_1 >= date_of_bmt_2 ~ "not good"
  )) %>%
  # treatment-line start dates 1..15 must be non-decreasing
  mutate(treat_check = case_when(
    line_start_date_1 > line_start_date_2 |
      line_start_date_2 > line_start_date_3 |
      line_start_date_3 > line_start_date_4 |
      line_start_date_4 > line_start_date_5 |
      line_start_date_5 > line_start_date_6 |
      line_start_date_6 > line_start_date_7 |
      line_start_date_7 > line_start_date_8 |
      line_start_date_8 > line_start_date_9 |
      line_start_date_9 > line_start_date_10 |
      line_start_date_10 > line_start_date_11 |
      line_start_date_11 > line_start_date_12 |
      line_start_date_12 > line_start_date_13 |
      line_start_date_13 > line_start_date_14 |
      line_start_date_14 > line_start_date_15 ~ "not good",
    line_start_date_14 <= line_start_date_15 ~ "OK",
    line_start_date_13 <= line_start_date_14 ~ "OK",
    line_start_date_12 <= line_start_date_13 ~ "OK",
    line_start_date_11 <= line_start_date_12 ~ "OK",
    line_start_date_10 <= line_start_date_11 ~ "OK",
    line_start_date_9 <= line_start_date_10 ~ "OK",
    line_start_date_8 <= line_start_date_9 ~ "OK",
    line_start_date_7 <= line_start_date_8 ~ "OK",
    line_start_date_6 <= line_start_date_7 ~ "OK",
    line_start_date_5 <= line_start_date_6 ~ "OK",
    line_start_date_4 <= line_start_date_5 ~ "OK",
    line_start_date_3 <= line_start_date_4 ~ "OK",
    line_start_date_2 <= line_start_date_3 ~ "OK",
    line_start_date_1 <= line_start_date_2 ~ "OK"
  )) %>%
  # birth must precede last available date
  mutate(birth_BF_lastdate = case_when(
    last_date_available > Date_of_Birth ~ "OK",
    last_date_available <= Date_of_Birth ~ "not good"
  )) %>%
  # birth must precede first diagnosis
  mutate(birth_BF_diag = case_when(
    date_of_diagnosis_1 > Date_of_Birth ~ "OK",
    date_of_diagnosis_1 <= Date_of_Birth ~ "not good"
  )) %>%
  # diagnosis must precede last available date
  mutate(diag_BF_lastdate = case_when(
    last_date_available > Dx_date_closest_germline ~ "OK",
    last_date_available <= Dx_date_closest_germline ~ "not good"
  )) %>%
  # combined ordering: birth < diagnosis < last date (no "not good" branch)
  mutate(birth_diag_lastdate = case_when(
    Date_of_Birth < Dx_date_closest_germline & Dx_date_closest_germline < last_date_available ~ "OK"
  )) %>%
  mutate(rad_after_diag = case_when(
    rad_start_date_1 > Dx_date_closest_germline ~ "OK"
  )) %>%
  mutate(bmt_after_diag = case_when(
    date_of_bmt_1 > Dx_date_closest_germline ~ "OK"
  )) %>%
  mutate(drug_after_diag = case_when(
    line_start_date_1 > Dx_date_closest_germline ~ "OK"
  )) %>%
  mutate(diag_BF_progression = case_when(
    progression_date > Dx_date_closest_germline ~ "OK",
    progression_date <= Dx_date_closest_germline ~ "not good"
  )) %>%
  mutate(progression_BF_death = case_when(
    progression_date < date_death ~ "OK",
    progression_date >= date_death ~ "not good"
  )) %>%
  # first drug line must start on or before the first transplant
  mutate(drug_bf_bmt = case_when(
    line_start_date_1 <= date_of_bmt_1 ~ "OK",
    line_start_date_1 > date_of_bmt_1 ~ "not good",
    is.na(line_start_date_1) & !is.na(date_of_bmt_1) ~ "not good"
  ))

# Summary table: 4 rows x 16 columns (check name, description, #OK,
# #not good), built by filling a matrix by row.
table_sanity_check <- as.data.table(matrix(c("check", "radiation_check", "treatment_check", "diag_check", "rad_check", "sct_check", "treat_check", "birth_BF_lastdate", "birth_BF_diag", "diag_BF_lastdate", "birth_diag_lastdate", "rad_after_diag", "bmt_after_diag", "drug_after_diag", "diag_BF_progression", "progression_BF_death",
  "comments", "if stop date after start date", "if stop date after start date", "if diag1 before diag2 before diag3 etc", "if rad1 before rad2 before rad3 etc", "if bmt1 before bmt2 before bmt3 etc", "if drug1 before drug2 before drug3 etc", "if birth before last date available", "if birth before diag", "if diag before last date available", "if birth before diag before last date available", "if rad1 after diag","if bmt1 after diag", "if drug1 after diag", "if progression after diag", "if progression before death",
  "OK", sum(str_count(radiation$radiation_check, "OK"), na.rm = TRUE), sum(str_count(treatment$treatment_check, "OK"), na.rm = TRUE), sum(str_count(sanity_check$diag_check, "OK"), na.rm = TRUE), sum(str_count(sanity_check$rad_check, "OK"), na.rm = TRUE), sum(str_count(sanity_check$sct_check, "OK"), na.rm = TRUE), sum(str_count(sanity_check$treat_check, "OK"), na.rm = TRUE), sum(str_count(sanity_check$birth_BF_lastdate, "OK"), na.rm = TRUE), sum(str_count(sanity_check$birth_BF_diag, "OK"), na.rm = TRUE), sum(str_count(sanity_check$diag_BF_lastdate, "OK"), na.rm = TRUE), sum(str_count(sanity_check$birth_diag_lastdate, "OK"), na.rm = TRUE), sum(str_count(sanity_check$rad_after_diag, "OK"), na.rm = TRUE), sum(str_count(sanity_check$bmt_after_diag, "OK"), na.rm = TRUE), sum(str_count(sanity_check$drug_after_diag, "OK"), na.rm = TRUE), sum(str_count(sanity_check$diag_BF_progression, "OK"), na.rm = TRUE), sum(str_count(sanity_check$progression_BF_death, "OK"), na.rm = TRUE),
  "not good", sum(str_count(radiation$radiation_check, "not good"), na.rm = TRUE), sum(str_count(treatment$treatment_check, "not good"), na.rm = TRUE), sum(str_count(sanity_check$diag_check, "not good"), na.rm = TRUE), sum(str_count(sanity_check$rad_check, "not good"), na.rm = TRUE), sum(str_count(sanity_check$sct_check, "not good"), na.rm = TRUE), sum(str_count(sanity_check$treat_check, "not good"), na.rm = TRUE), sum(str_count(sanity_check$birth_BF_lastdate, "not good"), na.rm = TRUE), sum(str_count(sanity_check$birth_BF_diag, "not good"), na.rm = TRUE), sum(str_count(sanity_check$diag_BF_lastdate, "not good"), na.rm = TRUE), sum(str_count(sanity_check$birth_diag_lastdate, "not good"), na.rm = TRUE), sum(str_count(sanity_check$rad_after_diag, "not good"), na.rm = TRUE), sum(str_count(sanity_check$bmt_after_diag, "not good"), na.rm = TRUE), sum(str_count(sanity_check$drug_after_diag, "not good"), na.rm = TRUE), sum(str_count(sanity_check$diag_BF_progression, "not good"), na.rm = TRUE), sum(str_count(sanity_check$progression_BF_death, "not good"), na.rm = TRUE)
), ncol = 16, byrow=TRUE))
write.csv(table_sanity_check, paste0(path, "/sanity check output/sanity check.csv"))

# Export one CSV of offending rows per failed check.
wrong_date_bmt <- as.data.table(sanity_check[which(sanity_check$sct_check == "not good"), c("avatar_id", "mrn", "date_of_bmt_1", "date_of_bmt_2")])
write.csv(wrong_date_bmt, paste0(path, "/sanity check output/wrong_date_bmt.csv"))
wrong_date_drug <- sanity_check[which(sanity_check$treat_check == "not good"), ] %>%
  select("avatar_id", "mrn", starts_with("drug_start"), starts_with("drug_name"))
wrong_date_drug <- as.data.table(wrong_date_drug)
write.csv(wrong_date_drug, paste0(path, "/sanity check output/wrong_date_drug.csv"))
wrong_date_rad <- sanity_check[which(sanity_check$rad_check == "not good"), c("avatar_id", "rad_start_date_1", "rad_start_date_2", "rad_start_date_3")] # no big deal, they are the same (1=2)
write.csv(wrong_date_rad, paste0(path, "/sanity check output/wrong_date_rad.csv"))
wrong_diag_or_lastdate <- as.data.table(sanity_check[which(sanity_check$diag_BF_lastdate == "not good"), c("avatar_id", "Dx_date_closest_germline", "last_date_available", "date_death", "date_last_follow_up")]) %>%
  left_join(., mrn, by = c("avatar_id"))
write.csv(wrong_diag_or_lastdate, paste0(path, "/sanity check output/wrong_diag_or_lastdate.csv"))
missing_diag <- as.data.table(sanity_check[which(is.na(sanity_check$Dx_date_closest_germline)), c("avatar_id", "Dx_date_closest_germline")])
write.csv(missing_diag, paste0(path, "/sanity check output/missing_diag date.csv"))
wrong_progression <- as.data.table(sanity_check[which(sanity_check$diag_BF_progression == "not good"), c("avatar_id", "progression_date", "Dx_date_closest_germline", "date_of_diagnosis_1", "date_of_diagnosis_2", "Disease_Status_germline")])
write.csv(wrong_progression, paste0(path, "/sanity check output/wrong_progression date.csv"))
wrong_progression_BF_death <- as.data.table(sanity_check[which(sanity_check$progression_BF_death == "not good"), c("avatar_id", "progression_date", "Dx_date_closest_germline", "date_death" , "Disease_Status_germline")])
write.csv(wrong_progression_BF_death, paste0(path, "/sanity check output/progression_BF_death date.csv"))
table(sanity_check$drug_bf_bmt)
# NOTE(review): "drug_name__1" has a double underscore — confirm this
# matches the actual column name in germline_patient_data.
drug_bf_bmt <- as.data.table(sanity_check[which(sanity_check$drug_bf_bmt == "not good"), c("avatar_id", "date_of_bmt_1", "drug_start_date_1", "line_start_date_1", "drug_name__1", "imids_maintenance")]) %>%
  left_join(., mrn, by = c("avatar_id"))
write.csv(drug_bf_bmt, paste0(path, "/sanity check output/drugs start before bmt.csv"))
rm(sanity_check, table_sanity_check, wrong_date, wrong_date_bmt, wrong_date_drug, wrong_date_rad, wrong_diag_or_lastdate, missing_diag)

######## Check long drug name
# Regimens whose name mentions "clinical" (clinical-trial entries).
trial_reg_name <- germline_patient_data %>%
  select(avatar_id, starts_with("regimen_name_"), starts_with("drug_start_date_"),
         starts_with("drug_stop_date_"),
         starts_with("line_start_date_"), starts_with("line_stop_date_")) %>%
  pivot_longer(cols = starts_with("regimen_name_"),
               names_to = "regimen_number",
               values_to = "regimen_name_",
               values_drop_na = TRUE) %>%
  filter(str_detect(regimen_name_, "clinical"))
trial_reg_name <- left_join(trial_reg_name, mrn, by = "avatar_id")
write_csv(trial_reg_name, paste0(path, "/sanity check output/clinical trial as reg_name.csv"))
# trial_reg_name <- germline_patient_data %>%
#   select(avatar_id, starts_with("regimen_name_"), starts_with("drug_start_date_"),
#          starts_with("drug_stop_date_"),
#          starts_with("line_start_date_"), starts_with("line_stop_date_")) %>%
#   filter(.vars == vars(starts_with("regimen_name_") ) ,
#          .vars_predicate == any_vars(str_detect(. , "clinical")))
# NOTE(review): this second export reuses the same file name as the one
# above, overwriting it — confirm that is intended.
trial_reg_name <- treatment %>%
  filter(str_detect(drug_name_, "clinical")) %>%
  left_join(., mrn, by = "avatar_id")
write_csv(trial_reg_name, paste0(path, "/sanity check output/clinical trial as reg_name.csv"))

# How many patients had HCT but no melphalan----
hct_data <- germline_patient_data %>%
  distinct(avatar_id, .keep_all = TRUE) %>%
  filter(!is.na(date_of_bmt_1))
hct_data1 <- hct_data %>%
  pivot_longer(cols = starts_with("drug_name_"), values_to = "drug_name_") %>%
  filter(str_detect(drug_name_, "melphalan")) %>%
  distinct(avatar_id, .keep_all = TRUE)
# NOTE(review): if no patient received melphalan, uid is "" and grepl("")
# matches every row, so hct_data_no_mel would be empty — verify edge case.
uid <- paste(unique(hct_data1$avatar_id), collapse = '|')
hct_data_no_mel <- hct_data[(!grepl(uid, hct_data$avatar_id)),] %>%
  select(avatar_id, starts_with("date_of_bmt_"), starts_with("drug_name"))
write_csv(hct_data_no_mel, paste0(path, "/sanity check output/patients received HCT but no melphalan.csv"))
/R/04.sanity_check.R
no_license
GillisLabAtMoffitt/CHIP-Avatar
R
false
false
14,690
r
mrn1 <- read_csv(paste0(path, "/M2GEN/Garrick_raw data/10R20000134_2020-05-05_avatar_v2_clinical-with-events/MRN.csv")) %>% rename(avatar_id = "AvatarKey") mrn2 <- read_csv(paste0(path, "/M2GEN/Garrick_raw data/10R20000134_2020-05-05_avatar_v4_clinical-with-events/MRN.csv")) %>% rename(avatar_id = "AvatarKey") mrn <- bind_rows(mrn1, mrn2, Demo_linkage) %>% distinct() %>% mutate(MRN = as.character(MRN)) %>% arrange(MRN) %>% distinct(avatar_id, .keep_all = TRUE) # Radiation radiation <- radiation %>% mutate(radiation_check = case_when( rad_start_date < rad_stop_date ~ "OK", rad_start_date > rad_stop_date ~ "not good" )) table(radiation$radiation_check) wrong_date <- (radiation[which(radiation$radiation_check == "not good"),]) %>% left_join(., mrn, by = c("avatar_id")) write.csv(wrong_date, paste0(path, "/sanity check output/radiation start date after stop date.csv")) # Treatment treatment <- treatment %>% mutate(treatment_check = case_when( drug_start_date < drug_stop_date ~ "OK", drug_start_date > drug_stop_date ~ "not good" )) table(treatment$treatment_check) wrong_date <- (treatment[which(treatment$treatment_check == "not good"),]) %>% left_join(., mrn, by = c("avatar_id")) write.csv(wrong_date, paste0(path, "/sanity check output/drugs start date after stop date.csv")) sanity_check <- germline_patient_data %>% mutate(diag_check = case_when( date_of_diagnosis_1 < date_of_diagnosis_2 & date_of_diagnosis_2 < date_of_diagnosis_3 & date_of_diagnosis_3 < date_of_diagnosis_4 ~ "OK", date_of_diagnosis_1 < date_of_diagnosis_2 & date_of_diagnosis_2 < date_of_diagnosis_3 ~ "OK", date_of_diagnosis_1 < date_of_diagnosis_2 ~ "OK", date_of_diagnosis_1 > date_of_diagnosis_2 & date_of_diagnosis_2 > date_of_diagnosis_3 & date_of_diagnosis_3 > date_of_diagnosis_4 ~ "not good", date_of_diagnosis_1 > date_of_diagnosis_2 & date_of_diagnosis_2 > date_of_diagnosis_3 ~ "not good", date_of_diagnosis_1 > date_of_diagnosis_2 ~ "not good" )) %>% mutate(rad_check = case_when( rad_start_date_1 
< rad_start_date_2 & rad_start_date_2 < rad_start_date_3 & rad_start_date_3 < rad_start_date_4 ~ "OK", rad_start_date_1 < rad_start_date_2 & rad_start_date_2 < rad_start_date_3 ~ "OK", rad_start_date_1 < rad_start_date_2 ~ "OK", rad_start_date_1 > rad_start_date_2 & rad_start_date_2 > rad_start_date_3 & rad_start_date_3 >= rad_start_date_4 ~ "not good", rad_start_date_1 > rad_start_date_2 & rad_start_date_2 >= rad_start_date_3 ~ "not good", rad_start_date_1 >= rad_start_date_2 ~ "not good" )) %>% mutate(sct_check = case_when( date_of_bmt_1 < date_of_bmt_2 & date_of_bmt_2 < date_of_bmt_3 ~ "OK", date_of_bmt_1 < date_of_bmt_2 ~ "OK", date_of_bmt_2 >= date_of_bmt_3 | date_of_bmt_1 >= date_of_bmt_2 ~ "not good" )) %>% mutate(treat_check = case_when( line_start_date_1 > line_start_date_2 | line_start_date_2 > line_start_date_3 | line_start_date_3 > line_start_date_4 | line_start_date_4 > line_start_date_5 | line_start_date_5 > line_start_date_6 | line_start_date_6 > line_start_date_7 | line_start_date_7 > line_start_date_8 | line_start_date_8 > line_start_date_9 | line_start_date_9 > line_start_date_10 | line_start_date_10 > line_start_date_11 | line_start_date_11 > line_start_date_12 | line_start_date_12 > line_start_date_13 | line_start_date_13 > line_start_date_14 | line_start_date_14 > line_start_date_15 ~ "not good", line_start_date_14 <= line_start_date_15 ~ "OK", line_start_date_13 <= line_start_date_14 ~ "OK", line_start_date_12 <= line_start_date_13 ~ "OK", line_start_date_11 <= line_start_date_12 ~ "OK", line_start_date_10 <= line_start_date_11 ~ "OK", line_start_date_9 <= line_start_date_10 ~ "OK", line_start_date_8 <= line_start_date_9 ~ "OK", line_start_date_7 <= line_start_date_8 ~ "OK", line_start_date_6 <= line_start_date_7 ~ "OK", line_start_date_5 <= line_start_date_6 ~ "OK", line_start_date_4 <= line_start_date_5 ~ "OK", line_start_date_3 <= line_start_date_4 ~ "OK", line_start_date_2 <= line_start_date_3 ~ "OK", line_start_date_1 <= line_start_date_2 
~ "OK" )) %>% mutate(birth_BF_lastdate = case_when( last_date_available > Date_of_Birth ~ "OK", last_date_available <= Date_of_Birth ~ "not good" )) %>% mutate(birth_BF_diag = case_when( date_of_diagnosis_1 > Date_of_Birth ~ "OK", date_of_diagnosis_1 <= Date_of_Birth ~ "not good" )) %>% mutate(diag_BF_lastdate = case_when( last_date_available > Dx_date_closest_germline ~ "OK", last_date_available <= Dx_date_closest_germline ~ "not good" )) %>% mutate(birth_diag_lastdate = case_when( Date_of_Birth < Dx_date_closest_germline & Dx_date_closest_germline < last_date_available ~ "OK" )) %>% mutate(rad_after_diag = case_when( rad_start_date_1 > Dx_date_closest_germline ~ "OK" )) %>% mutate(bmt_after_diag = case_when( date_of_bmt_1 > Dx_date_closest_germline ~ "OK" )) %>% mutate(drug_after_diag = case_when( line_start_date_1 > Dx_date_closest_germline ~ "OK" )) %>% mutate(diag_BF_progression = case_when( progression_date > Dx_date_closest_germline ~ "OK", progression_date <= Dx_date_closest_germline ~ "not good" )) %>% mutate(progression_BF_death = case_when( progression_date < date_death ~ "OK", progression_date >= date_death ~ "not good" )) %>% mutate(drug_bf_bmt = case_when( line_start_date_1 <= date_of_bmt_1 ~ "OK", line_start_date_1 > date_of_bmt_1 ~ "not good", is.na(line_start_date_1) & !is.na(date_of_bmt_1) ~ "not good" )) table_sanity_check <- as.data.table(matrix(c("check", "radiation_check", "treatment_check", "diag_check", "rad_check", "sct_check", "treat_check", "birth_BF_lastdate", "birth_BF_diag", "diag_BF_lastdate", "birth_diag_lastdate", "rad_after_diag", "bmt_after_diag", "drug_after_diag", "diag_BF_progression", "progression_BF_death", "comments", "if stop date after start date", "if stop date after start date", "if diag1 before diag2 before diag3 etc", "if rad1 before rad2 before rad3 etc", "if bmt1 before bmt2 before bmt3 etc", "if drug1 before drug2 before drug3 etc", "if birth before last date available", "if birth before diag", "if diag before last 
date available", "if birth before diag before last date available", "if rad1 after diag","if bmt1 after diag", "if drug1 after diag", "if progression after diag", "if progression before death", "OK", sum(str_count(radiation$radiation_check, "OK"), na.rm = TRUE), sum(str_count(treatment$treatment_check, "OK"), na.rm = TRUE), sum(str_count(sanity_check$diag_check, "OK"), na.rm = TRUE), sum(str_count(sanity_check$rad_check, "OK"), na.rm = TRUE), sum(str_count(sanity_check$sct_check, "OK"), na.rm = TRUE), sum(str_count(sanity_check$treat_check, "OK"), na.rm = TRUE), sum(str_count(sanity_check$birth_BF_lastdate, "OK"), na.rm = TRUE), sum(str_count(sanity_check$birth_BF_diag, "OK"), na.rm = TRUE), sum(str_count(sanity_check$diag_BF_lastdate, "OK"), na.rm = TRUE), sum(str_count(sanity_check$birth_diag_lastdate, "OK"), na.rm = TRUE), sum(str_count(sanity_check$rad_after_diag, "OK"), na.rm = TRUE), sum(str_count(sanity_check$bmt_after_diag, "OK"), na.rm = TRUE), sum(str_count(sanity_check$drug_after_diag, "OK"), na.rm = TRUE), sum(str_count(sanity_check$diag_BF_progression, "OK"), na.rm = TRUE), sum(str_count(sanity_check$progression_BF_death, "OK"), na.rm = TRUE), "not good", sum(str_count(radiation$radiation_check, "not good"), na.rm = TRUE), sum(str_count(treatment$treatment_check, "not good"), na.rm = TRUE), sum(str_count(sanity_check$diag_check, "not good"), na.rm = TRUE), sum(str_count(sanity_check$rad_check, "not good"), na.rm = TRUE), sum(str_count(sanity_check$sct_check, "not good"), na.rm = TRUE), sum(str_count(sanity_check$treat_check, "not good"), na.rm = TRUE), sum(str_count(sanity_check$birth_BF_lastdate, "not good"), na.rm = TRUE), sum(str_count(sanity_check$birth_BF_diag, "not good"), na.rm = TRUE), sum(str_count(sanity_check$diag_BF_lastdate, "not good"), na.rm = TRUE), sum(str_count(sanity_check$birth_diag_lastdate, "not good"), na.rm = TRUE), sum(str_count(sanity_check$rad_after_diag, "not good"), na.rm = TRUE), sum(str_count(sanity_check$bmt_after_diag, 
"not good"), na.rm = TRUE), sum(str_count(sanity_check$drug_after_diag, "not good"), na.rm = TRUE), sum(str_count(sanity_check$diag_BF_progression, "not good"), na.rm = TRUE), sum(str_count(sanity_check$progression_BF_death, "not good"), na.rm = TRUE) ), ncol = 16, byrow=TRUE)) write.csv(table_sanity_check, paste0(path, "/sanity check output/sanity check.csv")) wrong_date_bmt <- as.data.table(sanity_check[which(sanity_check$sct_check == "not good"), c("avatar_id", "mrn", "date_of_bmt_1", "date_of_bmt_2")]) write.csv(wrong_date_bmt, paste0(path, "/sanity check output/wrong_date_bmt.csv")) wrong_date_drug <- sanity_check[which(sanity_check$treat_check == "not good"), ] %>% select("avatar_id", "mrn", starts_with("drug_start"), starts_with("drug_name")) wrong_date_drug <- as.data.table(wrong_date_drug) write.csv(wrong_date_drug, paste0(path, "/sanity check output/wrong_date_drug.csv")) wrong_date_rad <- sanity_check[which(sanity_check$rad_check == "not good"), c("avatar_id", "rad_start_date_1", "rad_start_date_2", "rad_start_date_3")] # no big deal, they are the same (1=2) write.csv(wrong_date_rad, paste0(path, "/sanity check output/wrong_date_rad.csv")) wrong_diag_or_lastdate <- as.data.table(sanity_check[which(sanity_check$diag_BF_lastdate == "not good"), c("avatar_id", "Dx_date_closest_germline", "last_date_available", "date_death", "date_last_follow_up")]) %>% left_join(., mrn, by = c("avatar_id")) write.csv(wrong_diag_or_lastdate, paste0(path, "/sanity check output/wrong_diag_or_lastdate.csv")) missing_diag <- as.data.table(sanity_check[which(is.na(sanity_check$Dx_date_closest_germline)), c("avatar_id", "Dx_date_closest_germline")]) write.csv(missing_diag, paste0(path, "/sanity check output/missing_diag date.csv")) wrong_progression <- as.data.table(sanity_check[which(sanity_check$diag_BF_progression == "not good"), c("avatar_id", "progression_date", "Dx_date_closest_germline", "date_of_diagnosis_1", "date_of_diagnosis_2", "Disease_Status_germline")]) 
write.csv(wrong_progression, paste0(path, "/sanity check output/wrong_progression date.csv")) wrong_progression_BF_death <- as.data.table(sanity_check[which(sanity_check$progression_BF_death == "not good"), c("avatar_id", "progression_date", "Dx_date_closest_germline", "date_death" , "Disease_Status_germline")]) write.csv(wrong_progression_BF_death, paste0(path, "/sanity check output/progression_BF_death date.csv")) table(sanity_check$drug_bf_bmt) drug_bf_bmt <- as.data.table(sanity_check[which(sanity_check$drug_bf_bmt == "not good"), c("avatar_id", "date_of_bmt_1", "drug_start_date_1", "line_start_date_1", "drug_name__1", "imids_maintenance")]) %>% left_join(., mrn, by = c("avatar_id")) write.csv(drug_bf_bmt, paste0(path, "/sanity check output/drugs start before bmt.csv")) rm(sanity_check, table_sanity_check, wrong_date, wrong_date_bmt, wrong_date_drug, wrong_date_rad, wrong_diag_or_lastdate, missing_diag) ######## Check long drug name trial_reg_name <- germline_patient_data %>% select(avatar_id, starts_with("regimen_name_"), starts_with("drug_start_date_"), starts_with("drug_stop_date_"), starts_with("line_start_date_"), starts_with("line_stop_date_")) %>% pivot_longer(cols = starts_with("regimen_name_"), names_to = "regimen_number", values_to = "regimen_name_", values_drop_na = TRUE) %>% filter(str_detect(regimen_name_, "clinical")) trial_reg_name <- left_join(trial_reg_name, mrn, by = "avatar_id") write_csv(trial_reg_name, paste0(path, "/sanity check output/clinical trial as reg_name.csv")) # trial_reg_name <- germline_patient_data %>% # select(avatar_id, starts_with("regimen_name_"), starts_with("drug_start_date_"), # starts_with("drug_stop_date_"), # starts_with("line_start_date_"), starts_with("line_stop_date_")) %>% # filter(.vars == vars(starts_with("regimen_name_") ) , # .vars_predicate == any_vars(str_detect(. 
, "clinical"))) trial_reg_name <- treatment %>% filter(str_detect(drug_name_, "clinical")) %>% left_join(., mrn, by = "avatar_id") write_csv(trial_reg_name, paste0(path, "/sanity check output/clinical trial as reg_name.csv")) # How many patients had HCT but no melphalan---- hct_data <- germline_patient_data %>% distinct(avatar_id, .keep_all = TRUE) %>% filter(!is.na(date_of_bmt_1)) hct_data1 <- hct_data %>% pivot_longer(cols = starts_with("drug_name_"), values_to = "drug_name_") %>% filter(str_detect(drug_name_, "melphalan")) %>% distinct(avatar_id, .keep_all = TRUE) uid <- paste(unique(hct_data1$avatar_id), collapse = '|') hct_data_no_mel <- hct_data[(!grepl(uid, hct_data$avatar_id)),] %>% select(avatar_id, starts_with("date_of_bmt_"), starts_with("drug_name")) write_csv(hct_data_no_mel, paste0(path, "/sanity check output/patients received HCT but no melphalan.csv"))
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------

#' @title Distribution of Collaboration Hours (Fizzy Drink plot)
#'
#' @description
#' Analyze the weekly distribution of collaboration hours, returning a
#' 'fizzy' scatter plot by default. A table describing the distribution
#' can be requested instead via the `return` argument.
#'
#' @details
#' Uses the metric `Collaboration_hours`.
#'
#' @inheritParams create_fizz
#' @inherit create_fizz return
#'
#' @family Collaboration
#'
#' @examples
#' # Return plot
#' collaboration_fizz(sq_data, hrvar = "Organization", return = "plot")
#'
#' # Return summary table
#' collaboration_fizz(sq_data, hrvar = "Organization", return = "table")
#'
#' @export
collaboration_fizz <- function(data,
                               hrvar = "Organization",
                               mingroup = 5,
                               return = "plot") {
  # Thin wrapper: delegate to the generic fizzy-drink builder with the
  # metric fixed to weekly collaboration hours.
  create_fizz(
    data = data,
    metric = "Collaboration_hours",
    hrvar = hrvar,
    mingroup = mingroup,
    return = return
  )
}

#' @rdname collaboration_fizz
#' @export
collab_fizz <- collaboration_fizz
/R/collaboration_fizz.R
permissive
Global19/wpa
R
false
false
1,391
r
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See LICENSE.txt in the project root for license information. # -------------------------------------------------------------------------------------------- #' @title Distribution of Collaboration Hours (Fizzy Drink plot) #' #' @description #' Analyze weekly collaboration hours distribution, and returns #' a 'fizzy' scatter plot by default. #' Additional options available to return a table with distribution elements. #' #' @details #' Uses the metric `Collaboration_hours`. #' #' @inheritParams create_fizz #' @inherit create_fizz return #' #' @family Collaboration #' #' @examples #' # Return plot #' collaboration_fizz(sq_data, hrvar = "Organization", return = "plot") #' #' # Return summary table #' collaboration_fizz(sq_data, hrvar = "Organization", return = "table") #' #' @export collaboration_fizz <- function(data, hrvar = "Organization", mingroup = 5, return = "plot"){ create_fizz(data = data, metric = "Collaboration_hours", hrvar = hrvar, mingroup = mingroup, return = return) } #' @rdname collaboration_fizz #' @export collab_fizz <- collaboration_fizz
# Read the power-consumption data; "?" marks missing values in this file.
data <- read.csv("household_power_consumption.txt", header = TRUE, sep = ";",
                 na.strings = "?", check.names = FALSE,
                 stringsAsFactors = FALSE, comment.char = "", quote = '\"')

# Convert the Date column from "d/m/Y" text to R Date format.
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")

# Keep only the observations between 2007-02-01 and 2007-02-02 (inclusive).
data_sub <- subset(data, subset = (Date >= "2007-02-01" & Date <= "2007-02-02"))

# Combine date and time into a single POSIXct column for the x axis.
newDateTime <- paste(as.Date(data_sub$Date), data_sub$Time)
data_sub$newDateTime <- as.POSIXct(newDateTime)

# Plot 3: the three sub-metering series over time.
# FIX: the y-axis label previously said "Global Active Power (kilowatts)",
# which describes a different variable; these lines plot energy sub metering.
with(data_sub, {
  plot(Sub_metering_1 ~ newDateTime, type = "l",
       ylab = "Energy sub metering", xlab = "")
  lines(Sub_metering_2 ~ newDateTime, col = 'Red')
  lines(Sub_metering_3 ~ newDateTime, col = 'Blue')
})

# FIX: legend line width now matches the plotted lines (default lwd = 1);
# it was previously drawn with lwd = 2, which misrepresented the lines.
legend("topright", col = c("black", "red", "blue"), lty = 1, lwd = 1,
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))

# Copy the screen device to a 480x480 PNG file, then close the PNG device.
dev.copy(png, file = "GitHub/ExData_Plotting1/plots/plot3.png",
         height = 480, width = 480)
dev.off()
/plot3.R
no_license
olcaysah/ExData_Plotting1
R
false
false
1,058
r
# Read data to workspace; "?" marks missing values in this file.
data <- read.csv("household_power_consumption.txt", header = TRUE, sep = ";",
                 na.strings = "?", check.names=F, stringsAsFactors=F,
                 comment.char="", quote='\"')

# Convert date to R date format (input is day/month/year).
data$Date <- as.Date(data$Date, format="%d/%m/%Y")

# Get the subset of data which is between 2007-02-01 and 2007-02-02 (inclusive).
data_sub <- subset(data, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))

# Combine date and times and put them into a single POSIXct column
# that will serve as the x axis.
newDateTime <- paste(as.Date(data_sub$Date), data_sub$Time)
data_sub$newDateTime <- as.POSIXct(newDateTime)

# Plot 3: the three sub-metering series drawn as lines over time.
# NOTE(review): the y-axis label reads "Global Active Power (kilowatts)" but
# the plotted variables are Sub_metering_1..3 — confirm the intended label.
with(data_sub, {
  plot(Sub_metering_1~newDateTime, type="l",
       ylab="Global Active Power (kilowatts)", xlab="")
  lines(Sub_metering_2~newDateTime,col='Red')
  lines(Sub_metering_3~newDateTime,col='Blue')
})
# NOTE(review): legend uses lwd = 2 while the lines are drawn with the default
# width (1) — confirm which is intended.
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2,
       legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))

# Save to file: copy the current screen device to a 480x480 PNG, then close it.
dev.copy(png, file="GitHub/ExData_Plotting1/plots/plot3.png", height=480, width=480)
dev.off()
#' Create a traffic light plot
#'
#' This function creates for multiple time series an image plot where the color
#' code is based on selected quantiles or evenly spaced intervals.
#'
#' @name trafficlight
#' @param x matrix or data frame containing time series of multiple variables.
#' @param time vector of time units that will be used for the x axis.
#' @param sort_5yrmean logical; should the variables be sorted by the first 5yr
#'  mean? Default is set to TRUE.
#' @param sort_vec integer vector; if a specific order of variables is desired
#'  the sorting index should be provided here.
#' @param method character; the type of method to create the color code. Choose
#'  between "quantiles" (default) or "intervals".
#' @param probs a vector of probabilities to calculate the quantiles using
#'  \code{quantile}. Should include the probabilities 0 and 1. The number of
#'  resulting classes (\code{length(probs) - 1}) must match \code{length(cols)}.
#' @param quantile_type an integer between 1 and 9 selecting one of the nine
#'  quantile algorithms (see \code{\link{quantile}}). Default is 7.
#' @param intervals number of evenly spaced intervals. Default is 5.
#' @param cols a character vector with one color per quantile class or interval.
#' @param main a title.
#' @param xlab The x axis title. Default is none.
#' @param ylab The y axis title. Default is none.
#' @param adj_xlab integer; vertical adjustment of the x axis title.
#' @param adj_ylab integer; horizontal adjustment of the y axis title.
#' @param hadj_x double; horizontal adjustment of the x labels.
#' @param vadj_x double; vertical adjustment of the x labels.
#' @param hadj_y double; horizontal adjustment of the y labels.
#' @param vadj_y double; vertical adjustment of the y labels.
#' @param madj_bottom double; adjustment of bottom margin relative to the
#'  current setting.
#' @param madj_left double; adjustment of left margin relative to the
#'  current setting.
#' @param madj_top double; adjustment of top margin relative to the
#'  current setting.
#' @param madj_right double; adjustment of right margin relative to the
#'  current setting.
#' @param legend_pos character; the legend is shown on the "top" (default),
#'  "center", or "bottom" right of the plot.
#' @param legend_intersp_x double; character interspacing factor for horizontal
#'  (x) spacing in the legend.
#' @param legend_intersp_y double; interspacing factor for vertical (y) line
#'  distances in the legend.
#' @param tick_x logical; set to TRUE if ticks on the x-axis should be displayed.
#' @param tick_y logical; set to TRUE if ticks on the y-axis should be displayed.
#' @param cex_x double; magnification for the x labels relative to cex.
#' @param cex_y double; magnification for the y labels relative to cex.
#' @param cex_xlab double; magnification for the x axis title relative to cex.
#' @param cex_ylab double; magnification for the y axis title relative to cex.
#' @param cex_legend double; magnification for the legend relative to cex.
#' @param cex_main double; magnification for the title relative to cex.
#' @param respect logical; controls whether a unit column-width is the same
#'  physical measurement on the device as a unit row-height (default TRUE).
#'
#' @export
#' @author Saskia A. Otto
#' @examples
#' df <- data.frame(variable1 = 1:20, variable2= rnorm(20, 100, 30), variable3 = 1:20 + rnorm(20))
#' trafficlight(x = df, time = 1981:2000)
#' df <- matrix(rnorm(100), ncol = 5)
#' colnames(df) <- letters[1:5]
#' trafficlight(x = df, time = 1981:2000, legend_pos = "bottom", method = "intervals")
trafficlight <- function(x, time = NULL, sort_5yrmean = TRUE, sort_vec = NULL,
                         method = "quantiles", probs = seq(0, 1, 0.2),
                         quantile_type = 7, intervals = 5,
                         cols = c("green3", "greenyellow", "yellow", "gold", "red"),
                         main = "", xlab = "", ylab = "",
                         adj_xlab = NULL, adj_ylab = NULL,
                         hadj_x = 0.5, vadj_x = -0.7, hadj_y = -0.7, vadj_y = 0.3,
                         madj_bottom = 0, madj_left = 0, madj_top = 0,
                         madj_right = 0, legend_pos = "top",
                         legend_intersp_x = 1, legend_intersp_y = 1,
                         tick_x = FALSE, tick_y = FALSE,
                         cex_x = 0.8, cex_y = 0.8, cex_xlab = 1, cex_ylab = 1,
                         cex_legend = 0.8, cex_main = 1, respect = TRUE) {

  ### Data input validation
  if (!method %in% c("quantiles", "intervals")) {
    stop("Choose as method either 'quantiles' or 'intervals'.")
  }
  if (method == "intervals") {
    if (length(cols) != intervals) {
      stop("The number of colours need to match your chosen interval number.")
    }
  } else {
    # FIX: the quantile path also needs one colour per quantile class;
    # previously this was unchecked and (together with a hard-coded zlim,
    # see below) a mismatch silently mis-coloured the plot.
    if (length(cols) != length(probs) - 1) {
      stop("The number of colours need to match the number of quantile classes (length(probs) - 1).")
    }
  }

  # --------------------------
  z <- x
  # n[1] = number of time steps (rows), n[2] = number of variables (columns)
  n <- dim(z)
  # Variable names label the y axis; time units (or row indices) the x axis.
  ylabel <- colnames(z)
  if (!is.null(time)) {
    xlabel <- time
  } else {
    xlabel <- 1:n[1]
  }

  # Convert one variable's values into quantile class numbers (1..k).
  convert2quantiles <- function(v, probs, type, var) {
    br <- stats::quantile(v, probs = probs, na.rm = TRUE, type = type)
    if (any(diff(br) == 0)) {
      # Tied neighbouring quantiles would make cut() fail; nudge the upper
      # break slightly upwards and tell the user.
      sel <- which(diff(br) == 0)
      br[sel + 1] <- br[sel + 1] + 0.0001
      print(paste0("For variable '", var, "' the ", probs[sel + 1] * 100,
                   "%-quantile is the same as the ", probs[sel] * 100,
                   "%-quantile. All values are grouped under the lower quantile!"))
    }
    cut(v, breaks = br, include.lowest = TRUE, labels = FALSE)
  }

  # Convert one variable's values into evenly spaced interval numbers (1..k).
  convert2intervals <- function(v, intervals) {
    cut(v, breaks = intervals, include.lowest = TRUE, labels = FALSE)
  }

  zc <- z
  if (method == "quantiles") {
    for (i in seq_len(n[2])) {
      zc[, i] <- convert2quantiles(zc[, i], probs, type = quantile_type,
                                   var = names(z)[i])
    }
    # Legend labels (hoisted out of the loop: they do not depend on i).
    legend_txt <- paste0("< ", probs[-1] * 100, "%")
    nl <- length(legend_txt)
    legend_txt[nl] <- paste0("> ", probs[nl] * 100, "%")
    legend_txt <- c(legend_txt, "missing value")
  } else {
    if (intervals %% 1 == 0) { # is full number?
      for (i in seq_len(n[2])) {
        zc[, i] <- convert2intervals(zc[, i], intervals)
      }
      legend_txt <- as.character(1:intervals)
      legend_txt[1] <- paste0(legend_txt[1], " (low)")
      nl <- length(legend_txt)
      legend_txt[nl] <- paste0(legend_txt[nl], " (high)")
      legend_txt <- c(legend_txt, "missing value")
    } else {
      stop(" If you want to use evenly spaced intervals, provide an integer for the number of intervals!")
    }
  }

  ### Sort variables according to settings
  if (isTRUE(sort_5yrmean)) {
    # Standardised average over the first five data points per variable:
    # (mean of first 5 values - overall mean) / overall standard deviation.
    m5 <- vector(length = n[2])
    for (i in seq_len(n[2])) {
      m5[i] <- (mean(x[c(1:5), i], na.rm = TRUE) - mean(x[, i], na.rm = TRUE)) /
        stats::sd(x[, i], na.rm = TRUE)
    }
    ordvar <- order(m5)
  } else {
    if (is.null(sort_vec)) {
      ordvar <- n[2]:1
    } else {
      ordvar <- rev(sort_vec)
    }
  }
  zc_sort <- as.matrix(zc[, ordvar])

  ### Plot settings
  x <- 1:n[1]
  y <- 1:n[2]

  # Position of legend (always to the right of the image):
  if (legend_pos == "top") {
    xleg <- max(x) + 1
    yleg <- max(y) + .5
    yjustleg <- 1
  } else if (legend_pos == "center") {
    xleg <- max(x) + 1
    yleg <- max(y) / 2 + .5
    yjustleg <- 0.5
  } else if (legend_pos == "bottom") {
    xleg <- max(x) + 1
    yleg <- min(y) - .5
    yjustleg <- 0
  } else {
    stop("You need to choose as legend position 'bottom', 'center' or 'top'.")
  }

  # Margins: base values plus user adjustments; widen for titles if present.
  mar <- c(2 + madj_bottom, 5 + madj_left, 1 + madj_top, 8 + madj_right)
  if (nchar(xlab) > 0) mar[1] <- mar[1] + 1
  if (nchar(ylab) > 0) mar[2] <- mar[2] + 1
  if (nchar(main) > 0) mar[3] <- mar[3] + 2
  if (is.null(adj_xlab)) {
    adj_xlab <- mar[1] - 1
  }
  if (is.null(adj_ylab)) {
    adj_ylab <- mar[2] - 1
  }

  ### Plot
  graphics::layout(matrix(c(1, 1, 1, 1), ncol = 2), widths = c(3.5, 3.5),
                   heights = c(2, 2), respect = respect)
  graphics::par(mar = c(mar[1], mar[2], mar[3], mar[4]),
                oma = c(0.5, .5, 0, 0), xpd = TRUE)
  # FIX: zlim was hard-coded to c(1, 5); with a different number of quantile
  # classes or intervals the class-to-colour mapping was wrong. Use the
  # actual number of colour classes instead.
  graphics::image(x, y, z = zc_sort, zlim = c(1, length(cols)), col = cols,
                  axes = FALSE, xlab = "", ylab = "")
  # NOTE(review): `tick` is documented as logical; -.015 coerces to TRUE here —
  # possibly `tcl` was intended. Left unchanged to preserve behavior.
  if (isTRUE(tick_x)) graphics::axis(1, at = x, tick = -.015, labels = NA)
  if (isTRUE(tick_y)) graphics::axis(2, at = y, tick = -.015, labels = NA)
  graphics::axis(1, at = x, tick = FALSE, labels = xlabel, cex.axis = cex_x,
                 las = 3, line = vadj_x, padj = hadj_x)
  graphics::axis(2, at = y, tick = FALSE, labels = ylabel[ordvar],
                 cex.axis = cex_y, las = 1, line = hadj_y, padj = vadj_y)
  graphics::box()
  # FIX: removed a trailing empty argument (stray comma) in this call.
  graphics::legend(x = xleg, y = yleg, legend = legend_txt,
                   fill = c(cols, "white"), cex = cex_legend, bty = "n",
                   xjust = .1, yjust = yjustleg,
                   x.intersp = legend_intersp_x, y.intersp = legend_intersp_y)
  graphics::title(main, cex.main = cex_main)
  if (nchar(xlab) > 0) {
    graphics::mtext(text = xlab, side = 1, line = adj_xlab, cex = cex_xlab)
  }
  if (nchar(ylab) > 0) {
    graphics::mtext(text = ylab, side = 2, line = adj_ylab, cex = cex_ylab)
  }
}
/R/trafficlight.R
no_license
saskiaotto/IEAtools
R
false
false
9,599
r
#' Create a traffic light plot
#'
#' This function creates for multiple time series an image plot where the color
#' code is based on selected quantiles or evenly spaced intervals.
#'
#' @name trafficlight
#' @param x matrix or data frame containing time series of multiple variables.
#' @param time vector of time units that will be used for the x axis.
#' @param sort_5yrmean logical; should the variables be sorted by the first 5yr mean? Default
#'  is set to TRUE.
#' @param sort_vec integer vector; if specific order of variable is desired the sorting index should be
#'  provided here.
#' @param method character; the type of methods to create the color code. Choose
#'  between using "quantiles" (default) or "intervals".
#' @param probs a vector of probabilities to calculate the quantiles using the function
#'  \code{quantile}. Should include the probabilities 0 and 1.
#' @param quantile_type an integer between 1 and 9 selecting one of the nine quantile
#'  algorithms to be used. Default is 7 (see also \code{\link{quantile}}).
#' @param intervals number of evenly spaced intervals. Default is 5.
#' @param cols a character vector with colors for each quantile or interval.
#' @param main a title.
#' @param xlab The x axis title. Default is none.
#' @param ylab The y axis title. Default is none.
#' @param adj_xlab integer; vertical adjustment of the x axis title.
#' @param adj_ylab integer; horizontal adjustment of the y axis title.
#' @param hadj_x double; horizontal adjustment of the x labels.
#' @param vadj_x double; vertical adjustment of the x labels.
#' @param hadj_y double; horizontal adjustment of the y labels.
#' @param vadj_y double; vertical adjustment of the y labels.
#' @param madj_bottom double; adjustment of bottom margin relative to the current setting.
#' @param madj_left double; adjustment of left margin relative to the current setting.
#' @param madj_top double; adjustment of top margin relative to the current setting.
#' @param madj_right double; adjustment of right margin relative to the current setting.
#' @param legend_pos character; the legend is shown on the "top" (default),
#'  "center", or "bottom" right of the plot.
#' @param legend_intersp_x double; character interspacing factor for horizontal (x) spacing in legend.
#' @param legend_intersp_y double; interspacing factor for vertical (y) line distances in legend.
#' @param tick_x logical; set to TRUE if ticks on x-axis should be displayed.
#' @param tick_y logical; set to TRUE if ticks on y-axis should be displayed.
#' @param cex_x double; the magnification to be used for the x labels relative to cex.
#' @param cex_y double; the magnification to be used for the y labels relative to cex.
#' @param cex_xlab double; the magnification to be used for the x axis title relative to cex.
#' @param cex_ylab double; the magnification to be used for the y axis title relative to cex.
#' @param cex_legend double; the magnification to be used for the legend relative to cex.
#' @param cex_main double; the magnification to be used for the title relative to cex.
#' @param respect logical; controls whether a unit column-width is the
#'  same physical measurement on the device as a unit row-height (default TRUE).
#'
#' @export
#' @author Saskia A. Otto
#' @examples
#' df <- data.frame(variable1 = 1:20, variable2= rnorm(20, 100, 30), variable3 = 1:20 + rnorm(20))
#' trafficlight(x = df, time = 1981:2000)
#' df <- matrix(rnorm(100), ncol = 5)
#' colnames(df) <- letters[1:5]
#' trafficlight(x = df, time = 1981:2000, legend_pos = "bottom", method = "intervals")
trafficlight <- function(x, time = NULL, sort_5yrmean = TRUE, sort_vec = NULL,
  method = "quantiles", probs = seq(0, 1, 0.2), quantile_type = 7,
  intervals = 5, cols = c("green3", "greenyellow", "yellow", "gold","red"),
  main = "", xlab = "", ylab = "", adj_xlab = NULL, adj_ylab = NULL,
  hadj_x = 0.5, vadj_x = -0.7, hadj_y = -0.7, vadj_y = 0.3,
  madj_bottom = 0, madj_left = 0, madj_top = 0, madj_right = 0,
  legend_pos = "top", legend_intersp_x = 1, legend_intersp_y = 1,
  tick_x = FALSE, tick_y = FALSE, cex_x = 0.8, cex_y = 0.8,
  cex_xlab = 1, cex_ylab = 1, cex_legend = 0.8, cex_main = 1,
  respect = TRUE) {

  ### Data input validation
  if (!method %in% c("quantiles", "intervals")) {
    stop("Choose as method either 'quantiles' or 'intervals'.")
  }
  if (method == "intervals") {
    # NOTE(review): this colour-count check only covers the "intervals" path;
    # the "quantiles" path is unchecked — confirm cols always matches
    # length(probs) - 1 there.
    if (length(cols) != intervals) {
      stop("The number of colours need to match your chosen interval number.")
    }
  }

  # --------------------------
  z <- x
  # Save the dimensions (number of rows[1] and columns[2]) in a vector
  n <- dim(z)
  # Get names for the y-axis
  ylabel <- colnames(z)
  if (!is.null(time)) {
    xlabel <- time
  } else {
    xlabel <- 1:n[1]
  }

  # Convert one variable's original values into quantile class numbers.
  convert2quantiles <- function(v, probs, type, var) {
    br <- stats::quantile(v, probs = probs, na.rm = TRUE, type = type)
    if (any(diff(br) == 0)) {
      # Tied neighbouring quantiles would break cut(); nudge the upper break
      # slightly and inform the user via print().
      sel <- which(diff(br) == 0)
      br[sel+1] <- br[sel+1] + 0.0001 # br[sel]/10000
      print(paste0("For variable '", var, "' the ", probs[sel+1]*100,
        "%-quantile is the same as the ", probs[sel]*100,
        "%-quantile. All values are grouped under the lower quantile!"))
    }
    qv <- cut(v, breaks = br, include.lowest = TRUE, labels = FALSE)
    return(qv)
  }

  # Convert one variable's original values into evenly spaced interval numbers.
  convert2intervals <- function(v, intervals) {
    qv <- cut(v, breaks = intervals, include.lowest = TRUE, labels = FALSE)
    return(qv)
  }

  zc <- z
  if (method == "quantiles") {
    for (i in 1:n[2]) {
      zc[,i] <- convert2quantiles(zc[,i], probs, type = quantile_type,
        var = names(z)[i])
      # NOTE(review): the legend labels below are rebuilt on every loop pass
      # although they do not depend on i — could be hoisted out of the loop.
      legend_txt <- paste0("< ", probs[-1]*100, "%")
      nl <- length(legend_txt)
      legend_txt[nl] <- paste0("> ", probs[nl]*100, "%")
      legend_txt <- c(legend_txt, "missing value")
    }
  } else {
    if (intervals %% 1 == 0) { # is full number?
      for (i in 1:n[2]) {
        zc[,i] <- convert2intervals(zc[,i], intervals)
        legend_txt <- as.character(1:intervals)
        legend_txt[1] <- paste0(legend_txt[1], " (low)")
        nl <- length(legend_txt)
        legend_txt[nl] <- paste0(legend_txt[nl], " (high)")
        legend_txt <- c(legend_txt, "missing value")
      }
    } else {
      stop(" If you want to use evenly spaced intervals, provide an integer for the number of intervals!")
    }
  }

  ### Sort variables according to settings
  if (isTRUE(sort_5yrmean)) {
    m5 <- vector(length = n[2])
    # Then fill the vector with the standardised variable averages over the
    # first five data points (mean of first 5 years - mean of full time series /
    # standard deviation of full time series)
    for (i in 1:(n[2])) {
      m5[i] <- (mean(x[c(1:5), i], na.rm = TRUE) - mean(x[, i], na.rm = TRUE)) /
        stats::sd(x[, i], na.rm = TRUE)
    }
    # Finally, order the variable and create an index vector
    ordvar <- order(m5)
  } else {
    if (is.null(sort_vec)) {
      ordvar <- n[2]:1
    } else {
      ordvar <- rev(sort_vec)
    }
  }
  zc_sort <- as.matrix(zc[ ,ordvar])

  ### Plot settings
  x <- 1:n[1]
  y <- 1:n[2]

  # Position of legend (always placed to the right of the image):
  if (legend_pos == "top") {
    xleg <- max(x)+1
    yleg <- max(y)+.5
    yjustleg <- 1
  } else {
    if (legend_pos == "center") {
      xleg <- max(x)+1
      yleg <- max(y)/2+.5
      yjustleg <- 0.5
    } else {
      if (legend_pos == "bottom") {
        xleg <- max(x)+1
        yleg <- min(y)-.5
        yjustleg <- 0
      } else {
        stop("You need to choose as legend position 'bottom', 'center' or 'top'.")
      }
    }
  }

  # Margins: base values plus user adjustments; widened when titles are set.
  mar <- c(2+madj_bottom, 5+madj_left, 1+madj_top, 8+madj_right)
  if (nchar(xlab) > 0) mar[1] <- mar[1] + 1
  if (nchar(ylab) > 0) mar[2] <- mar[2] + 1
  if (nchar(main) > 0) mar[3] <- mar[3] + 2
  if (is.null(adj_xlab)) {
    adj_xlab <- mar[1]-1
  } else {
    adj_xlab <- adj_xlab
  }
  if (is.null(adj_ylab)) {
    adj_ylab <- mar[2]-1
  } else {
    adj_ylab <- adj_ylab
  }

  ### Plot
  graphics::layout(matrix(c(1,1,1,1), ncol = 2), widths = c(3.5,3.5),
    heights = c(2,2), respect = respect)
  graphics::par(mar = c(mar[1], mar[2], mar[3], mar[4]),
    oma = c(0.5,.5,0,0), xpd = TRUE)
  # NOTE(review): zlim is hard-coded to c(1, 5); with a non-default number of
  # quantile classes or intervals the colour mapping may be wrong — confirm.
  graphics::image(x, y, z = zc_sort, zlim = c(1,5), col = cols,
    axes = FALSE, xlab = "", ylab = "")
  if (isTRUE(tick_x)) graphics::axis(1, at = x, tick = -.015, labels = NA)
  if (isTRUE(tick_y)) graphics::axis(2, at = y, tick = -.015, labels = NA)
  graphics::axis(1, at = x, tick = FALSE, labels = xlabel, cex.axis = cex_x,
    las = 3, line = vadj_x, padj = hadj_x)
  graphics::axis(2, at = y, tick = FALSE, labels = ylabel[ordvar],
    cex.axis = cex_y, las = 1, line = hadj_y, padj = vadj_y)
  graphics::box()
  # NOTE(review): this call carries a trailing empty argument (stray comma) —
  # confirm it is harmless on the target R version.
  graphics::legend(x = xleg, y = yleg, legend = legend_txt,
    fill = c(cols, "white"), cex = cex_legend, bty = "n",
    xjust = .1, yjust = yjustleg,
    x.intersp = legend_intersp_x, y.intersp = legend_intersp_y, )
  graphics::title(main, cex.main = cex_main)
  if (nchar(xlab) > 0) {
    graphics::mtext(text = xlab, side = 1, line = adj_xlab, cex = cex_xlab)
  }
  if (nchar(ylab) > 0) {
    graphics::mtext(text = ylab, side = 2, line = adj_ylab, cex = cex_ylab)
  }
}
####################
# Machine Learning #
#  Decision Trees  #
#                  #
####################

# FIX: removed `rm(list = ls())` and `setwd("~")` — wiping the workspace and
# changing the working directory inside a script are side effects that hurt
# reproducibility; run the script from the project root instead.

#install.packages(c("C50", "gmodels"))
library(tidyverse)
library(gmodels)  # tools to evaluate the decision tree (CrossTable)
library('C50')    # builds the decision tree

## ---------- Decision-tree measures
# Entropy of a two-class segment for several class proportions.
-0.50 * log2(0.50) - 0.50 * log2(0.50)
-0.60 * log2(0.60) - 0.40 * log2(0.40)
-0.70 * log2(0.70) - 0.30 * log2(0.30)
-0.80 * log2(0.80) - 0.20 * log2(0.20)
-0.90 * log2(0.90) - 0.10 * log2(0.10)

# Plot the entropy curve over all proportions in (0, 1).
curve(-x * log2(x) - (1 - x) * log2(1 - x),
      col = "red", xlab = "x", ylab = "Entropy", lwd = 1)

# Read data: Wisconsin Breast Cancer Diagnostic database, 569 samples.
dir1 <- "dataScience"
wbcd <- read.csv(paste(dir1, "wisc_bc_data.csv", sep = "/"),
                 stringsAsFactors = FALSE)
# credit <- read.csv(paste(dir1, "credit.csv", sep="/"), stringsAsFactors = TRUE)

# Inspect the structure:
# 32 columns — 1 unique identifier, 1 diagnosis, 30 numeric features.
str(wbcd)
view(wbcd)

## Keep only the necessary columns: drop the id column — it carries no
## information for the classifier and would produce irrelevant results.
wbcd <- wbcd[-1]
str(wbcd)

# Tabulate the diagnoses.
table(wbcd$diagnosis)

# Recode the diagnosis column as a factor: classification algorithms require
# the target (response) column to be a factor.
wbcd$diagnosis <- factor(wbcd$diagnosis, levels = c("B", "M"),
                         labels = c("Benigno", "Maligno"))

# Diagnosis distribution as percentages.
round(prop.table(table(wbcd$diagnosis)) * 100, digits = 2)

## Training: split the data into a training set (80%) and a test set.
nfilas <- floor(nrow(wbcd) * .80)
set.seed(123)
index <- sample(seq_len(nrow(wbcd)), nfilas)  # 80%

wbcd_train <- wbcd[index, -1]    # features only, training rows
wbcd_test <- wbcd[-index, -1]    # features only, everything else
# Keep the class labels (B or M) from the first column separately.
wbcd_train_labels <- wbcd[index, 1]
wbcd_test_labels <- wbcd[-index, 1]

str(wbcd_train_labels)

# Build the model.
wbcd_model <- C5.0(wbcd_train, wbcd_train_labels)
wbcd_model
summary(wbcd_model)

## ------------- ------------- ------------- ------------- -------------
# Evaluate the model: predict on the held-out test set and cross-tabulate
# predictions against the true labels.
wbcd_pred <- predict(wbcd_model, wbcd_test)
CrossTable(wbcd_test_labels, wbcd_pred,
           prop.chisq = FALSE, prop.c = FALSE, prop.r = FALSE)

## ------------- ------------- ------------- ------------- -------------
# Boosting with 10 trials.
wbcd_boost10_model <- C5.0(wbcd_train, wbcd_train_labels, trials = 10)
wbcd_boost10_model
summary(wbcd_boost10_model)
wbcd_boost_pred10 <- predict(wbcd_boost10_model, wbcd_test)
CrossTable(wbcd_test_labels, wbcd_boost_pred10,
           prop.chisq = FALSE, prop.c = FALSE, prop.r = FALSE,
           dnn = c('actual', 'predicción'))
/cancer.R
no_license
IVANRA7714/dataScience
R
false
false
3,141
r
# NOTE(review): rm(list = ls()) and setwd() inside a script are discouraged —
# they wipe the caller's workspace and change global state; consider running
# from the project root instead.
rm(list=ls())
setwd("~")
####################
# Machine Learning #
#  Decision Trees  #
#                  #
####################
#install.packages(c("C50", "gmodels"))
library(tidyverse)
library(gmodels) # tools to evaluate the decision tree (CrossTable)
library('C50')   # builds the decision tree

## ---------- Decision-tree measures
# Entropy of a two-class segment for several class proportions.
-0.50 * log2(0.50) - 0.50 * log2(0.50)
-0.60 * log2(0.60) - 0.40 * log2(0.40)
-0.70 * log2(0.70) - 0.30 * log2(0.30)
-0.80 * log2(0.80) - 0.20 * log2(0.20)
-0.90 * log2(0.90) - 0.10 * log2(0.10)

# Plot the entropy curve over all proportions in (0, 1).
curve(-x * log2(x) - (1 - x) * log2(1 - x),
      col = "red", xlab = "x", ylab = "Entropy", lwd = 1)

# Read data: Wisconsin Breast Cancer Diagnostic database, 569 samples.
dir1 <- "dataScience"
wbcd <- read.csv(paste(dir1, "wisc_bc_data.csv", sep="/"), stringsAsFactors = FALSE)
# credit <- read.csv(paste(dir1, "credit.csv", sep="/"), stringsAsFactors = TRUE)

# Inspect the structure:
# 32 columns — 1 unique identifier, 1 diagnosis, 30 numeric features.
str(wbcd)
view(wbcd)

## Keep only the necessary columns: drop the id column — it is not needed by
## the classifier; leaving it in would produce irrelevant results.
wbcd <- wbcd[-1]
str(wbcd)

# Tabulate the diagnoses.
table(wbcd$diagnosis)

# Recode the diagnosis column as a factor: classification algorithms require
# the target (response) column to be a factor.
wbcd$diagnosis <- factor(wbcd$diagnosis, levels = c("B", "M"),
                         labels = c("Benigno", "Maligno"))

# Diagnosis distribution as percentages.
round(prop.table(table(wbcd$diagnosis)) * 100, digits = 2)

## Training: split the data into a training set and a test set.
nfilas <- floor(nrow(wbcd) * .80)
set.seed(123)
index <- sample(1:nrow(wbcd), nfilas) # 80%

wbcd_train <- wbcd[index, -1]  # features only, training rows
wbcd_test <- wbcd[-index, -1]  # features only, everything else
wbcd_train_labels <- wbcd[index, 1]
wbcd_test_labels <- wbcd[-index, 1]

# Keep the class labels (B or M) from the first column separately.
#wbcd_train_labels <- wbcd[1:nfilas, 1]
#wbcd_test_labels <- wbcd[(nfilas+1):nfilas, 1]
str(wbcd_train_labels)

# Build the model.
wbcd_model <- C5.0(wbcd_train, wbcd_train_labels)
wbcd_model
summary(wbcd_model)

## ------------- ------------- ------------- ------------- -------------
# Evaluate the model: predict on the held-out test set and cross-tabulate
# predictions against the true labels.
wbcd_pred <- predict(wbcd_model, wbcd_test)
CrossTable(wbcd_test_labels, wbcd_pred,
           prop.chisq = FALSE, prop.c = FALSE, prop.r = FALSE)

## ------------- ------------- ------------- ------------- -------------
# Boosting with 10 trials.
wbcd_boost10_model <- C5.0(wbcd_train, wbcd_train_labels,trials = 10)
wbcd_boost10_model
summary(wbcd_boost10_model)
wbcd_boost_pred10 <- predict(wbcd_boost10_model, wbcd_test)
CrossTable(wbcd_test_labels, wbcd_boost_pred10,
           prop.chisq = FALSE, prop.c = FALSE, prop.r = FALSE,
           dnn = c('actual', 'predicción'))
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/paws.comprehend_operations.R \name{delete_document_classifier} \alias{delete_document_classifier} \title{Deletes a previously created document classifier} \usage{ delete_document_classifier(DocumentClassifierArn) } \arguments{ \item{DocumentClassifierArn}{[required] The Amazon Resource Name (ARN) that identifies the document classifier.} } \description{ Deletes a previously created document classifier } \details{ Only those classifiers that are in terminated states (IN\_ERROR, TRAINED) will be deleted. If an active inference job is using the model, a \code{ResourceInUseException} will be returned. This is an asynchronous action that puts the classifier into a DELETING state, and it is then removed by a background job. Once removed, the classifier disappears from your account and is no longer available for use. } \section{Accepted Parameters}{ \preformatted{delete_document_classifier( DocumentClassifierArn = "string" ) } }
/service/paws.comprehend/man/delete_document_classifier.Rd
permissive
CR-Mercado/paws
R
false
true
1,018
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/paws.comprehend_operations.R \name{delete_document_classifier} \alias{delete_document_classifier} \title{Deletes a previously created document classifier} \usage{ delete_document_classifier(DocumentClassifierArn) } \arguments{ \item{DocumentClassifierArn}{[required] The Amazon Resource Name (ARN) that identifies the document classifier.} } \description{ Deletes a previously created document classifier } \details{ Only those classifiers that are in terminated states (IN\_ERROR, TRAINED) will be deleted. If an active inference job is using the model, a \code{ResourceInUseException} will be returned. This is an asynchronous action that puts the classifier into a DELETING state, and it is then removed by a background job. Once removed, the classifier disappears from your account and is no longer available for use. } \section{Accepted Parameters}{ \preformatted{delete_document_classifier( DocumentClassifierArn = "string" ) } }