content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
# Fill gaps in the 6-month UK yield series from a supplementary CSV.
# Reads ./Data/UK/Yield/6m.csv (US-style %m/%d/%Y dates), converts the zoo
# series to xts, and patches only the NA entries of uk_yield_curve$`6`,
# matching on the yield curve's own time index.
m6 <- read.zoo("./Data/UK/Yield/6m.csv", header = TRUE, sep = ",", index.column = 1, format = "%m/%d/%Y")
m6 <- xts(m6, order.by = time(m6))
# FIX: use <- for assignment and consistent backtick quoting of the column
# name (original mixed '6' and `6` on the same line and assigned with =).
uk_yield_curve$`6`[is.na(uk_yield_curve$`6`)] <- m6[time(uk_yield_curve)[is.na(uk_yield_curve$`6`)]]
remove(m6)
|
/Utils/missing_uk_6m.R
|
no_license
|
Stanimir-Ivanov/Diebold-et-al-2008-Replication
|
R
| false
| false
| 253
|
r
|
# Fill gaps in the 6-month UK yield series from a supplementary CSV.
# Reads ./Data/UK/Yield/6m.csv (US-style %m/%d/%Y dates), converts the zoo
# series to xts, and patches only the NA entries of uk_yield_curve$`6`,
# matching on the yield curve's own time index.
m6 <- read.zoo("./Data/UK/Yield/6m.csv", header = TRUE, sep = ",", index.column = 1, format = "%m/%d/%Y")
m6 <- xts(m6, order.by = time(m6))
# FIX: use <- for assignment and consistent backtick quoting of the column
# name (original mixed '6' and `6` on the same line and assigned with =).
uk_yield_curve$`6`[is.na(uk_yield_curve$`6`)] <- m6[time(uk_yield_curve)[is.na(uk_yield_curve$`6`)]]
remove(m6)
|
# Calculate key temperature parameters for each 100m cell
# Process by year and block
# Output by block x risk: xyz file of seasonal values with z as year
#
# NOTE(review): depends on objects created by an external setup script
# (land5km.r, demuk, JDdmy, DMYjd, dir_results, and the raster package);
# it cannot run stand-alone.
# Carson job variables
args <- commandArgs(trailingOnly = TRUE)
print(args)
start.day <- as.integer(args[1])
start.month <- as.integer(args[2])
start.year <- as.integer(args[3])
end.day <- as.integer(args[4])
end.month <- as.integer(args[5])
end.year <- as.integer(args[6])
ukcpcell <- as.integer(args[7])
# Get dem.block for cell
print(paste("UKCP cell= ", ukcpcell, sep = ""))
print("Get Cell Coordinates")
gridmask.r <- land5km.r # land5km.r defined in setup
vals <- getValues(gridmask.r)
xy <- xyFromCell(gridmask.r, 1:ncell(gridmask.r))
sel <- which(vals == 1)
landcells <- xy[sel, 1:2] # = coordinates for middle of each ukcp09 cell
print(dim(landcells))
x <- landcells[ukcpcell, 1]
y <- landcells[ukcpcell, 2]
# 5km block centred on the cell (2500 m each side of the centre)
e.block <- extent(x - 2500, x + 2500, y - 2500, y + 2500)
dem.block <- crop(demuk, e.block)
plot(dem.block)
#source("/home/ISAD/jm622/rscripts/setup_carson.R") # loads & runs setup file (jd functions, dem etc)
## ASSUME start and end correspond to start and end of single YEAR or GROWING SEASON
#start.day<-1; start.month<-7; start.year<-1992
#end.day<-2; end.month<-7; end.year<-1992
# dir_temp<-"C:/Data2015/Temp5km/extract/"
# dir_results<-"C:/Results2015/year_stats_5km/"
# dir_allyr<-"C:/Results2015/allyear_stats_5km"
# Uses 5km grid mask: grid5km.r
# dir_grids<-"C:/Data2015/Templates/"
# dir_finalt<-"C:/Data2015/Temp5km/hourly/"
dir_finalt <- "~/Documents/Exeter/Data2015/Temp100m/"
# Hard-coded overrides of the command-line arguments (testing values)
start.year <- 1991
end.year <- 1999
ukcpcell <- 900
#year<-start.year
start.jd <- JDdmy(1, 1, start.year)
end.jd <- JDdmy(31, 12, end.year)
print(start.jd)
print(end.jd)
#jd<-start.jd
####################################################################################
# for (ukcpcell in cells[1]:cells[length(cells)]){}
# for (year in start.year:end.year){}
# NOTE(review): 'year' is never assigned in this script - presumably supplied
# by the commented-out year loop above; confirm before sourcing stand-alone.
print(paste("Analysing data for Year= ", year, sep = ""))
# load this year's temperature data at 100m and hourly resolution
infile <- paste(dir_finalt, "block-", ukcpcell, "-", year, ".R", sep = "")
print(infile)
load(infile) # = tmodel.r
# calculate number of days in yr (third dimension = hour of year)
days.in.yr <- dim(tmodel.r)[3] / 24
print(days.in.yr)
####################################################################################
# Calculate daily stats for whole year- ie daily min/max etc
####################################################################################
tmin.year <- stack()
tmax.year <- stack()
tmean.year <- stack()
for (doy in 1:days.in.yr) {
  # Extract 24h of temperature data
  start <- 1 + (doy - 1) * 24
  end <- doy * 24
  #print (paste(start," ",end))
  t.24h <- tmodel.r[, , start:end]
  # Calculate daily statistics
  tmin.24h <- apply(t.24h, c(1, 2), min)
  tmax.24h <- apply(t.24h, c(1, 2), max)
  tmean.24h <- apply(t.24h, c(1, 2), mean)
  # Add layers to stack of summary variables STACK or ARRAY?
  tmin.year <- addLayer(tmin.year, raster(as.matrix(tmin.24h), template = dem.block))
  tmax.year <- addLayer(tmax.year, raster(as.matrix(tmax.24h), template = dem.block))
  tmean.year <- addLayer(tmean.year, raster(as.matrix(tmean.24h), template = dem.block))
}
####################################################################################
# Calculate Seasonal Rasters
####################################################################################
# Define Growing Season (or part of year of interest)
start.gs <- 1; end.gs <- nlayers(tmean.year)
# 1. Calculate last spring and first fall frost - not limited to growing season
# Calc last spring frost <= 2 C (the code tests x <= 2; original comment said 1 C)
# Assumes no frost between end of May & early September - so reduces vector to 1-150 doy
spfrdata.s <- subset(tmin.year, 1:150)
start.v <- rep(start.gs, (nlayers(spfrdata.s)))
spfrost.r <- calc(spfrdata.s, fun = function(x) {ifelse(length(which(x <= 2)) > 0, tail(which(x <= 2), 1) + start.v, 1)}) # extract layer of last frost day
spfrost.r <- mask(spfrost.r, dem.block, maskvalue = NA)
# FIX: plot titles used DMYjd(jd)$year but 'jd' is never assigned (only the
# commented-out #jd<-start.jd above); use 'year' as the other titles do.
plot(spfrost.r, main = paste("Last spring frost day ", year, sep = ""))
# Calculate first autumn frost (after early sept)
autfrdata.s <- subset(tmin.year, 240:nlayers(tmin.year))
start.v <- rep(240, (nlayers(autfrdata.s)))
autfrost.r <- calc(autfrdata.s, fun = function(x) {ifelse(length(which(x <= 2)) > 0, head(which(x <= 2), 1) + start.v, nlayers(tmin.year))}) # extract layer of first frost day
autfrost.r <- mask(autfrost.r, dem.block, maskvalue = NA)
plot(autfrost.r, main = paste("First autumn frost day ", year, sep = ""))
# Calculate frost free period
frostfree.r <- overlay(spfrost.r, autfrost.r, fun = function(x, y) {return(y - x)})
plot(frostfree.r, main = paste("Frost free period of year ", year, sep = ""))
# 2. Calculate growing season stats - temperature extremes, gdd etc
#start.gs<-90; end.gs=305
# correct start and end dates to reflect zone of interest
#end.jd<-start.jd+end.gs-1
#start.jd<-start.jd+start.gs-1
# OR SET GS to first and last frosts??
# Calc gdd (growing degree days: sum of daily mean T above Tbase)
Tbase <- 10; tbase.v <- rep(Tbase, (end.gs - start.gs + 1))
gdd10.r <- calc(subset(tmean.year, start.gs:end.gs), fun = function(x) {sum(x - tbase.v)})
gdd10.r <- mask(gdd10.r, dem.block, maskvalue = NA)
plot(gdd10.r, main = paste("GDD10 ", year, " Tbase= ", Tbase, sep = ""))
Tbase <- 5; tbase.v <- rep(Tbase, (end.gs - start.gs + 1))
gdd5.r <- calc(subset(tmean.year, start.gs:end.gs), fun = function(x) {sum(x - tbase.v)})
plot(gdd5.r, main = paste("GDD5 ", year, " Tbase= ", Tbase, sep = ""))
# Calc mean T
meangst.r <- calc(subset(tmean.year, start.gs:end.gs), fun = function(x) {mean(x)})
plot(meangst.r, main = paste("MeanT ", year, sep = ""))
# Calc max T
maxgst.r <- calc(subset(tmax.year, start.gs:end.gs), fun = function(x) {max(x)})
plot(maxgst.r, main = paste("Max T ", year, sep = ""))
# Calc #days where max temp> 20C, 25C, 30C from April-Oct
# FIX: original used undefined 'tmax.s' and masked with the 5km-version mask
# 'grid5km.r' (also undefined here); use tmax.year and dem.block as elsewhere.
days20.r <- calc(subset(tmax.year, start.gs:end.gs), fun = function(x) {ifelse(length(which(x >= 20)) > 0, length(which(x >= 20)), 0)})
days20.r <- mask(days20.r, dem.block, maskvalue = NA)
days25.r <- calc(subset(tmax.year, start.gs:end.gs), fun = function(x) {ifelse(length(which(x >= 25)) > 0, length(which(x >= 25)), 0)})
days25.r <- mask(days25.r, dem.block, maskvalue = NA)
days30.r <- calc(subset(tmax.year, start.gs:end.gs), fun = function(x) {ifelse(length(which(x >= 30)) > 0, length(which(x >= 30)), 0)})
days30.r <- mask(days30.r, dem.block, maskvalue = NA)
plot(days20.r, main = paste("Days=>20 ", year, sep = ""))
plot(days25.r, main = paste("Days=>25 ", year, sep = ""))
plot(days30.r, main = paste("Days=>30 ", year, sep = ""))
# Calc min T in growing season (FIX: original used undefined 'tmin.s')
mingst.r <- calc(subset(tmin.year, start.gs:end.gs), fun = function(x) {min(x)})
plot(mingst.r, main = paste("Min T ", year, sep = ""))
####################################################################################
# Output raster files for year - seasonal characteristics by block
####################################################################################
gdd10.fileout <- paste(dir_results, "block-", ukcpcell, "-gdd10-", year, ".tif", sep = "")
gdd5.fileout <- paste(dir_results, "block-", ukcpcell, "-gdd5-", year, ".tif", sep = "")
meant.fileout <- paste(dir_results, "block-", ukcpcell, "-meant-", year, ".tif", sep = "")
maxt.fileout <- paste(dir_results, "block-", ukcpcell, "-maxt-", year, ".tif", sep = "")
days20.fileout <- paste(dir_results, "block-", ukcpcell, "-days20-", year, ".tif", sep = "")
days25.fileout <- paste(dir_results, "block-", ukcpcell, "-days25-", year, ".tif", sep = "")
days30.fileout <- paste(dir_results, "block-", ukcpcell, "-days30-", year, ".tif", sep = "")
mint.fileout <- paste(dir_results, "block-", ukcpcell, "-mint-", year, ".tif", sep = "")
spfrost.fileout <- paste(dir_results, "block-", ukcpcell, "-spfrost-", year, ".tif", sep = "")
autfrost.fileout <- paste(dir_results, "block-", ukcpcell, "-autfrost-", year, ".tif", sep = "")
frostfree.fileout <- paste(dir_results, "block-", ukcpcell, "-frostfree-", year, ".tif", sep = "")
writeRaster(gdd10.r, file = gdd10.fileout, format = "GTiff")
writeRaster(gdd5.r, file = gdd5.fileout, format = "GTiff")
writeRaster(meangst.r, file = meant.fileout, format = "GTiff") # FIX: meant.fileout was built but never written
writeRaster(maxgst.r, file = maxt.fileout, format = "GTiff")   # FIX: was undefined 'maxt.r'
writeRaster(days20.r, file = days20.fileout, format = "GTiff")
writeRaster(days25.r, file = days25.fileout, format = "GTiff")
writeRaster(days30.r, file = days30.fileout, format = "GTiff")
writeRaster(mingst.r, file = mint.fileout, format = "GTiff")   # FIX: was undefined 'mint.r'
writeRaster(spfrost.r, file = spfrost.fileout, format = "GTiff")
writeRaster(autfrost.r, file = autfrost.fileout, format = "GTiff")
writeRaster(frostfree.r, file = frostfree.fileout, format = "GTiff")
# CUT OUTS
# TESTING ON INTERPOLATED 5km DATA
# FIX: the remainder of this file was incomplete work-in-progress that
# referenced undefined objects (t100m.day, tmin.day, t100.24) and contained
# dangling assignments (day.GDH10<- etc.) which chain-parsed into the
# following statements. Commented out so the script can be sourced cleanly.
#day.file<-paste(dir_hrtemp,"HrTemp_", DMYjd(jd)$year[1], "-",sprintf("%02d",DMYjd(jd)$month[1],sep=""),"-", sprintf("%02d",DMYjd(jd)$day[1],sep=""),"_100m.r", sep="")
#print(day.file)
#load( file=file.out) # loads: t100m.day
# Calculate daily statistics
#day.tmin<-apply(t100m.day, c(1,2), min)
#day.tmax<-apply(t100m.day, c(1,2), max)
#day.tmean<-apply(t100m.day, c(1,2), mean)
#day.GDH10<-
#day.GDH0<-
#day.GDH15<-
#day.GDH20<-
#day.GDH25<-
#day.GDH30<-
#day.FDH0<-
#day.FDH2<-
#day.H25<-
#day.H30<-
# Daily summary stats= max, min, mean T, GDhr (2 ver - 0C, 10C, 15C??), ~hrs>maxT, #hrs<minT, frost degree hrs = FDhr,
# ...
# Add layers to stack of summary variables
#tmin.year<-addLayer(tmin.year,tmin.day)
# Calculate by cell - vector analysis after extracting from stack??
#for (cell in 1:ncell(t100.24[[1]])){
#}
# Or for every day calculate summary stats and create yearly stack with 365 layers - apply functions to bricks
|
/R/Analysis/t100_year_stats.R
|
no_license
|
jrmosedale/microclimates
|
R
| false
| false
| 9,198
|
r
|
# Calculate key temperature parameters for each 100m cell
# Process by year and block
# Output by block x risk: xyz file of seasonal values with z as year
#
# NOTE(review): depends on objects created by an external setup script
# (land5km.r, demuk, JDdmy, DMYjd, dir_results, and the raster package);
# it cannot run stand-alone.
# Carson job variables
args <- commandArgs(trailingOnly = TRUE)
print(args)
start.day <- as.integer(args[1])
start.month <- as.integer(args[2])
start.year <- as.integer(args[3])
end.day <- as.integer(args[4])
end.month <- as.integer(args[5])
end.year <- as.integer(args[6])
ukcpcell <- as.integer(args[7])
# Get dem.block for cell
print(paste("UKCP cell= ", ukcpcell, sep = ""))
print("Get Cell Coordinates")
gridmask.r <- land5km.r # land5km.r defined in setup
vals <- getValues(gridmask.r)
xy <- xyFromCell(gridmask.r, 1:ncell(gridmask.r))
sel <- which(vals == 1)
landcells <- xy[sel, 1:2] # = coordinates for middle of each ukcp09 cell
print(dim(landcells))
x <- landcells[ukcpcell, 1]
y <- landcells[ukcpcell, 2]
# 5km block centred on the cell (2500 m each side of the centre)
e.block <- extent(x - 2500, x + 2500, y - 2500, y + 2500)
dem.block <- crop(demuk, e.block)
plot(dem.block)
#source("/home/ISAD/jm622/rscripts/setup_carson.R") # loads & runs setup file (jd functions, dem etc)
## ASSUME start and end correspond to start and end of single YEAR or GROWING SEASON
#start.day<-1; start.month<-7; start.year<-1992
#end.day<-2; end.month<-7; end.year<-1992
# dir_temp<-"C:/Data2015/Temp5km/extract/"
# dir_results<-"C:/Results2015/year_stats_5km/"
# dir_allyr<-"C:/Results2015/allyear_stats_5km"
# Uses 5km grid mask: grid5km.r
# dir_grids<-"C:/Data2015/Templates/"
# dir_finalt<-"C:/Data2015/Temp5km/hourly/"
dir_finalt <- "~/Documents/Exeter/Data2015/Temp100m/"
# Hard-coded overrides of the command-line arguments (testing values)
start.year <- 1991
end.year <- 1999
ukcpcell <- 900
#year<-start.year
start.jd <- JDdmy(1, 1, start.year)
end.jd <- JDdmy(31, 12, end.year)
print(start.jd)
print(end.jd)
#jd<-start.jd
####################################################################################
# for (ukcpcell in cells[1]:cells[length(cells)]){}
# for (year in start.year:end.year){}
# NOTE(review): 'year' is never assigned in this script - presumably supplied
# by the commented-out year loop above; confirm before sourcing stand-alone.
print(paste("Analysing data for Year= ", year, sep = ""))
# load this year's temperature data at 100m and hourly resolution
infile <- paste(dir_finalt, "block-", ukcpcell, "-", year, ".R", sep = "")
print(infile)
load(infile) # = tmodel.r
# calculate number of days in yr (third dimension = hour of year)
days.in.yr <- dim(tmodel.r)[3] / 24
print(days.in.yr)
####################################################################################
# Calculate daily stats for whole year- ie daily min/max etc
####################################################################################
tmin.year <- stack()
tmax.year <- stack()
tmean.year <- stack()
for (doy in 1:days.in.yr) {
  # Extract 24h of temperature data
  start <- 1 + (doy - 1) * 24
  end <- doy * 24
  #print (paste(start," ",end))
  t.24h <- tmodel.r[, , start:end]
  # Calculate daily statistics
  tmin.24h <- apply(t.24h, c(1, 2), min)
  tmax.24h <- apply(t.24h, c(1, 2), max)
  tmean.24h <- apply(t.24h, c(1, 2), mean)
  # Add layers to stack of summary variables STACK or ARRAY?
  tmin.year <- addLayer(tmin.year, raster(as.matrix(tmin.24h), template = dem.block))
  tmax.year <- addLayer(tmax.year, raster(as.matrix(tmax.24h), template = dem.block))
  tmean.year <- addLayer(tmean.year, raster(as.matrix(tmean.24h), template = dem.block))
}
####################################################################################
# Calculate Seasonal Rasters
####################################################################################
# Define Growing Season (or part of year of interest)
start.gs <- 1; end.gs <- nlayers(tmean.year)
# 1. Calculate last spring and first fall frost - not limited to growing season
# Calc last spring frost <= 2 C (the code tests x <= 2; original comment said 1 C)
# Assumes no frost between end of May & early September - so reduces vector to 1-150 doy
spfrdata.s <- subset(tmin.year, 1:150)
start.v <- rep(start.gs, (nlayers(spfrdata.s)))
spfrost.r <- calc(spfrdata.s, fun = function(x) {ifelse(length(which(x <= 2)) > 0, tail(which(x <= 2), 1) + start.v, 1)}) # extract layer of last frost day
spfrost.r <- mask(spfrost.r, dem.block, maskvalue = NA)
# FIX: plot titles used DMYjd(jd)$year but 'jd' is never assigned (only the
# commented-out #jd<-start.jd above); use 'year' as the other titles do.
plot(spfrost.r, main = paste("Last spring frost day ", year, sep = ""))
# Calculate first autumn frost (after early sept)
autfrdata.s <- subset(tmin.year, 240:nlayers(tmin.year))
start.v <- rep(240, (nlayers(autfrdata.s)))
autfrost.r <- calc(autfrdata.s, fun = function(x) {ifelse(length(which(x <= 2)) > 0, head(which(x <= 2), 1) + start.v, nlayers(tmin.year))}) # extract layer of first frost day
autfrost.r <- mask(autfrost.r, dem.block, maskvalue = NA)
plot(autfrost.r, main = paste("First autumn frost day ", year, sep = ""))
# Calculate frost free period
frostfree.r <- overlay(spfrost.r, autfrost.r, fun = function(x, y) {return(y - x)})
plot(frostfree.r, main = paste("Frost free period of year ", year, sep = ""))
# 2. Calculate growing season stats - temperature extremes, gdd etc
#start.gs<-90; end.gs=305
# correct start and end dates to reflect zone of interest
#end.jd<-start.jd+end.gs-1
#start.jd<-start.jd+start.gs-1
# OR SET GS to first and last frosts??
# Calc gdd (growing degree days: sum of daily mean T above Tbase)
Tbase <- 10; tbase.v <- rep(Tbase, (end.gs - start.gs + 1))
gdd10.r <- calc(subset(tmean.year, start.gs:end.gs), fun = function(x) {sum(x - tbase.v)})
gdd10.r <- mask(gdd10.r, dem.block, maskvalue = NA)
plot(gdd10.r, main = paste("GDD10 ", year, " Tbase= ", Tbase, sep = ""))
Tbase <- 5; tbase.v <- rep(Tbase, (end.gs - start.gs + 1))
gdd5.r <- calc(subset(tmean.year, start.gs:end.gs), fun = function(x) {sum(x - tbase.v)})
plot(gdd5.r, main = paste("GDD5 ", year, " Tbase= ", Tbase, sep = ""))
# Calc mean T
meangst.r <- calc(subset(tmean.year, start.gs:end.gs), fun = function(x) {mean(x)})
plot(meangst.r, main = paste("MeanT ", year, sep = ""))
# Calc max T
maxgst.r <- calc(subset(tmax.year, start.gs:end.gs), fun = function(x) {max(x)})
plot(maxgst.r, main = paste("Max T ", year, sep = ""))
# Calc #days where max temp> 20C, 25C, 30C from April-Oct
# FIX: original used undefined 'tmax.s' and masked with the 5km-version mask
# 'grid5km.r' (also undefined here); use tmax.year and dem.block as elsewhere.
days20.r <- calc(subset(tmax.year, start.gs:end.gs), fun = function(x) {ifelse(length(which(x >= 20)) > 0, length(which(x >= 20)), 0)})
days20.r <- mask(days20.r, dem.block, maskvalue = NA)
days25.r <- calc(subset(tmax.year, start.gs:end.gs), fun = function(x) {ifelse(length(which(x >= 25)) > 0, length(which(x >= 25)), 0)})
days25.r <- mask(days25.r, dem.block, maskvalue = NA)
days30.r <- calc(subset(tmax.year, start.gs:end.gs), fun = function(x) {ifelse(length(which(x >= 30)) > 0, length(which(x >= 30)), 0)})
days30.r <- mask(days30.r, dem.block, maskvalue = NA)
plot(days20.r, main = paste("Days=>20 ", year, sep = ""))
plot(days25.r, main = paste("Days=>25 ", year, sep = ""))
plot(days30.r, main = paste("Days=>30 ", year, sep = ""))
# Calc min T in growing season (FIX: original used undefined 'tmin.s')
mingst.r <- calc(subset(tmin.year, start.gs:end.gs), fun = function(x) {min(x)})
plot(mingst.r, main = paste("Min T ", year, sep = ""))
####################################################################################
# Output raster files for year - seasonal characteristics by block
####################################################################################
gdd10.fileout <- paste(dir_results, "block-", ukcpcell, "-gdd10-", year, ".tif", sep = "")
gdd5.fileout <- paste(dir_results, "block-", ukcpcell, "-gdd5-", year, ".tif", sep = "")
meant.fileout <- paste(dir_results, "block-", ukcpcell, "-meant-", year, ".tif", sep = "")
maxt.fileout <- paste(dir_results, "block-", ukcpcell, "-maxt-", year, ".tif", sep = "")
days20.fileout <- paste(dir_results, "block-", ukcpcell, "-days20-", year, ".tif", sep = "")
days25.fileout <- paste(dir_results, "block-", ukcpcell, "-days25-", year, ".tif", sep = "")
days30.fileout <- paste(dir_results, "block-", ukcpcell, "-days30-", year, ".tif", sep = "")
mint.fileout <- paste(dir_results, "block-", ukcpcell, "-mint-", year, ".tif", sep = "")
spfrost.fileout <- paste(dir_results, "block-", ukcpcell, "-spfrost-", year, ".tif", sep = "")
autfrost.fileout <- paste(dir_results, "block-", ukcpcell, "-autfrost-", year, ".tif", sep = "")
frostfree.fileout <- paste(dir_results, "block-", ukcpcell, "-frostfree-", year, ".tif", sep = "")
writeRaster(gdd10.r, file = gdd10.fileout, format = "GTiff")
writeRaster(gdd5.r, file = gdd5.fileout, format = "GTiff")
writeRaster(meangst.r, file = meant.fileout, format = "GTiff") # FIX: meant.fileout was built but never written
writeRaster(maxgst.r, file = maxt.fileout, format = "GTiff")   # FIX: was undefined 'maxt.r'
writeRaster(days20.r, file = days20.fileout, format = "GTiff")
writeRaster(days25.r, file = days25.fileout, format = "GTiff")
writeRaster(days30.r, file = days30.fileout, format = "GTiff")
writeRaster(mingst.r, file = mint.fileout, format = "GTiff")   # FIX: was undefined 'mint.r'
writeRaster(spfrost.r, file = spfrost.fileout, format = "GTiff")
writeRaster(autfrost.r, file = autfrost.fileout, format = "GTiff")
writeRaster(frostfree.r, file = frostfree.fileout, format = "GTiff")
# CUT OUTS
# TESTING ON INTERPOLATED 5km DATA
# FIX: the remainder of this file was incomplete work-in-progress that
# referenced undefined objects (t100m.day, tmin.day, t100.24) and contained
# dangling assignments (day.GDH10<- etc.) which chain-parsed into the
# following statements. Commented out so the script can be sourced cleanly.
#day.file<-paste(dir_hrtemp,"HrTemp_", DMYjd(jd)$year[1], "-",sprintf("%02d",DMYjd(jd)$month[1],sep=""),"-", sprintf("%02d",DMYjd(jd)$day[1],sep=""),"_100m.r", sep="")
#print(day.file)
#load( file=file.out) # loads: t100m.day
# Calculate daily statistics
#day.tmin<-apply(t100m.day, c(1,2), min)
#day.tmax<-apply(t100m.day, c(1,2), max)
#day.tmean<-apply(t100m.day, c(1,2), mean)
#day.GDH10<-
#day.GDH0<-
#day.GDH15<-
#day.GDH20<-
#day.GDH25<-
#day.GDH30<-
#day.FDH0<-
#day.FDH2<-
#day.H25<-
#day.H30<-
# Daily summary stats= max, min, mean T, GDhr (2 ver - 0C, 10C, 15C??), ~hrs>maxT, #hrs<minT, frost degree hrs = FDhr,
# ...
# Add layers to stack of summary variables
#tmin.year<-addLayer(tmin.year,tmin.day)
# Calculate by cell - vector analysis after extracting from stack??
#for (cell in 1:ncell(t100.24[[1]])){
#}
# Or for every day calculate summary stats and create yearly stack with 365 layers - apply functions to bricks
|
# Plot 2: line chart of Global Active Power over time for the Electric Power
# Consumption exploratory analysis; writes a 480x480 PNG (plot2.png).
library(data.table)
setwd("d:/SVN/ExData_Plotting1/")  # NOTE(review): hard-coded Windows path - adjust per machine
#readData
data <- readRDS("FilteredData.rds")  # assumes columns DateTime and Global_active_power - TODO confirm
#Set locale to get proper week days
Sys.setlocale("LC_TIME", "English")
#create PNG file
png("plot2.png", width = 480, height = 480)
plot(data$DateTime, data$Global_active_power,type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
|
/Plot2.r
|
no_license
|
SergeyAshikhmin/ExData_Plotting1
|
R
| false
| false
| 356
|
r
|
# Plot 2: line chart of Global Active Power over time for the Electric Power
# Consumption exploratory analysis; writes a 480x480 PNG (plot2.png).
library(data.table)
setwd("d:/SVN/ExData_Plotting1/")  # NOTE(review): hard-coded Windows path - adjust per machine
#readData
data <- readRDS("FilteredData.rds")  # assumes columns DateTime and Global_active_power - TODO confirm
#Set locale to get proper week days
Sys.setlocale("LC_TIME", "English")
#create PNG file
png("plot2.png", width = 480, height = 480)
plot(data$DateTime, data$Global_active_power,type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
|
## Species.spec organizes data. After using getspec, use species.spec to
## split your specs by species, writing one tab-delimited text file per
## species into a single folder. Necessary precursor for the dimorphism
## measurement scripts. Note: leaves the working directory changed to
## 'Specs_by_Species' (original behaviour, preserved).
species.spec <- function(specs) {
  ## Create (and move into) the output folder for the per-species files
  dir.create('Specs_by_Species')
  setwd("Specs_by_Species")
  ## Derive species labels by stripping the trailing specimen-ID suffix
  ## (CAPS + digits + letters + final digit) from each column name.
  ## The first column (wavelengths) is excluded via [-1].
  species <- gsub('([A-Z]+)([0-9]+)([A-Za-z]+)([0-9])$', '', names(specs))[-1]
  ## Write one file per distinct species: wavelength column plus every
  ## spec column whose name contains that species label.
  for (sp in names(table(species))) {
    species.cols <- specs[c(1, grep(sp, names(specs)))]
    outfile <- paste0(sp, ".txt")
    write.table(species.cols, outfile, col.names = TRUE, row.names = TRUE, sep = "\t")
  }
}
|
/R/species.spec.r
|
no_license
|
craneon/pavo
|
R
| false
| false
| 718
|
r
|
## Species.spec organizes data. After using getspec, use species.spec to
## split your specs by species, writing one tab-delimited text file per
## species into a single folder. Necessary precursor for the dimorphism
## measurement scripts. Note: leaves the working directory changed to
## 'Specs_by_Species' (original behaviour, preserved).
species.spec <- function(specs) {
  ## Create (and move into) the output folder for the per-species files
  dir.create('Specs_by_Species')
  setwd("Specs_by_Species")
  ## Derive species labels by stripping the trailing specimen-ID suffix
  ## (CAPS + digits + letters + final digit) from each column name.
  ## The first column (wavelengths) is excluded via [-1].
  species <- gsub('([A-Z]+)([0-9]+)([A-Za-z]+)([0-9])$', '', names(specs))[-1]
  ## Write one file per distinct species: wavelength column plus every
  ## spec column whose name contains that species label.
  for (sp in names(table(species))) {
    species.cols <- specs[c(1, grep(sp, names(specs)))]
    outfile <- paste0(sp, ".txt")
    write.table(species.cols, outfile, col.names = TRUE, row.names = TRUE, sep = "\t")
  }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hello.R
\name{hello}
\alias{hello}
\title{Hello}
\usage{
hello()
}
\description{
Hello
}
|
/hello/man/hello.Rd
|
no_license
|
grahamrp/ci_test
|
R
| false
| true
| 167
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hello.R
\name{hello}
\alias{hello}
\title{Hello}
\usage{
hello()
}
\description{
Hello
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geographicmaphelper.R
\name{GeographicRegionTypes}
\alias{GeographicRegionTypes}
\title{\code{GeographicRegionTypes} Types of Geographic Regions}
\usage{
GeographicRegionTypes()
}
\description{
The geographic region types that are available for reference in a map, e.g.,
\code{name}, \code{continent}.
}
\examples{
GeographicRegionTypes()
}
|
/man/GeographicRegionTypes.Rd
|
no_license
|
Displayr/flipStandardCharts
|
R
| false
| true
| 419
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geographicmaphelper.R
\name{GeographicRegionTypes}
\alias{GeographicRegionTypes}
\title{\code{GeographicRegionTypes} Types of Geographic Regions}
\usage{
GeographicRegionTypes()
}
\description{
The geographic region types that are available for reference in a map, e.g.,
\code{name}, \code{continent}.
}
\examples{
GeographicRegionTypes()
}
|
## Functions makeCacheMatrix and cacheSolve work together to cache matrix
## inverses: an inverse computed once can be reused without repeating the
## (potentially expensive) solve() call.

## makeCacheMatrix wraps the matrix 'x' in a list of accessor closures:
##   set(y)        replace the stored matrix and invalidate the cached inverse
##   get()         return the stored matrix
##   setInv(inv)   store the inverse in the enclosing environment
##   getInv()      return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
    cached.inv <- NULL
    set <- function(y) {
        x <<- y
        cached.inv <<- NULL
    }
    get <- function() x
    setInv <- function(inverse) cached.inv <<- inverse
    getInv <- function() cached.inv
    list(set = set,
         get = get,
         setInv = setInv,
         getInv = getInv)
}

## cacheSolve returns the inverse of the special "matrix" created by
## makeCacheMatrix. On the first call it computes the inverse with solve()
## and caches it; subsequent calls return the cached copy (emitting a
## "getting cached data" message). Assumes the stored matrix is invertible.
## Extra arguments in '...' are forwarded to solve().
cacheSolve <- function(x, ...) {
    ## Return a matrix that is the inverse of 'x'
    cached <- x$getInv()
    if (!is.null(cached)) {
        message("getting cached data")
        return(cached)
    }
    fresh <- solve(x$get(), ...)
    x$setInv(fresh)
    fresh
}
|
/cachematrix.R
|
no_license
|
johndgalleyne/ProgrammingAssignment2
|
R
| false
| false
| 1,415
|
r
|
## Functions makeCacheMatrix and cacheSolve work together to cache matrix
## inverses: an inverse computed once can be reused without repeating the
## (potentially expensive) solve() call.

## makeCacheMatrix wraps the matrix 'x' in a list of accessor closures:
##   set(y)        replace the stored matrix and invalidate the cached inverse
##   get()         return the stored matrix
##   setInv(inv)   store the inverse in the enclosing environment
##   getInv()      return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
    cached.inv <- NULL
    set <- function(y) {
        x <<- y
        cached.inv <<- NULL
    }
    get <- function() x
    setInv <- function(inverse) cached.inv <<- inverse
    getInv <- function() cached.inv
    list(set = set,
         get = get,
         setInv = setInv,
         getInv = getInv)
}

## cacheSolve returns the inverse of the special "matrix" created by
## makeCacheMatrix. On the first call it computes the inverse with solve()
## and caches it; subsequent calls return the cached copy (emitting a
## "getting cached data" message). Assumes the stored matrix is invertible.
## Extra arguments in '...' are forwarded to solve().
cacheSolve <- function(x, ...) {
    ## Return a matrix that is the inverse of 'x'
    cached <- x$getInv()
    if (!is.null(cached)) {
        message("getting cached data")
        return(cached)
    }
    fresh <- solve(x$get(), ...)
    x$setInv(fresh)
    fresh
}
|
#Author: Jian Shi, Univ. of Michigan.
# Plot 1: histogram of Global Active Power for 1-2 Feb 2007 from the UCI
# household power consumption data; writes a 480x480 PNG (plot1.png).
setwd("/Data/Coursera/proj")  # NOTE(review): hard-coded path - adjust per machine
df=read.table("household_power_consumption.txt", sep=";",header=TRUE,stringsAsFactors=FALSE)
#Only use the data of this time per the assignment
data <- df[df$Date %in% c("1/2/2007","2/2/2007") ,]
dim(data)
png("plot1.png",width=480,height=480)
hist(as.numeric(data$Global_active_power), col='red',main="Global Active Power",xlab="Global Active Power (kilowatts)", ylab="Frequency")
dev.off()
|
/plot1.R
|
no_license
|
Jskywalkergh/ExData_Plotting1
|
R
| false
| false
| 461
|
r
|
#Author: Jian Shi, Univ. of Michigan.
# Plot 1: histogram of Global Active Power for 1-2 Feb 2007 from the UCI
# household power consumption data; writes a 480x480 PNG (plot1.png).
setwd("/Data/Coursera/proj")  # NOTE(review): hard-coded path - adjust per machine
df=read.table("household_power_consumption.txt", sep=";",header=TRUE,stringsAsFactors=FALSE)
#Only use the data of this time per the assignment
data <- df[df$Date %in% c("1/2/2007","2/2/2007") ,]
dim(data)
png("plot1.png",width=480,height=480)
hist(as.numeric(data$Global_active_power), col='red',main="Global Active Power",xlab="Global Active Power (kilowatts)", ylab="Frequency")
dev.off()
|
# Package load hook: initialise the "ggiwid" option with an SVG id counter
# starting at 0 (used to generate unique SVG element ids).
.onLoad <- function(libname, pkgname) {
  options("ggiwid" = list(svgid = 0))
  invisible()
}

# Return 'grob' with its name replaced by a unique name derived from
# 'prefix' (via grid's grobName()).
setGrobName <- function(prefix, grob) {
  grob$name <- grobName(grob, prefix)
  grob
}
|
/R/utils.R
|
no_license
|
trinker/ggiraph
|
R
| false
| false
| 185
|
r
|
# Package load hook: initialise the "ggiwid" option with an SVG id counter
# starting at 0 (used to generate unique SVG element ids).
.onLoad <- function(libname, pkgname) {
  options("ggiwid" = list(svgid = 0))
  invisible()
}

# Return 'grob' with its name replaced by a unique name derived from
# 'prefix' (via grid's grobName()).
setGrobName <- function(prefix, grob) {
  grob$name <- grobName(grob, prefix)
  grob
}
|
# Compare five classifiers (kNN, bagging, random forest, AdaBoost, gradient
# boosting) on the UCI WDBC breast-cancer dataset across 10 random 70/30
# train/test splits, then write per-sample and average accuracies to
# "Output 3.txt".
rm(list = ls())
# Check for the required packages and install them
if(!require("rpart"))
{
  install.packages("rpart")
  library("rpart")
}
if (!require("randomForest")) {
  install.packages("randomForest")
  library("randomForest")
}
if(!require("kknn"))
{
  install.packages("kknn")
  library("kknn")
}
if(!require("ipred"))
{
  install.packages("ipred")
  library("ipred")
}
if(!require("ada"))
{
  install.packages("ada")
  library("ada")
}
if(!require("gbm"))
{
  install.packages("gbm")
  library("gbm")
}
# Read from URL
dataURL<-as.character('https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data')
d<-read.csv(dataURL,header = FALSE,sep = ",")
# Remove NA and ? in dataset
d <- na.omit(d)
d[d=="NA"] <- 0
d[d=="?"] <- 0
# Init arrays to store 10 sample accuracies
kNNArray<- numeric(10)
baggingArray<- numeric(10)
rFArray<- numeric(10)
boostingArray<- numeric(10)
gbmArray<- numeric(10)
# Class attribute is column 2 ie V2
# Run 10 samples
for(i in 1:10)
{
  cat("Running sample ",i,"\n")
  # Generate Train and Test datasets 70/30 split
  sampleInstances<-sample(nrow(d),size = 0.7*nrow(d))
  trainingData <- d[sampleInstances,]
  testData <- d[-sampleInstances,]
  # KNN
  method <- "kNN"
  # Model
  knn.model <- kknn(formula = formula(V2~.),train=trainingData,test=testData,k = 3,distance=1)
  # Predict
  fit <- fitted(knn.model)
  tab3<-table(testData$V2, fit)
  # Off-diagonal = misclassified; total = all four cells of the 2x2 table
  sum3=tab3[1,2]+tab3[2,1]
  sum4=tab3[1,1]+tab3[1,2]+tab3[2,1]+tab3[2,2]
  # Accuracy
  kNNArray[i] = ((1-(sum3/sum4))*100)
  cat("Method = ", method,", accuracy= ", kNNArray[i],"\n")
  # BAGGING
  method <- "Bagging"
  # Model (bag of 10 decision stumps)
  control <- rpart.control(cp=-1, maxdepth=1, minsplit=0)
  bagTrainModel <- bagging(V2~., data=trainingData, control=control, nbagg=10, coob=TRUE)
  # Predict
  bagPredict <-predict(bagTrainModel, testData)
  # Accuracy = share of diagonal (correct) cells in the confusion table
  baggingTable <- table(testData$V2, bagPredict)
  baggingCorrect <- sum(diag(baggingTable))
  baggingError <- sum(baggingTable)-baggingCorrect
  baggingArray[i]<- (baggingCorrect / (baggingCorrect+baggingError))*100
  cat("Method = ", method,", accuracy= ", baggingArray[i],"\n")
  # RANDOM FOREST
  method <- "Random Forest"
  # Model
  rFTrainmodel <- randomForest(as.factor(V2) ~ ., data=trainingData, ntree=500, keep.forest=TRUE, importance=TRUE)
  # Predict
  rFPredict <- predict(rFTrainmodel,testData)
  # Accuracy
  rFTable <- table(testData$V2, rFPredict)
  rFCorrect <- sum(diag(rFTable))
  rFError <- sum(rFTable)-rFCorrect
  rFArray[i] <- (rFCorrect / (rFCorrect+rFError))*100
  cat("Method = ", method,", accuracy= ", rFArray[i],"\n")
  # BOOSTING (AdaBoost via ada)
  method <- "Boosting"
  # Model
  boostingTrainModel <- ada(V2 ~ ., data = trainingData, type="discrete")
  # Predict
  boostingPredict=predict(boostingTrainModel,testData)
  # Accuracy
  boostingTable <- table(testData$V2, boostingPredict)
  boostingCorrect <- sum(diag(boostingTable))
  boostingError <- sum(boostingTable)-boostingCorrect
  boostingArray[i]<- (boostingCorrect / (boostingCorrect+boostingError))*100
  cat("Method = ", method,", accuracy= ", boostingArray[i],"\n")
  # GBM BOOSTING
  method <- "GBM Boosting"
  # Relabel class to 0/1 for bernoulli gbm.
  # NOTE(review): WDBC's V2 is "M"/"B", so V2 == "2" is never TRUE here and
  # every label becomes 0. This looks copied from the original
  # breast-cancer-wisconsin.data (classes 2/4); confirm and relabel on
  # "M"/"B" if WDBC is intended.
  trainingData$V2 = ifelse(trainingData$V2 == "2", 1, 0)
  testData$V2 = ifelse(testData$V2 == "2", 1, 0)
  # Model
  gbmTrainModel <- gbm(V2~ ., data=trainingData, dist="bernoulli", n.tree = 400,shrinkage = 1, train.fraction = 1)
  # Predict
  gbmPredict=predict(gbmTrainModel,testData,n.trees = 400)
  # Accuracy (sign of the link-scale prediction vs 0/1 label)
  gbmTable <- table(testData$V2>0, gbmPredict>0)
  gbmCorrect <- sum(diag(gbmTable))
  gbmError <- sum(gbmTable)-gbmCorrect
  gbmArray[i]<- (gbmCorrect / (gbmCorrect+gbmError))*100
  cat("Method = ", method,", accuracy= ", gbmArray[i],"\n")
}
# Writing to file
sink("Output 3.txt")
cat("No. of Instances: ",nrow(d))
cat("\n")
cat("No. of Attributes: ",ncol(d))
cat("\n")
# FIX: output previously said "ILP Dataset" but dataURL points at WDBC
cat("\nOutput for WDBC Dataset: ",dataURL)
cat("\n\nKNN Accuracy Array: ",kNNArray)
cat("\n")
cat("\nBagging Accuracy Array: ",baggingArray)
cat("\n")
cat("\nBoosting Accuracy Array: ",boostingArray)
cat("\n")
cat("\nRandom Forest Accuracy Array: ",rFArray)
cat("\n")
cat("\nGradient Boosting Accuracy Array: ",gbmArray)
cat("\n")
# Accuracy of 10 iterations
cat("\nAccuracy for 10 iterations")
knnAvg <- mean(kNNArray)
cat("\n\nAverage accuracy of 10 samples in kNN Classifier: ",knnAvg)
cat("\n")
bagAvg <- mean(baggingArray)
cat("\nAverage accuracy of 10 samples in Bagging Classifier: ",bagAvg)
cat("\n")
boostAvg <- mean(boostingArray)
cat("\nAverage accuracy of 10 samples in Boosting Classifier: ",boostAvg)
cat("\n")
rFAvg <- mean(rFArray)
cat("\nAverage accuracy of 10 samples in Random Forest Classifier: ",rFAvg)
cat("\n")
gbmAvg <- mean(gbmArray)
cat("\nAverage accuracy of 10 samples in Gradient Boosting Classifier: ",gbmAvg)
cat("\n")
sink()
|
/HW5/MLHW5/Dataset3.R
|
no_license
|
nagabharan/CS6375_ML
|
R
| false
| false
| 5,022
|
r
|
rm(list = ls())
# Check for the requried packages and install them
if(!require("rpart"))
{
install.packages("rpart")
library("rpart")
}
if (!require("randomForest")) {
install.packages("randomForest")
library("randomForest")
}
if(!require("kknn"))
{
install.packages("kknn")
library("kknn")
}
if(!require("ipred"))
{
install.packages("ipred")
library("ipred")
}
if(!require("ada"))
{
install.packages("ada")
library("ada")
}
if(!require("gbm"))
{
install.packages("gbm")
library("gbm")
}
# Fetch the Wisconsin Diagnostic Breast Cancer (WDBC) data straight from the
# UCI machine-learning repository.  Column V2 holds the class label.
dataURL <- as.character('https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data')
d <- read.csv(dataURL, header = FALSE, sep = ",")
# Clean-up: drop rows containing NA, then zero out any literal "NA"/"?"
# placeholder strings that survived parsing.
d <- na.omit(d)
d[d == "NA"] <- 0
d[d == "?"] <- 0
# One accuracy slot per iteration for each of the five classifiers.
kNNArray <- rep(0, 10)
baggingArray <- rep(0, 10)
rFArray <- rep(0, 10)
boostingArray <- rep(0, 10)
gbmArray <- rep(0, 10)
# Class attribute is column 2 ie V2.  For the WDBC data this is the diagnosis
# ("M" = malignant, "B" = benign).
# Run 10 samples: random 70/30 train/test splits, recording each classifier's
# test-set accuracy (in percent) into its array.
for(i in 1:10)
{
  cat("Running sample ",i,"\n")
  # Generate Train and Test datasets 70/30 split
  sampleInstances <- sample(nrow(d), size = 0.7*nrow(d))
  trainingData <- d[sampleInstances,]
  testData <- d[-sampleInstances,]

  # ---- kNN (k = 3, Manhattan distance) ----
  method <- "kNN"
  knn.model <- kknn(formula = formula(V2~.), train=trainingData, test=testData, k = 3, distance=1)
  fit <- fitted(knn.model)
  tab3 <- table(testData$V2, fit)
  # Off-diagonal cells of the 2x2 confusion table are the misclassifications.
  sum3 = tab3[1,2]+tab3[2,1]
  sum4 = tab3[1,1]+tab3[1,2]+tab3[2,1]+tab3[2,2]
  kNNArray[i] = ((1-(sum3/sum4))*100)
  cat("Method = ", method,", accuracy= ", kNNArray[i],"\n")

  # ---- Bagging: 10 bootstrap replicates of a decision stump ----
  method <- "Bagging"
  control <- rpart.control(cp=-1, maxdepth=1, minsplit=0)
  bagTrainModel <- bagging(V2~., data=trainingData, control=control, nbagg=10, coob=TRUE)
  bagPredict <- predict(bagTrainModel, testData)
  baggingTable <- table(testData$V2, bagPredict)
  baggingCorrect <- sum(diag(baggingTable))
  baggingError <- sum(baggingTable)-baggingCorrect
  baggingArray[i] <- (baggingCorrect / (baggingCorrect+baggingError))*100
  cat("Method = ", method,", accuracy= ", baggingArray[i],"\n")

  # ---- Random Forest (500 trees) ----
  method <- "Random Forest"
  rFTrainmodel <- randomForest(as.factor(V2) ~ ., data=trainingData, ntree=500, keep.forest=TRUE, importance=TRUE)
  rFPredict <- predict(rFTrainmodel,testData)
  rFTable <- table(testData$V2, rFPredict)
  rFCorrect <- sum(diag(rFTable))
  rFError <- sum(rFTable)-rFCorrect
  rFArray[i] <- (rFCorrect / (rFCorrect+rFError))*100
  cat("Method = ", method,", accuracy= ", rFArray[i],"\n")

  # ---- Discrete AdaBoost ----
  method <- "Boosting"
  boostingTrainModel <- ada(V2 ~ ., data = trainingData, type="discrete")
  boostingPredict=predict(boostingTrainModel,testData)
  boostingTable <- table(testData$V2, boostingPredict)
  boostingCorrect <- sum(diag(boostingTable))
  boostingError <- sum(boostingTable)-boostingCorrect
  boostingArray[i]<- (boostingCorrect / (boostingCorrect+boostingError))*100
  cat("Method = ", method,", accuracy= ", boostingArray[i],"\n")

  # ---- Gradient boosting (gbm, run last: it overwrites V2 in-place) ----
  method <- "GBM Boosting"
  # gbm's bernoulli loss needs a 0/1 response.  The WDBC labels are "M"/"B",
  # so map malignant ("M") to 1 and benign to 0.
  # BUG FIX: the original compared against "2" (the label scheme of the
  # *original* breast-cancer-wisconsin file), which never matches this data
  # set and silently collapsed every label to 0.
  trainingData$V2 = ifelse(trainingData$V2 == "M", 1, 0)
  testData$V2 = ifelse(testData$V2 == "M", 1, 0)
  gbmTrainModel <- gbm(V2~ ., data=trainingData, dist="bernoulli", n.tree = 400, shrinkage = 1, train.fraction = 1)
  # predict.gbm returns values on the logit (link) scale by default; > 0
  # corresponds to a predicted probability above 0.5.
  gbmPredict=predict(gbmTrainModel,testData,n.trees = 400)
  gbmTable <- table(testData$V2>0, gbmPredict>0)
  gbmCorrect <- sum(diag(gbmTable))
  gbmError <- sum(gbmTable)-gbmCorrect
  gbmArray[i]<- (gbmCorrect / (gbmCorrect+gbmError))*100
  cat("Method = ", method,", accuracy= ", gbmArray[i],"\n")
}
# Writing to file: redirect cat() output into a plain-text results report with
# the per-iteration accuracy arrays and their 10-run averages.
# Fixed report labels: "Atrributes" -> "Attributes", "Baggin" -> "Bagging",
# and "ILP Dataset" -> "WDBC Dataset" (dataURL points at the UCI WDBC file).
sink("Output 3.txt")
cat("No. of Instances: ",nrow(d))
cat("\n")
cat("No. of Attributes: ",ncol(d))
cat("\n")
cat("\nOutput for WDBC Dataset: ",dataURL)
cat("\n\nKNN Accuracy Array: ",kNNArray)
cat("\n")
cat("\nBagging Accuracy Array: ",baggingArray)
cat("\n")
cat("\nBoosting Accuracy Array: ",boostingArray)
cat("\n")
cat("\nRandom Forest Accuracy Array: ",rFArray)
cat("\n")
cat("\nGradient Boosting Accuracy Array: ",gbmArray)
cat("\n")
# Accuracy of 10 iterations
cat("\nAccuracy for 10 iterations")
knnAvg <- mean(kNNArray)
cat("\n\nAverage accuracy of 10 samples in kNN Classifier: ",knnAvg)
cat("\n")
bagAvg <- mean(baggingArray)
cat("\nAverage accuracy of 10 samples in Bagging Classifier: ",bagAvg)
cat("\n")
boostAvg <- mean(boostingArray)
cat("\nAverage accuracy of 10 samples in Boosting Classifier: ",boostAvg)
cat("\n")
rFAvg <- mean(rFArray)
cat("\nAverage accuracy of 10 samples in Random Forest Classifier: ",rFAvg)
cat("\n")
gbmAvg <- mean(gbmArray)
cat("\nAverage accuracy of 10 samples in Gradient Boosting Classifier: ",gbmAvg)
cat("\n")
# Restore normal console output.
sink()
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/data_desc.R
\docType{data}
\name{all.genes.lengths}
\alias{all.genes.lengths}
\title{Coding gene lengths from Ensembl; length is calculated as the sum of all coding exons (merged beforehand so each position is counted only once).}
\format{data frame}
\source{
http://www.genenames.org/ (Most recent HUGO approved names ) and ftp://ftp.ensembl.org/pub/current_gtf/homo_sapiens (info for genes, exons, lenghts)
}
\usage{
all.genes.lengths
}
\description{
This data frame contains information about gene lengths.
There are 19202 rows and each row represents a gene. This set of genes was acquired from the HGNC website and only approved, protein-coding genes are included.
Rownames are HUGO symbols for the genes.
The data frame has 2 columns.
}
\section{Variables}{
\itemize{
\item Hugo_Symbol same as rowname. HUGO symbol used as a unique identifier for genes.
\item Length length of genes reported in NCBI
}
}
\keyword{data}
|
/man/all.genes.lengths.Rd
|
no_license
|
luisgls/cDriver
|
R
| false
| false
| 987
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/data_desc.R
\docType{data}
\name{all.genes.lengths}
\alias{all.genes.lengths}
\title{Coding gene lengths from Ensembl; length is calculated as the sum of all coding exons (merged beforehand so each position is counted only once).}
\format{data frame}
\source{
http://www.genenames.org/ (Most recent HUGO approved names ) and ftp://ftp.ensembl.org/pub/current_gtf/homo_sapiens (info for genes, exons, lenghts)
}
\usage{
all.genes.lengths
}
\description{
This data frame contains information about gene lengths.
There are 19202 rows and each row represents a gene. This set of genes was acquired from the HGNC website and only approved, protein-coding genes are included.
Rownames are HUGO symbols for the genes.
The data frame has 2 columns.
}
\section{Variables}{
\itemize{
\item Hugo_Symbol same as rowname. HUGO symbol used as a unique identifier for genes.
\item Length length of genes reported in NCBI
}
}
\keyword{data}
|
# Load shared helpers and objects (DB connection, tiers_order, patchOrder,
# tiers_stats, champ lookup tables, ...) — presumably defined in initialize.R;
# confirm before running in isolation.
source("initialize.R")
###############################################################################
###############################################################################
############################ SOLO QUEUE #######################################
###############################################################################
###############################################################################
############################ GENERAL STATS ####################################
## Distribution of Skill Tiers:
# Aggregate player counts per skill tier across all patches/regions.
tiers_agg <- data.table(tiers_stats)[,list(count = sum(count)), by=list(tier)]
# Bar chart: players per tier (in thousands), one fixed color per tier.
p <- ggplot(data = tiers_agg, aes(x=tier, y=count/1000, fill=tier)) +
  geom_bar(stat='Identity') +
  labs(x ="Skill Tier", y = "#Players in thousands") +
  scale_x_discrete(limits=(tiers_order)) +
  theme_bw() +
  theme(legend.position="none",
        axis.text.x = element_text(size=8)) +
  scale_fill_manual(values=c("CHALLENGER"="orange",
                             "MASTER"="#966F33",
                             "DIAMOND"="#cbe3f0",
                             "PLATINUM"="#A0BFB4",
                             "GOLD"="#e6c200",
                             "SILVER"="#c0c0c0",
                             "BRONZE"="#cd7f32",
                             "UNRANKED"="black"))
p
## with patch
#line
# Order the tier factor so legends and stacking follow tiers_order.
tiers_stats$order <- factor(tiers_stats$tier, levels = tiers_order)
tiers_stats <- tiers_stats[order(tiers_stats$order),]
# Line chart: per-patch player count per tier.
p <- ggplot(data = tiers_stats, aes(x=patch, y=count, group=order, color=factor(tier, levels=tiers_order))) +
  geom_line(stat='Identity') +
  scale_x_discrete(limits=patchOrder)
p
#bar
# Stacked bars: per-patch player counts (thousands) broken down by tier.
p <- ggplot(data = tiers_stats, aes(x=patch, y=count/1000, group=factor(tier, levels=tiers_order), fill=factor(tier, levels=tiers_order))) +
  geom_bar(stat='Identity') +
  scale_x_discrete(limits=patchOrder) +
  labs( y = "#players in thousands") +
  theme(title=element_text(size=20),
        legend.title = element_text(size=16),
        axis.title = element_text(size=12)) +
  scale_fill_manual("Skill Tier", values=c("CHALLENGER"="orange",
                                           "MASTER"="#966F33",
                                           "DIAMOND"="#cbe3f0",
                                           "PLATINUM"="#A0BFB4",
                                           "GOLD"="#e6c200",
                                           "SILVER"="#c0c0c0",
                                           "BRONZE"="#cd7f32",
                                           "UNRANKED"="black"))
p
# Per-region tier populations read from the DB (connection from initialize.R).
pop_tier_platform <- data.table(dbReadTable(connection, "population_region_tier"))
pop_tier_platform %>%
  group_by(tier, platformId) %>%
  summarise(count = sum(count)) -> ptp
ptp$order <- factor(ptp$tier, levels = tiers_order)
ptp <- ptp[order(ptp$order),]
# Faceted bars: tier distribution per region (platformId).
p <- ggplot(data = ptp, aes(x=order, y=count/1000, group=order, fill=order)) +
  geom_bar(stat='Identity') +
  facet_wrap(~platformId, nrow=2) +
  scale_x_discrete(limits=tiers_order) +
  labs( y = "#players in thousands",
        x = "Skill Tier")+
  theme_bw() +
  theme(title=element_text(size=12),
        legend.title = element_text(size=8),
        axis.title = element_text(size=8),
        axis.text.x = element_blank(),
        legend.position = "bottom") +
  scale_fill_manual("Skill Tier", values=c("CHALLENGER"="orange",
                                           "MASTER"="#966F33",
                                           "DIAMOND"="#cbe3f0",
                                           "PLATINUM"="#A0BFB4",
                                           "GOLD"="#e6c200",
                                           "SILVER"="#c0c0c0",
                                           "BRONZE"="#cd7f32",
                                           "UNRANKED"="black"))
p
#bar (per region)
# Per-region, per-patch tier populations: stacked bars faceted by region.
pop_tier_platform_patch <- data.table(dbReadTable(connection, "population_region_tier_patch"))
pop_tier_platform_patch %>%
  group_by(tier, platformId, patch) %>%
  summarise(count = sum(count)) -> ptpp
p <- ggplot(data = ptpp, aes(x=patch, y=count/1000, group=factor(tier, levels=tiers_order), fill=factor(tier, levels=tiers_order))) +
  geom_bar(stat='Identity') +
  facet_wrap(~platformId) +
  scale_x_discrete(limits=patchOrder) +
  labs( y = "#players in thousands") +
  theme_bw() +
  theme(title=element_text(size=20),
        legend.title = element_text(size=16),
        axis.title = element_text(size=12),
        axis.text.x = element_text(size=8, angle=45, hjust = 1),
        legend.position = "bottom") +
  scale_fill_manual("Skill Tier", values=c("CHALLENGER"="orange",
                                           "MASTER"="#966F33",
                                           "DIAMOND"="#cbe3f0",
                                           "PLATINUM"="#A0BFB4",
                                           "GOLD"="#e6c200",
                                           "SILVER"="#c0c0c0",
                                           "BRONZE"="#cd7f32",
                                           "UNRANKED"="black"))
p
## game Duration
# gameDuration is stored in seconds; all plots/summaries below convert to
# minutes.
time_info <- data.table(dbGetQuery(connection, "SELECT * FROM matchdetails"))
maxDuration <- round(max(time_info$gameDuration)/60,2)
meanDuration <- round(mean(time_info$gameDuration/60),2)
minDuration <- round(min(time_info$gameDuration/60),2)
# Overall histogram of match durations with min/mean/max reference lines.
p <- ggplot(data = time_info, aes(round(gameDuration/60), fill = "#00BFFF")) +
  geom_histogram(aes(y= ..count.. /1000), breaks = c(seq(0,maxDuration, by=1)),fill = "#56B4E9") +
  scale_x_continuous(breaks=seq(0,maxDuration,5)) +
  geom_vline(xintercept = round(minDuration), linetype=2) +
  geom_vline(xintercept = round(meanDuration), linetype=2) +
  geom_vline(xintercept = round(maxDuration), linetype=2) +
  annotate("text", x = 70, y = 9, label = paste("Max: ",toString(maxDuration), " Minutes")) +
  annotate("text", x = 70, y = 5, label = paste("Mean: ",toString(meanDuration), " Minutes")) +
  annotate("text", x = 70, y = 1, label = paste("Min: ",toString(minDuration), " Minutes")) +
  labs(x = "Match Duration in Minutes", y = "#matches in thousands") +
  theme_bw() +
  theme(title=element_text(size=20),
        legend.title = element_text(size=16),
        axis.title = element_text(size=12))
p
# histograms for each platform
p <- ggplot(data = time_info, aes(round(gameDuration/60))) +
  geom_histogram(aes(y = ..count../1000),breaks = c(seq(0,maxDuration, by=1)),fill = "#56B4E9") +
  scale_x_continuous(breaks=seq(0,maxDuration,5)) +
  labs(x = "Match Duration", y = "#matches in thousands") +
  theme_bw() +
  theme(title=element_text(size=20),
        legend.title = element_text(size=16),
        axis.title = element_text(size=12)) +
  facet_grid(platformId~.) +
  scale_fill_brewer(palette = "Blues")
p
# brazil has more than double the amount of remakes
# ("remakes" = matches shorter than 5 minutes, counted per region here.)
time_info[gameDuration/60 < 5] %>% group_by(platformId) %>% summarise(count = n())
# Quantile summary of durations over all regions.
time_info %>%
  summarise(minDuration = min(gameDuration/60),
            "10qDuration" = quantile(gameDuration/60 , .10),
            "25qDuration" = quantile(gameDuration/60 , .25),
            "Median" = quantile(gameDuration/60 , .5),
            "75Duration" = quantile(gameDuration/60 , .75),
            "90qDuration" = quantile(gameDuration/60 , .90),
            maxDuration = max(gameDuration/60)
  ) -> time_info_overall
time_info_overall
# Same summary split by region.
time_info %>%
  group_by(platformId) %>%
  summarise(minDuration = min(gameDuration/60),
            "25qDuration" = quantile(gameDuration/60 , .25),
            meanDuration = mean(gameDuration/60),
            "75Duration" = quantile(gameDuration/60 , .75),
            maxDuration = max(gameDuration/60)
  ) -> time_info_agg
time_info_agg
# Boxplots of duration per region.
p <- ggplot(data = time_info, aes(x = platformId, y=round(gameDuration/60), group=platformId)) +
  geom_boxplot() +
  labs(x = "Match Duration", y = "#matches",
       title = "Distribution of Match Durations") +
  theme_bw() +
  theme(title=element_text(size=20),
        legend.title = element_text(size=16),
        axis.title = element_text(size=12)) +
  #facet_wrap(~platformId, nrow=2)+
  scale_fill_brewer(palette = "Blues")
p
# time by tier
# Match durations split by skill tier: quantile summary + boxplots.
time_by_tier <- data.table(dbGetQuery(connection, "SELECT tier, patch, gameDuration, gameCreation from playerdetails"))
time_by_tier %>%
  group_by(tier) %>%
  summarise(minDuration = min(gameDuration/60),
            "25qDuration" = quantile(gameDuration/60 , .25),
            meanDuration = mean(gameDuration/60),
            "75Duration" = quantile(gameDuration/60 , .75),
            maxDuration = max(gameDuration/60)
  ) -> time_by_tier_agg
p <- ggplot(data = time_by_tier, aes(x = tier, y=round(gameDuration/60), group=tier, fill=tier)) +
  geom_boxplot() +
  labs(x = "Skill Tier", y = "Match Duration in Minutes") +
  scale_x_discrete(limits=tiers_order) +
  scale_y_continuous(breaks=seq(0,maxDuration, 5)) +
  theme_bw() +
  theme(legend.position = "None",
        title=element_text(size=20),
        legend.title = element_text(size=16),
        axis.title = element_text(size=12)) +
  scale_fill_manual("Skill Tier", values=c("CHALLENGER"="orange",
                                           "MASTER"="#966F33",
                                           "DIAMOND"="#cbe3f0",
                                           "PLATINUM"="#A0BFB4",
                                           "GOLD"="#e6c200",
                                           "SILVER"="#c0c0c0",
                                           "BRONZE"="#cd7f32",
                                           "UNRANKED"="black"))
p
# time by tier and region
# Same as above but additionally faceted by region.
time_by_tier_region <- data.table(dbGetQuery(connection, "SELECT platformId, tier, patch, gameDuration, gameCreation from playerdetails"))
time_by_tier_region %>%
  group_by(tier, platformId) %>%
  summarise(minDuration = min(gameDuration/60),
            "25qDuration" = quantile(gameDuration/60 , .25),
            meanDuration = mean(gameDuration/60),
            "75Duration" = quantile(gameDuration/60 , .75),
            maxDuration = max(gameDuration/60)
  ) -> time_by_tier_agg
# NOTE(review): `clr` flags matches of exactly 20/21 minutes but is never used
# by the plot below — possibly leftover from an earlier highlight attempt.
time_by_tier_region$clr = ifelse(time_by_tier_region$gameDuration %in% c(1200,1260), "red", "white")
p <- ggplot(data = time_by_tier_region, aes(x = tier, y=round(gameDuration/60), group=tier, fill=tier)) +
  geom_boxplot() +
  facet_wrap(~platformId, nrow=2) +
  labs(x = "Skill Tier", y = "Match Duration") +
  scale_x_discrete(limits=tiers_order) +
  scale_y_continuous(breaks=seq(0,maxDuration, 10)) +
  theme_bw() +
  theme(legend.position = "None",
        title=element_text(size=20),
        legend.title = element_text(size=16),
        axis.title = element_text(size=12),
        axis.text.x = element_blank()) +
  scale_fill_manual("Skill Tier", values=c("CHALLENGER"="orange",
                                           "MASTER"="#966F33",
                                           "DIAMOND"="#cbe3f0",
                                           "PLATINUM"="#A0BFB4",
                                           "GOLD"="#e6c200",
                                           "SILVER"="#c0c0c0",
                                           "BRONZE"="#cd7f32",
                                           "UNRANKED"="black"))
p
# Histograms of match duration (thousands of matches), one facet per tier.
p <- ggplot(data = time_by_tier_region, aes(round(gameDuration/60),
                                            group=factor(tier, levels=tiers_order),
                                            fill=factor(tier, levels=tiers_order))) +
  geom_histogram(aes(y=..count../1000), breaks = c(seq(0,maxDuration, by=1))) +
  scale_x_continuous(breaks=seq(0,maxDuration,10)) +
  facet_wrap(~factor(tier, levels=tiers_order), ncol=2) +
  labs(x = "Skill Tier", y = "Match Duration") +
  theme_bw() +
  theme(legend.position = "None",
        axis.title = element_text(size=12),
        axis.text.x = element_text(size=6)) +
  scale_fill_manual("Skill Tier", values=c("CHALLENGER"="orange",
                                           "MASTER"="#966F33",
                                           "DIAMOND"="#cbe3f0",
                                           "PLATINUM"="#A0BFB4",
                                           "GOLD"="#e6c200",
                                           "SILVER"="#c0c0c0",
                                           "BRONZE"="#cd7f32",
                                           "UNRANKED"="black"))
p
# time by patch
# Histograms of match duration, one facet per patch.
p <- ggplot(data = time_by_tier, aes(x=round(gameDuration/60))) +
  geom_histogram() +
  theme_bw() +
  facet_wrap(~factor(patch, levels=patchOrder))
p
# Number of matches per patch.
# BUG FIX: the original used summarise(count = sum(count)), but time_by_tier
# holds one row per match and has no `count` column, so that call errors;
# count the rows with n() instead.
time_by_tier %>%
  group_by(patch) %>%
  summarise(count = n())
## game Creation
# gameCreation is a millisecond epoch timestamp; convert to calendar dates.
time_info$gameDates <- as.Date(as.POSIXct(time_info$gameCreation/1000, origin="1970-01-01"))
minDate <- min(time_info$gameDates)
maxDate <- max(time_info$gameDates)
# Daily histogram of matches across the season.
p <- ggplot(data = time_info, aes(gameDates)) +
  geom_histogram(breaks = c(seq(as.numeric(minDate),as.numeric(maxDate),by=1))) +
  scale_x_date(date_labels = "%b %d", breaks = date_breaks("1 months"), date_minor_breaks = "1 day") +
  labs(x = "Match Date", y = "#matches",
       title = "Distribution of Matches played through the year") +
  theme_bw() +
  theme(title=element_text(size=20),
        legend.title = element_text(size=16),
        axis.title = element_text(size=12),
        axis.text.x = element_text(angle = 90)) +
  scale_fill_brewer(palette = "Blues")
p
time_info$patch2 <- factor(time_info$patch, levels = patchOrder)
# Daily match counts, overall per patch and per patch+region.
time_info %>%
  group_by(gameDates, patch) %>%
  summarise(Frequency = n()) -> time_info_patch
time_info %>%
  group_by(gameDates, patch, platformId) %>%
  summarise(Frequency = n()) -> time_info_regionPatch
# Area chart of daily matches, faceted by patch (facets sized to patch span).
p <- ggplot(data = time_info_patch, aes(gameDates, Frequency, group=patch)) +
  facet_grid(.~factor(time_info_patch$patch, levels = patchOrder),scale="free", space="free", switch = "x") +
  geom_area(breaks = c(seq(as.numeric(minDate),as.numeric(maxDate),by=1))) +
  scale_x_date(date_labels = "%b %d", breaks = date_breaks("22 days"), date_minor_breaks = "1 day", expand = c(0,0)) +
  labs(x = "Match Date and Patch", y = "#matches",
       title = "Distribution of Matches played through the year") +
  theme_bw() +
  theme(title=element_text(size=20),
        legend.title = element_text(size=16),
        axis.title = element_text(size=12),
        axis.text.x = element_text(size= 16, angle = 90),
        axis.text.y = element_text(size= 16),
        panel.spacing.x = unit(0,"line"),
        strip.placement = "outside") +
  scale_fill_manual(values = c("blue"))
p
# Same chart with one semi-transparent area per region overlaid.
p <- ggplot(data = time_info_regionPatch, aes(gameDates, Frequency, group=platformId, fill=platformId)) +
  facet_grid(.~factor(time_info_regionPatch$patch, levels = patchOrder),scale="free", space="free", switch = "x") +
  geom_area(position='identity', alpha=0.5, breaks = c(seq(as.numeric(minDate),as.numeric(maxDate),by=1))) +
  scale_x_date(date_labels = "%b %d", breaks = date_breaks("22 days"), date_minor_breaks = "1 day", expand = c(0,0)) +
  labs(x = "Match Date and Patch", y = "#matches",
       title = "Distribution of Matches played through the year") +
  theme_bw() +
  theme(title=element_text(size=20),
        legend.title = element_text(size=16),
        axis.title = element_text(size=12),
        axis.text.x = element_text(size= 16, angle = 90),
        axis.text.y = element_text(size= 16),
        panel.spacing.x = unit(0,"line"),
        strip.placement = "outside") #+
  #scale_fill_manual(values = c("blue"))
p
# NOTE(review): this plot is an exact duplicate of the per-patch area chart
# built a few statements above — possibly left over from iterating on the
# figure; consider deleting one copy.
p <- ggplot(data = time_info_patch, aes(gameDates, Frequency, group=patch)) +
  facet_grid(.~factor(time_info_patch$patch, levels = patchOrder),scale="free", space="free", switch = "x") +
  geom_area(breaks = c(seq(as.numeric(minDate),as.numeric(maxDate),by=1))) +
  scale_x_date(date_labels = "%b %d", breaks = date_breaks("22 days"), date_minor_breaks = "1 day", expand = c(0,0)) +
  labs(x = "Match Date and Patch", y = "#matches",
       title = "Distribution of Matches played through the year") +
  theme_bw() +
  theme(title=element_text(size=20),
        legend.title = element_text(size=16),
        axis.title = element_text(size=12),
        axis.text.x = element_text(size= 16, angle = 90),
        axis.text.y = element_text(size= 16),
        panel.spacing.x = unit(0,"line"),
        strip.placement = "outside") +
  scale_fill_manual(values = c("blue"))
p
# Per-patch facets of daily match counts per region (stacked areas capped at
# 4k matches/day).
time_info_patch2 <- time_info_regionPatch#[which(time_info_regionPatch$patch=="7.2"),]
p <- ggplot(data = time_info_patch2, aes(gameDates, Frequency/1000, group=platformId, fill=platformId)) +
  facet_wrap(~factor(time_info_patch2$patch, levels = patchOrder),scale="free", nrow=4) +
  geom_area(alpha=0.6) +
  scale_x_date(date_labels = "%a %m-%d", breaks = date_breaks("7 days"), date_minor_breaks = "1 day", expand = c(0,0)) +
  scale_y_continuous(limits = c(0, 4)) +
  scale_fill_discrete(name = "Region", labels = c("Brazil", "Europe West", "Korea", "North America")) +
  labs(x = "Match Date and Patch", y = "#matches",
       title = "Distribution of Matches played through season 2017 in thousands") +
  #facet_wrap(~factor(time_info_patch2$patch, levels = patchOrder), nrow=4) +
  theme_bw() +
  theme(title=element_text(size=20),
        legend.title = element_text(size=16),
        axis.title = element_text(size=12),
        axis.text.x = element_text(size=6),
        axis.text.y = element_text(size=8),
        panel.spacing.x = unit(1,"line"),
        strip.placement = "outside")
p
# Daily match counts stacked by skill tier.
# NOTE(review): `adc` is not defined in this file — presumably loaded by
# initialize.R; confirm before running standalone.
time_info_tier <- adc[,list(platformId, patch, tier, gameCreation)]
time_info_tier$gameDates <- as.Date(as.POSIXct(time_info_tier$gameCreation/1000, origin="1970-01-01"))
time_info_tier$order <- factor(time_info_tier$tier, levels = tiers_order)
time_info_tier$patch2 <- factor(time_info_tier$patch, levels = patchOrder)
time_info_tier %>%
  group_by(gameDates, patch2, order) %>%
  summarise(Frequency = n()) -> time_info_tier
p <- ggplot(data = time_info_tier, aes(gameDates, Frequency, group=order, fill=order)) +
  #facet_grid(.~factor(time_info_patch$patch, levels = patchOrder),scale="free", space="free", switch = "x") +
  geom_area() +
  scale_x_date(date_labels = "%a %b %d", breaks = date_breaks("4 days"), date_minor_breaks = "1 day", expand = c(0,0), position="top") +
  labs(x = "Match Date and Patch", y = "#matches",
       title = "Distribution of Matches played through the year") +
  theme_bw() +
  theme(title=element_text(size=20),
        legend.title = element_text(size=16),
        axis.title = element_text(size=12),
        axis.text.x = element_text(size= 16, angle = 90),
        axis.text.y = element_text(size= 16),
        panel.spacing.x = unit(0,"line"),
        strip.placement = "outside") +
  scale_fill_manual("Skill Tier", values=c("CHALLENGER"="orange",
                                           "MASTER"="#966F33",
                                           "DIAMOND"="#cbe3f0",
                                           "PLATINUM"="#A0BFB4",
                                           "GOLD"="#e6c200",
                                           "SILVER"="#c0c0c0",
                                           "BRONZE"="#cd7f32",
                                           "UNRANKED"="black"))
p
############################ TOP ####################################
### Distribution in Season 7
# Overall top-lane pick counts across the season, most-played first.
p <- ggplot(data=top.distribution,aes(x=names, y=gamesPlayed)) +
  geom_bar(stat='identity') +
  scale_x_discrete(limits=top.distribution[order(by=gamesPlayed,decreasing = T)]$names) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(size=6))
p
# One bar chart per patch showing the 20 most-played top-laners, arranged in
# a single grid via grid.arrange.
p <- list()
for(i in 1:length(patchOrder)){
  df= top.distribution.patch[patch==patchOrder[i]][order(by=gamesPlayed,decreasing = T)][1:20]
  p[[i]] <- ggplot(df, aes(x=name, y=gamesPlayed)) +
    geom_bar(stat = "identity") +
    scale_x_discrete(limits=df[order(by=gamesPlayed,decreasing = T)]$name) +
    scale_y_continuous(limits=c(0,2000)) +
    theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(size=6)) +
    ggtitle("Patch",paste0(patchOrder[i]))
}
do.call("grid.arrange", c(p, ncol= 5))
# Faceted variant: one panel per patch from the combined table.
p <- ggplot(data=top.distribution.patch,aes(x=names, y=gamesPlayed)) +
  geom_bar(stat='identity') +
  scale_x_discrete(limits=top.distribution[order(by=gamesPlayed,decreasing = T)]$names) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(size=6)) +
  facet_wrap(.~patch, scales="free")
p
# Raw pick counts per patch joined against the champion-name lookup.
p <- ggplot(data=merge(top[,c("championId", "patch")], champLookUp, "championId") , aes(names)) +
  geom_bar() + facet_wrap(~ patch, scales="free")
p
####PER PATCH AND REGION
# Pick counts per champion, faceted by region x patch, bars flipped sideways.
p <- ggplot(data = top.relevant) +
  geom_bar(aes(x=as.vector(champ[as.character(top.relevant$championId)]), fill = championId)) +
  facet_wrap( ~ platformId+factor(patch, levels = patchOrder), nrow=4) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(size=6)) +
  coord_flip() +
  guides(fill=F)
p+ ggtitle("Toplane Picks per Patch and Region")
p <- ggplot(data=top.distribution,aes(x=names, y=gamesPlayed)) +
  geom_bar(stat='identity') +
  scale_x_discrete(limits=top.distribution[order(by=gamesPlayed,decreasing = T)]$names) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(size=6))
p
#PER CHAMP
# Pick counts over patches, one line per region, one facet per champion.
p <- ggplot(data = top.relevant, aes(x = patch, group=platformId, color=platformId)) +
  geom_line(stat = "count") +
  scale_x_discrete(limits=patchOrder) +
  facet_wrap(~ championId, ncol = 5) +
  theme_igray() + scale_colour_tableau("colorblind10")
p
# NOTE(review): `top.relevant[, championId == 39]` is a data.table *j*
# expression (returns a logical vector), not a row filter — presumably
# `top.relevant[championId == 39]` was intended.  `geom_line(y = mean(win))`
# also references `win` outside aes(); verify both before trusting this plot.
p <- ggplot(data = top.relevant[, championId == 39], aes(x = patch, group=platformId, color=platformId)) +
  geom_line(stat = "count") +
  geom_line(y = mean(win) ) +
  scale_x_discrete(limits=patchOrder) +
  facet_wrap(~ championId, ncol = 5) +
  theme_dark() +
  theme(plot.background = element_rect("grey"))
p
# Winrate (dashed) and scaled game count (solid) per champion over patches.
p <- ggplot(data = top.performance, aes(x = patch, y=winrate, group=platformId, color=platformId)) +
  geom_line(linetype = 2) +
  geom_line(data = top.performance , aes(y = games/2000)) +
  scale_x_discrete(limits=patchOrder) +
  facet_wrap(~ name, ncol = 5) +
  theme_dark() +
  theme(plot.background = element_rect("grey"))
p + ggtitle("top Picks per Patch and Region")
########################### JUNGLE ##################################
#overall distribution in season 7
# Jungle pick counts across the season, most-played first.
p <- ggplot(data=jungle.distribution,aes(x=names, y=gamesPlayed)) +
  geom_bar(stat='identity') +
  scale_x_discrete(limits=jungle.distribution[order(by=gamesPlayed,decreasing = T)]$names) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(size=6))
p
#PER PATCH AND REGION
p <- ggplot(data = jungle.relevant) +
  geom_bar(aes(x=as.vector(champ[as.character(jungle.relevant$championId)]), fill = championId)) +
  facet_wrap( ~ platformId+factor(patch, levels = patchOrder), nrow=4) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(size=6)) +
  coord_flip() +
  guides(fill=F)
p+ ggtitle("Jungle Picks per Patch and Region")
# Winrate (dashed) and scaled game count (solid) per jungler over patches.
p <- ggplot(data = jungle.performance, aes(x = patch, y=winrate, group=platformId, color=platformId)) +
  geom_line(linetype = 2) +
  geom_line(data = jungle.performance , aes(y = games/2000)) +
  scale_x_discrete(limits=patchOrder) +
  facet_wrap(~ name, ncol = 5) +
  theme_dark() +
  theme(plot.background = element_rect("grey"))
p + ggtitle("jungle Picks per Patch and Region")
########################### MID ##################################
#overall distribution in season 7
# Mid-lane pick counts across the season, most-played first.
p <- ggplot(data=mid.distribution,aes(x=names, y=gamesPlayed)) +
  geom_bar(stat='identity') +
  scale_x_discrete(limits=mid.distribution[order(by=gamesPlayed,decreasing = T)]$names) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(size=6))
p
#PER PATCH AND REGION
# NOTE(review): title below still says "Jungle" — copy/paste leftover.
p <- ggplot(data = mid.relevant) +
  geom_bar(aes(x=as.vector(champ[as.character(mid.relevant$championId)]), fill = championId)) +
  facet_wrap( ~ platformId+factor(patch, levels = patchOrder), nrow=4) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(size=6)) +
  coord_flip() +
  guides(fill=F)
p+ ggtitle("Jungle Picks per Patch and Region")
p <- ggplot(data = mid.performance, aes(x = patch, y=winrate, group=platformId, color=platformId)) +
  geom_line(linetype = 2) +
  geom_line(data = mid.performance , aes(y = games/2000)) +
  scale_x_discrete(limits=patchOrder) +
  facet_wrap(~ name, ncol = 5) +
  theme_dark() +
  theme(plot.background = element_rect("grey"))
p + ggtitle("Mid Lane Picks per Patch and Region")
############################ ADC ####################################
####DISTRIBUTIONS OF CHAMPIONS
#overall distribution in season 7
# ADC pick counts across the season, most-played first.
p <- ggplot(data=adc.distribution,aes(x=names, y=gamesPlayed)) +
  geom_bar(stat='identity') +
  scale_x_discrete(limits=adc.distribution[order(by=gamesPlayed,decreasing = T)]$names) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(size=6))
p
#Barchart
# Playrate per champion, faceted by region x patch (games/2000*100 assumes
# ~2000 sampled games per cell — TODO confirm against the sampling setup).
p <- ggplot(data = adc.performance, aes(x=name, y=games/2000 *100, fill = name)) +
  geom_bar(stat="Identity") +
  facet_wrap( ~ platformId+factor(patch, levels = patchOrder), nrow=4) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(size=6)) +
  labs(x = "Champion", y = "Playrate in Percentages") +
  coord_flip() +
  guides(fill=F)
p+ ggtitle("ADC Picks per Patch and Region")
#Linechart
p <- ggplot(data = adc.performance[championId %in% relchamps.adc], aes(x = patch, y=games/10000 * 100, group=platformId, color=platformId)) +
  geom_line(linetype = 1) +
  #geom_line(data = adc.performance , aes(y = winrate), linetype = 2) +
  scale_x_discrete(limits=patchOrder) +
  theme(axis.text.x = element_text(size=5)) +
  labs(x = "Patch", y = "Playrate in Percentage") +
  facet_wrap(~ name, ncol = 4)
p + ggtitle("ADC Picks per Patch and Region")
# Top-20 ADC picks per patch, one chart per patch in a grid.
p <- list()
for(i in 1:length(patchOrder)){
  df= adc.distribution.patch[patch==patchOrder[i]][order(by=gamesPlayed,decreasing = T)][1:20]
  p[[i]] <- ggplot(df, aes(x=name, y=gamesPlayed)) +
    geom_bar(stat = "identity") +
    scale_x_discrete(limits=df[order(by=gamesPlayed,decreasing = T)]$name) +
    scale_y_continuous(limits=c(0,5000)) +
    theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(size=6)) +
    ggtitle("Patch",paste0(patchOrder[i]))
}
do.call("grid.arrange", c(p, ncol= 5))
# Hand-picked set of marksman champions for the item-build analysis.
adc.set1 <- c("Ashe","Caitlyn", "Draven", "Ezreal", "Kog'Maw", "Lucian", "Jhin", "Tristana", "Vayne", "Varus", "Xayah")
adc.set <- adc.set1
# Item pick frequency per champion/patch, normalized by games played to get
# a per-game purchase rate (`perc`).  Only the first four champions are
# plotted in detail.
df <- items.adc[championName %in% adc.set[1:4]][, list(count=.N), by = c("championName", "patch", "itemName")]
setkeyv(df, c("championName", "patch"))
championCount <- items.adc[championName %in% adc.set,list(championName, patch)][,list(gamesPlayed = .N), by = c("championName", "patch")]
setkeyv(championCount, c("championName", "patch"))
df <- merge(df, championCount, by= c("championName","patch"))
df$perc <- df$count/df$gamesPlayed
p <- ggplot(data = df) +
  geom_bar(stat= "identity", aes(x=itemName, y=perc)) +
  facet_grid(championName ~ factor(patch, levels = patchOrder)) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1, size = 6), axis.text.y = element_text(size=6)) +
  scale_x_discrete(limits=relItems.ADC) +
  coord_flip() +
  guides(fill=FALSE)
p
# Just an idea, but unfortunately nothing is recognizable in it.
# (Stacked/polar item-share chart per champion over patches.)
p <- ggplot(data = df[itemName %in% relItems.ADC],aes(x=factor(patch, levels = patchOrder), fill = factor(itemName))) +
  geom_bar(width=0.9, position="fill") +
  facet_grid(. ~ championName) +
  theme_igray() + scale_fill_manual(values = colorRampPalette(brewer.pal(8, "Accent"))(length(relItems.ADC)),
                                    guide = guide_legend(nrow=2)) +
  theme(legend.position="bottom")
p + coord_polar(theta = "y")
### attempt to illustrate specific traits of adcs
# OVERALL
# Build a per-champion/patch trait table (damage per second, KDA components,
# gold, cs, ...) for the radar/bar visualizations below.
dfprep = adc.performance.patch %>%
  mutate(DPS = totalDamageToChampions/gameDuration) %>%
  select(
    name, patch,
    games,
    summoners,
    winrate,
    DPS,
    DmgDealt = totalDamageToChampions,
    kills,
    assists,
    deaths,
    DmgTaken = totalDamageTaken,
    cs = csPerGame,
    gold = goldEarned
  )
dfprep = data.table(dfprep)
# Rescale every metric to [0, 1] and melt to long format.
# NOTE(review): mutate_each()/funs() are long-deprecated dplyr APIs —
# consider across() if this script is ever modernized.
df = dfprep %>%
  rownames_to_column( var = "champ" ) %>%
  mutate_each(funs(rescale), -c(champ,name,patch)) %>%
  melt(id.vars=c('champ','name','patch'), measure.vars=colnames(dfprep[,-c("name","patch")])) %>%
  arrange(champ)
df = data.table(df)
#radar charts: better filter out some champs
# Radar charts of the rescaled performance metrics, one facet per
# champion x patch, restricted to the named ADC sets.
# NOTE(review): `adc.set2` and `coord_radar()` are not defined in this file —
# presumably supplied by initialize.R; confirm before running.
# BUG FIX: the original had a stray duplicated `geom_polygon()` call that
# terminated the `+` chain early, so coord_radar/facets/theme were evaluated
# as a separate dangling expression and never attached to the plot.
df[name %in% c(adc.set1,adc.set2)] %>%
  ggplot(aes(x=variable, y=value, group=name, color=name)) +
  geom_polygon(fill=NA) +
  coord_radar() + theme_bw() + facet_grid(name~factor(patch, levels=patchOrder)) +
  #scale_x_discrete(labels = abbreviate) +
  theme(axis.text.x = element_text(size = 5), legend.position="none")
#bar chart perspective
# Same rescaled traits as bars; dashed line marks the 0.5 mid-scale reference.
df %>%
ggplot(aes(x=variable, y=value, group= name, fill = name)) +
geom_bar(stat="identity") +
geom_line(y = 0.5, linetype =2, color = "black") +
facet_grid(factor(patch, levels=patchOrder)~name) +
coord_flip() +
theme_igray() + scale_fill_manual(values = colorRampPalette(brewer.pal(8, "Accent"))(length(unique(df$name)))) +
theme(axis.text.y = element_text(size = 5), legend.position="none")
# Radar charts via ggRadar (ggiraphExtra, presumably -- TODO confirm),
# one panel per patch, arranged in a 4-row grid.
dfprep %>%
mutate_each(funs(rescale), -c(name, patch)) %>% data.table() -> df_radar
p <- list()
for(j in 1:length(patchOrder))
p[[j]] <- ggRadar(df_radar[name %in% adc.set & patch == patchOrder[j]], aes(color=name), rescale = F, ylim=c(0,1))
do.call("grid.arrange", c(p, nrow = 4 ))
## Embed the per-champion/patch performance table into 3D via classical MDS
## (cmdscale) and cluster the embedding with k-means for k = 3..6.
adc.performance.patch %>%
rownames_to_column(var="id") -> adc.cmd
# Keep only numeric columns (plus "id") for the distance computation.
adc.cmd.numerics = adc.cmd[,-c("name", "championId", "lane", "role", "patch")]
setkey(adc.cmd.numerics, "id")
# NOTE(review): data.tables do not retain row names, so this assignment is
# effectively a no-op; rows are identified through the "id" column instead.
rownames(adc.cmd.numerics) = adc.cmd.numerics$id
# Pairwise distances over the numeric columns (drop the "id" column).
adc.cmd.dist <- dist(adc.cmd.numerics[,-1])
# FIX: the original ran `colnames(adc.cmd.dist) = rownames`, assigning the
# base *function* `rownames`; "dist" objects have no dim, so that errored.
# Labels of a dist object live in its "Labels" attribute -- line removed.
fit <- cmdscale(adc.cmd.dist, k = 3)
data.table(fit) %>%
rownames_to_column(var = "id") %>%
merge(adc.cmd[,c("id","name", "patch")], by="id") -> fit2
fit2$detailedName = paste0(fit2$name, " ", fit2$patch)
# k-means clusterings of the 3D embedding for several cluster counts.
kmeans3 = kmeans(x = fit2[,2:4], centers = 3)
kmeans4 = kmeans(x = fit2[,2:4], centers = 4)
kmeans5 = kmeans(x = fit2[,2:4], centers = 5)
kmeans6 = kmeans(x = fit2[,2:4], centers = 6)
fit2$cluster3 = kmeans3$cluster
fit2$cluster4 = kmeans4$cluster
fit2$cluster5 = kmeans5$cluster
fit2$cluster6 = kmeans6$cluster
# Interactive 3D scatter (rgl), colored by the k = 6 solution, with labels.
plot3d(fit2[,2:4], size = 10, col = fit2$cluster6)
text3d(fit2[,2:4], texts = fit2$detailedName, size=2)
## GAME DURATION, DPS AND DAMAGE DONE
# Per-champion trends across patches for the relevant ADC champions.
# duration of games where champions participated
adc.performance.patch[championId %in% relchamps.adc] %>%
ggplot(aes(y = gameDuration, x = patch, group=name)) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
# damage done to champions
adc.performance.patch[championId %in% relchamps.adc] %>%
ggplot(aes(y = totalDamageToChampions, x = patch, group=name)) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
# dps
# NOTE(review): despite the "dps" label this is damage per *minute* if
# gameDuration is in seconds -- TODO confirm units.
adc.performance.patch[championId %in% relchamps.adc] %>%
ggplot(aes(y = totalDamageToChampions/(gameDuration/60), x = patch, group=name)) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
# dps by win/loss
df = adc.performance.patch.win[championId %in% relchamps.adc]
ggplot(df,aes(y = totalDamageToChampions/(gameDuration/60), x = patch, group=as.factor(win), color = as.factor(win))) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
## gold earned and gold per min
adc.performance.patch[championId %in% relchamps.adc] %>%
ggplot(aes(y = goldEarned, x = patch, group=name)) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
adc.performance.patch[championId %in% relchamps.adc] %>%
ggplot(aes(y = goldEarned/(gameDuration/60), x = patch, group=name)) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
# win and loss
df = adc.performance.patch.win[championId %in% relchamps.adc]
ggplot(df,aes(y = goldEarned/(gameDuration/60), x = patch, group=as.factor(win), color = as.factor(win))) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
### cs and xp development
# Lane-phase differentials vs the opposing laner at minute 10/20/30.
#cs diffs
df = adc.performance.patch[championId %in% relchamps.adc, list(name, patch, csDiffPerMinTen, csDiffPerMinTwenty, csDiffPerMinThirty)]
melt(df, id.vars=c('name','patch'), measure.vars=colnames(df[,-c("name","patch")])) %>%
ggplot(aes(y = value, x = variable, group = name)) +
geom_line() + scale_x_discrete(labels=c("10","20","30")) +
facet_grid(name~factor(patch, levels = patchOrder)) +
ggtitle("Creep Score Differential for each Champion and Patch")
#xp Diffs
df = adc.performance.patch[championId %in% relchamps.adc, list(name, patch, xpDiffPerMinTen, xpDiffPerMinTwenty, xpDiffPerMinThirty)]
melt(df, id.vars=c('name','patch'), measure.vars=colnames(df[,-c("name","patch")])) %>%
ggplot(aes(y = value, x = variable, group = name)) +
geom_line() + scale_x_discrete(labels=c("10","20","30")) +
facet_grid(name~factor(patch, levels = patchOrder)) +
ggtitle("Experience Differential for each Champion and Patch")
# cs deltas
df = adc.performance.patch[championId %in% relchamps.adc, list(name, patch, csPerMinDeltaTen, csPerMinDeltaTwenty, csPerMinDeltaThirty)]
melt(df, id.vars=c('name','patch'), measure.vars=colnames(df[,-c("name","patch")])) %>%
ggplot(aes(y = value, x = variable, group = name)) +
geom_line() + scale_x_discrete(labels=c("0-10","10-20","20-30")) +
facet_grid(name~factor(patch, levels = patchOrder)) +
ggtitle("Creep Score Per Minute Delta for each Champion and Patch")
## first blood
# Objective-participation rates (kill + assist) across patches.
adc.performance.patch[championId %in% relchamps.adc] %>%
ggplot(aes(y = firstBloodKill + firstBloodAssist, x = patch, group=name)) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
adc.performance.patch.win[championId %in% relchamps.adc] %>%
ggplot(aes(y = firstBloodKill + firstBloodAssist, x = patch, group=as.factor(win), color=as.factor(win))) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
# first tower
adc.performance.patch[championId %in% relchamps.adc] %>%
ggplot(aes(y = firstTowerKill + firstTowerAssist, x = patch, group=name)) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
#first inhib
adc.performance.patch[championId %in% relchamps.adc] %>%
ggplot(aes(y = firstInhibitorKill + firstInhibitorAssist, x = patch, group=name)) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
## winrate by game duration
# Winrate conditional on game length buckets; variableNew maps each bucket
# column to its minute mark (0 = overall winrate, excluded from the plot).
df = adc.performance.patch[,list(name, patch, winrate, winrateAt25, winrateAt30, winrateAt35, winrateAt40, winrateAt45, winrateOver45)]
df1 = df %>%
rownames_to_column(var = "champ") %>%
melt(id.vars=c('champ', 'name', 'patch'), measure.vars = colnames(df[,-c("name", "patch")]))
df1$variableNew <- c(0,25,30,35,40,45,50)[match(df1$variable, c("winrate", "winrateAt25", "winrateAt30", "winrateAt35", "winrateAt40","winrateAt45", "winrateOver45"))]
winrate_scale = c("winrateAt25", "winrateAt30", "winrateAt35", "winrateAt40","winrateAt45", "winrateOver45")
p = ggplot(data = df1[variableNew!=0], aes(x = variableNew, y=value, color = name)) +
geom_point() +
geom_smooth(method="auto") +
facet_grid(name ~ factor(patch, levels = patchOrder)) +
scale_y_continuous(limits=c(0,1)) +
scale_x_continuous(limits = c(20,55), breaks = seq(25,50,5), labels=c( "25", "30", "35" ,"40", "45", ">45")) +
theme(legend.position="none")
p
############################ SUP ####################################
#overall distribution in season 7
p <- ggplot(data=sup.distribution,aes(x=names, y=gamesPlayed)) +
geom_bar(stat='identity') +
scale_x_discrete(limits=sup.distribution[order(by=gamesPlayed,decreasing = T)]$names) +
theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(size=6))
p
#Barchart
# NOTE(review): the hard-coded 2000 is presumably the matches sampled per
# region/patch, making games/2000*100 a playrate percentage -- TODO confirm.
p <- ggplot(data = sup.performance, aes(x=name, y=games/2000 *100, fill = name)) +
geom_bar(stat="Identity") +
facet_wrap( ~ platformId+factor(patch, levels = patchOrder), nrow=4) +
theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(size=6)) +
labs(x = "Champion", y = "Playrate in Percentages") +
coord_flip() +
guides(fill=F)
p+ ggtitle("Support Picks per Patch and Region")
#Linechart
p <- ggplot(data = sup.performance[championId %in% relchamps.sup], aes(x = patch, y=games/2000 * 100, group=platformId, color=platformId)) +
geom_line(linetype = 1) +
#geom_line(data = adc.performance , aes(y = winrate), linetype = 2) +
scale_x_discrete(limits=patchOrder) +
theme(axis.text.x = element_text(size=5)) +
labs(x = "Patch", y = "Playrate in Percentage") +
facet_wrap(~ name, ncol = 4)
p + ggtitle("Support Picks per Patch and Region")
#botlane sup and adc combined:
# Share of each support paired with each ADC, per patch.
botlane %>%
filter(sup.Id %in% relchamps.sup) %>%
group_by(ad,sup,patch) %>%
summarise(count =n()) %>%
left_join(botlane %>% group_by(ad, patch) %>% summarise(adCount = n())) %>%
mutate(perc = count/adCount) %>%
ggplot(aes(x = sup, y = perc)) + geom_bar(stat="identity") +
theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(size=6)) +
coord_flip() +
facet_grid(ad~factor(patch, levels=patchOrder))
###############################################################################
###############################################################################
############################ PRO GAMES #######################################
###############################################################################
###############################################################################
###adc
####DISTRIBUTIONS OF CHAMPIONS
#Barchart
# Tournament realm platform IDs of the major professional leagues.
majorRegions = c("TRLH1", #naLCS
"TRLH3", #euLCS
"TRTW", #lms
"ESPORTSTMNT06", #lck
"ESPORTSTMNT03" #cbLoL and TCL
)
p <- ggplot(data = adc.pro.performance[platformId %in% majorRegions], aes(x=name, y=playrate *100, fill = name)) +
geom_bar(stat="Identity") +
facet_wrap( ~ platformId+factor(patch, levels = patchOrder), nrow=5) +
theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(size=6)) +
labs(x = "Champion", y = "Playrate in Percentages") +
coord_flip() +
guides(fill=F)
p+ ggtitle("ADC Picks per Patch and Region")
#Linechart
p <- ggplot(data = adc.pro.performance[platformId %in% majorRegions], aes(x = patch, y=playrate * 100, group=platformId, color=platformId)) +
geom_line(linetype = 1) +
#geom_line(data = adc.performance , aes(y = winrate), linetype = 2) +
scale_x_discrete(limits=patchOrder) +
theme(axis.text.x = element_text(size=5)) +
labs(x = "Patch", y = "Playrate in Percentage") +
facet_wrap(~ name, ncol = 4)
p + ggtitle("ADC Picks per Patch and Region")
# Top-20 most played ADCs per patch, one bar panel per patch.
p <- list()
for(i in 1:length(patchOrder)){
df= adc.distribution.patch[patch==patchOrder[i]][order(by=gamesPlayed,decreasing = T)][1:20]
p[[i]] <- ggplot(df, aes(x=name, y=gamesPlayed)) +
geom_bar(stat = "identity") +
scale_x_discrete(limits=df[order(by=gamesPlayed,decreasing = T)]$name) +
scale_y_continuous(limits=c(0,5000)) +
theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(size=6)) +
ggtitle("Patch",paste0(patchOrder[i]))
}
do.call("grid.arrange", c(p, ncol= 5))
# Champion subset for the item-build analysis below; counts per
# (champion, patch, item) for the first four champions of the set.
adc.set1 <- c("Ashe","Caitlyn", "Draven", "Ezreal", "Kog'Maw", "Lucian", "Jhin", "Tristana", "Vayne", "Varus", "Xayah")
adc.set <- adc.set1
df <- items.adc[championName %in% adc.set[1:4]][, list(count=.N), by = c("championName", "patch", "itemName")]
setkeyv(df, c("championName", "patch"))
# NOTE(review): this block duplicates the item-frequency / trait section that
# appears earlier in this file (apparent copy-paste); consider removing one copy.
championCount <- items.adc[championName %in% adc.set,list(championName, patch)][,list(gamesPlayed = .N), by = c("championName", "patch")]
setkeyv(championCount, c("championName", "patch"))
# Attach per-champion game counts; perc = share of games the item was built.
df <- merge(df, championCount, by= c("championName","patch"))
df$perc <- df$count/df$gamesPlayed
p <- ggplot(data = df) +
geom_bar(stat= "identity", aes(x=itemName, y=perc)) +
facet_grid(championName ~ factor(patch, levels = patchOrder)) +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size = 6), axis.text.y = element_text(size=6)) +
scale_x_discrete(limits=relItems.ADC) +
coord_flip() +
guides(fill=FALSE)
p
# Just an idea, but unfortunately nobody can read anything off this one.
p <- ggplot(data = df[itemName %in% relItems.ADC],aes(x=factor(patch, levels = patchOrder), fill = factor(itemName))) +
geom_bar(width=0.9, position="fill") +
facet_grid(. ~ championName) +
theme_igray() + scale_fill_manual(values = colorRampPalette(brewer.pal(8, "Accent"))(length(relItems.ADC)),
guide = guide_legend(nrow=2)) +
theme(legend.position="bottom")
p + coord_polar(theta = "y")
### attempt to illustrate specific traits of adcs
# OVERALL
# Per-champion/patch trait table (DPS = damage to champions / gameDuration).
dfprep = adc.performance.patch %>%
mutate(DPS = totalDamageToChampions/gameDuration) %>%
select(
name, patch,
games,
summoners,
winrate,
DPS,
DmgDealt = totalDamageToChampions,
kills,
assists,
deaths,
DmgTaken = totalDamageTaken,
cs = csPerGame,
gold = goldEarned
)
dfprep = data.table(dfprep)
# Rescale traits to [0, 1] and melt to long format for plotting.
df = dfprep %>%
rownames_to_column( var = "champ" ) %>%
mutate_each(funs(rescale), -c(champ,name,patch)) %>%
melt(id.vars=c('champ','name','patch'), measure.vars=colnames(dfprep[,-c("name","patch")])) %>%
arrange(champ)
df = data.table(df)
# Radar chart of the rescaled champion traits (duplicate of the section
# earlier in this file), filtered to a champion subset.
# FIX: same defect as in the earlier copy -- a stray `geom_polygon()` broke
# the `+` chain, leaving the remaining layers as an erroring expression.
df[name %in% c(adc.set1,adc.set2)] %>%
ggplot(aes(x=variable, y=value, group=name, color=name)) +
geom_polygon(fill=NA) +
coord_radar() + theme_bw() + facet_grid(name~factor(patch, levels=patchOrder)) +
#scale_x_discrete(labels = abbreviate) +
theme(axis.text.x = element_text(size = 5), legend.position="none")
#bar chart perspective
# (Duplicate of the trait bar-chart section earlier in this file.)
df %>%
ggplot(aes(x=variable, y=value, group= name, fill = name)) +
geom_bar(stat="identity") +
geom_line(y = 0.5, linetype =2, color = "black") +
facet_grid(factor(patch, levels=patchOrder)~name) +
coord_flip() +
theme_igray() + scale_fill_manual(values = colorRampPalette(brewer.pal(8, "Accent"))(length(unique(df$name)))) +
theme(axis.text.y = element_text(size = 5), legend.position="none")
# Radar charts via ggRadar, one panel per patch, arranged in a 4-row grid.
dfprep %>%
mutate_each(funs(rescale), -c(name, patch)) %>% data.table() -> df_radar
p <- list()
for(j in 1:length(patchOrder))
p[[j]] <- ggRadar(df_radar[name %in% adc.set & patch == patchOrder[j]], aes(color=name), rescale = F, ylim=c(0,1))
do.call("grid.arrange", c(p, nrow = 4 ))
## 3D embedding via classical MDS + k-means (duplicate of the section
## earlier in this file).
adc.performance.patch %>%
rownames_to_column(var="id") -> adc.cmd
# Keep only numeric columns (plus "id") for the distance computation.
adc.cmd.numerics = adc.cmd[,-c("name", "championId", "lane", "role", "patch")]
setkey(adc.cmd.numerics, "id")
# NOTE(review): data.tables do not retain row names; effectively a no-op.
rownames(adc.cmd.numerics) = adc.cmd.numerics$id
adc.cmd.dist <- dist(adc.cmd.numerics[,-1])
# FIX: removed `colnames(adc.cmd.dist) = rownames` -- it assigned the base
# function `rownames` and errors because "dist" objects have no dim.
fit <- cmdscale(adc.cmd.dist, k = 3)
data.table(fit) %>%
rownames_to_column(var = "id") %>%
merge(adc.cmd[,c("id","name", "patch")], by="id") -> fit2
fit2$detailedName = paste0(fit2$name, " ", fit2$patch)
# k-means clusterings of the embedding for k = 3..6.
kmeans3 = kmeans(x = fit2[,2:4], centers = 3)
kmeans4 = kmeans(x = fit2[,2:4], centers = 4)
kmeans5 = kmeans(x = fit2[,2:4], centers = 5)
kmeans6 = kmeans(x = fit2[,2:4], centers = 6)
fit2$cluster3 = kmeans3$cluster
fit2$cluster4 = kmeans4$cluster
fit2$cluster5 = kmeans5$cluster
fit2$cluster6 = kmeans6$cluster
# Interactive 3D scatter (rgl), colored by the k = 6 solution.
plot3d(fit2[,2:4], size = 10, col = fit2$cluster6)
text3d(fit2[,2:4], texts = fit2$detailedName, size=2)
## GAME DURATION, DPS AND DAMAGE DONE
# (Duplicate of the ADC metrics section earlier in this file.)
# duration of games where champions participated
adc.performance.patch[championId %in% relchamps.adc] %>%
ggplot(aes(y = gameDuration, x = patch, group=name)) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
# damage done to champions
adc.performance.patch[championId %in% relchamps.adc] %>%
ggplot(aes(y = totalDamageToChampions, x = patch, group=name)) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
# dps (damage per minute if gameDuration is in seconds -- TODO confirm)
adc.performance.patch[championId %in% relchamps.adc] %>%
ggplot(aes(y = totalDamageToChampions/(gameDuration/60), x = patch, group=name)) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
# dps by win/loss
df = adc.performance.patch.win[championId %in% relchamps.adc]
ggplot(df,aes(y = totalDamageToChampions/(gameDuration/60), x = patch, group=as.factor(win), color = as.factor(win))) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
## gold earned and gold per min
adc.performance.patch[championId %in% relchamps.adc] %>%
ggplot(aes(y = goldEarned, x = patch, group=name)) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
adc.performance.patch[championId %in% relchamps.adc] %>%
ggplot(aes(y = goldEarned/(gameDuration/60), x = patch, group=name)) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
# win and loss
df = adc.performance.patch.win[championId %in% relchamps.adc]
ggplot(df,aes(y = goldEarned/(gameDuration/60), x = patch, group=as.factor(win), color = as.factor(win))) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
### cs and xp development
#cs diffs
df = adc.performance.patch[championId %in% relchamps.adc, list(name, patch, csDiffPerMinTen, csDiffPerMinTwenty, csDiffPerMinThirty)]
melt(df, id.vars=c('name','patch'), measure.vars=colnames(df[,-c("name","patch")])) %>%
ggplot(aes(y = value, x = variable, group = name)) +
geom_line() + scale_x_discrete(labels=c("10","20","30")) +
facet_grid(name~factor(patch, levels = patchOrder)) +
ggtitle("Creep Score Differential for each Champion and Patch")
#xp Diffs
df = adc.performance.patch[championId %in% relchamps.adc, list(name, patch, xpDiffPerMinTen, xpDiffPerMinTwenty, xpDiffPerMinThirty)]
melt(df, id.vars=c('name','patch'), measure.vars=colnames(df[,-c("name","patch")])) %>%
ggplot(aes(y = value, x = variable, group = name)) +
geom_line() + scale_x_discrete(labels=c("10","20","30")) +
facet_grid(name~factor(patch, levels = patchOrder)) +
ggtitle("Experience Differential for each Champion and Patch")
# cs deltas
df = adc.performance.patch[championId %in% relchamps.adc, list(name, patch, csPerMinDeltaTen, csPerMinDeltaTwenty, csPerMinDeltaThirty)]
melt(df, id.vars=c('name','patch'), measure.vars=colnames(df[,-c("name","patch")])) %>%
ggplot(aes(y = value, x = variable, group = name)) +
geom_line() + scale_x_discrete(labels=c("0-10","10-20","20-30")) +
facet_grid(name~factor(patch, levels = patchOrder)) +
ggtitle("Creep Score Per Minute Delta for each Champion and Patch")
## first blood
adc.performance.patch[championId %in% relchamps.adc] %>%
ggplot(aes(y = firstBloodKill + firstBloodAssist, x = patch, group=name)) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
adc.performance.patch.win[championId %in% relchamps.adc] %>%
ggplot(aes(y = firstBloodKill + firstBloodAssist, x = patch, group=as.factor(win), color=as.factor(win))) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
# first tower
adc.performance.patch[championId %in% relchamps.adc] %>%
ggplot(aes(y = firstTowerKill + firstTowerAssist, x = patch, group=name)) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
#first inhib
adc.performance.patch[championId %in% relchamps.adc] %>%
ggplot(aes(y = firstInhibitorKill + firstInhibitorAssist, x = patch, group=name)) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
## winrate by game duration
# Winrate conditional on game-length buckets (variableNew = minute mark).
df = adc.performance.patch[,list(name, patch, winrate, winrateAt25, winrateAt30, winrateAt35, winrateAt40, winrateAt45, winrateOver45)]
df1 = df %>%
rownames_to_column(var = "champ") %>%
melt(id.vars=c('champ', 'name', 'patch'), measure.vars = colnames(df[,-c("name", "patch")]))
df1$variableNew <- c(0,25,30,35,40,45,50)[match(df1$variable, c("winrate", "winrateAt25", "winrateAt30", "winrateAt35", "winrateAt40","winrateAt45", "winrateOver45"))]
winrate_scale = c("winrateAt25", "winrateAt30", "winrateAt35", "winrateAt40","winrateAt45", "winrateOver45")
p = ggplot(data = df1[variableNew!=0], aes(x = variableNew, y=value, color = name)) +
geom_point() +
geom_smooth(method="auto") +
facet_grid(name ~ factor(patch, levels = patchOrder)) +
scale_y_continuous(limits=c(0,1)) +
scale_x_continuous(limits = c(20,55), breaks = seq(25,50,5), labels=c( "25", "30", "35" ,"40", "45", ">45")) +
theme(legend.position="none")
p
|
/R-Project/viz.R
|
no_license
|
ShabdizGUni/MasterThesis
|
R
| false
| false
| 49,138
|
r
|
# Load packages, DB connection and shared constants (patchOrder, tiers_order,
# data tables) used throughout this script.
source("initialize.R")
###############################################################################
###############################################################################
############################ SOLO QUEUE #######################################
###############################################################################
###############################################################################
############################ GENERAL STATS ####################################
## Distribution of Skill Tiers:
# Aggregate player counts per tier and draw a tier-colored bar chart.
tiers_agg <- data.table(tiers_stats)[,list(count = sum(count)), by=list(tier)]
p <- ggplot(data = tiers_agg, aes(x=tier, y=count/1000, fill=tier)) +
geom_bar(stat='Identity') +
labs(x ="Skill Tier", y = "#Players in thousands") +
scale_x_discrete(limits=(tiers_order)) +
theme_bw() +
theme(legend.position="none",
axis.text.x = element_text(size=8)) +
scale_fill_manual(values=c("CHALLENGER"="orange",
"MASTER"="#966F33",
"DIAMOND"="#cbe3f0",
"PLATINUM"="#A0BFB4",
"GOLD"="#e6c200",
"SILVER"="#c0c0c0",
"BRONZE"="#cd7f32",
"UNRANKED"="black"))
p
## with patch
#line
# Order the tier factor so line colors/legend follow tiers_order.
tiers_stats$order <- factor(tiers_stats$tier, levels = tiers_order)
tiers_stats <- tiers_stats[order(tiers_stats$order),]
p <- ggplot(data = tiers_stats, aes(x=patch, y=count, group=order, color=factor(tier, levels=tiers_order))) +
geom_line(stat='Identity') +
scale_x_discrete(limits=patchOrder)
p
#bar
# Stacked bars: players per tier for each patch.
p <- ggplot(data = tiers_stats, aes(x=patch, y=count/1000, group=factor(tier, levels=tiers_order), fill=factor(tier, levels=tiers_order))) +
geom_bar(stat='Identity') +
scale_x_discrete(limits=patchOrder) +
labs( y = "#players in thousands") +
theme(title=element_text(size=20),
legend.title = element_text(size=16),
axis.title = element_text(size=12)) +
scale_fill_manual("Skill Tier", values=c("CHALLENGER"="orange",
"MASTER"="#966F33",
"DIAMOND"="#cbe3f0",
"PLATINUM"="#A0BFB4",
"GOLD"="#e6c200",
"SILVER"="#c0c0c0",
"BRONZE"="#cd7f32",
"UNRANKED"="black"))
p
# Tier distribution per region, read from the database.
pop_tier_platform <- data.table(dbReadTable(connection, "population_region_tier"))
pop_tier_platform %>%
group_by(tier, platformId) %>%
summarise(count = sum(count)) -> ptp
ptp$order <- factor(ptp$tier, levels = tiers_order)
ptp <- ptp[order(ptp$order),]
p <- ggplot(data = ptp, aes(x=order, y=count/1000, group=order, fill=order)) +
geom_bar(stat='Identity') +
facet_wrap(~platformId, nrow=2) +
scale_x_discrete(limits=tiers_order) +
labs( y = "#players in thousands",
x = "Skill Tier")+
theme_bw() +
theme(title=element_text(size=12),
legend.title = element_text(size=8),
axis.title = element_text(size=8),
axis.text.x = element_blank(),
legend.position = "bottom") +
scale_fill_manual("Skill Tier", values=c("CHALLENGER"="orange",
"MASTER"="#966F33",
"DIAMOND"="#cbe3f0",
"PLATINUM"="#A0BFB4",
"GOLD"="#e6c200",
"SILVER"="#c0c0c0",
"BRONZE"="#cd7f32",
"UNRANKED"="black"))
p
#bar (per region)
# Tier distribution per region *and* patch (stacked bars, region facets).
pop_tier_platform_patch <- data.table(dbReadTable(connection, "population_region_tier_patch"))
pop_tier_platform_patch %>%
group_by(tier, platformId, patch) %>%
summarise(count = sum(count)) -> ptpp
p <- ggplot(data = ptpp, aes(x=patch, y=count/1000, group=factor(tier, levels=tiers_order), fill=factor(tier, levels=tiers_order))) +
geom_bar(stat='Identity') +
facet_wrap(~platformId) +
scale_x_discrete(limits=patchOrder) +
labs( y = "#players in thousands") +
theme_bw() +
theme(title=element_text(size=20),
legend.title = element_text(size=16),
axis.title = element_text(size=12),
axis.text.x = element_text(size=8, angle=45, hjust = 1),
legend.position = "bottom") +
scale_fill_manual("Skill Tier", values=c("CHALLENGER"="orange",
"MASTER"="#966F33",
"DIAMOND"="#cbe3f0",
"PLATINUM"="#A0BFB4",
"GOLD"="#e6c200",
"SILVER"="#c0c0c0",
"BRONZE"="#cd7f32",
"UNRANKED"="black"))
p
## game Duration
# Match-duration analysis. gameDuration is divided by 60 throughout, so it is
# presumably stored in seconds -- TODO confirm against the schema.
time_info <- data.table(dbGetQuery(connection, "SELECT * FROM matchdetails"))
maxDuration <- round(max(time_info$gameDuration)/60,2)
meanDuration <- round(mean(time_info$gameDuration/60),2)
minDuration <- round(min(time_info$gameDuration/60),2)
# Histogram of durations with min/mean/max reference lines and labels.
p <- ggplot(data = time_info, aes(round(gameDuration/60), fill = "#00BFFF")) +
geom_histogram(aes(y= ..count.. /1000), breaks = c(seq(0,maxDuration, by=1)),fill = "#56B4E9") +
scale_x_continuous(breaks=seq(0,maxDuration,5)) +
geom_vline(xintercept = round(minDuration), linetype=2) +
geom_vline(xintercept = round(meanDuration), linetype=2) +
geom_vline(xintercept = round(maxDuration), linetype=2) +
annotate("text", x = 70, y = 9, label = paste("Max: ",toString(maxDuration), " Minutes")) +
annotate("text", x = 70, y = 5, label = paste("Mean: ",toString(meanDuration), " Minutes")) +
annotate("text", x = 70, y = 1, label = paste("Min: ",toString(minDuration), " Minutes")) +
labs(x = "Match Duration in Minutes", y = "#matches in thousands") +
theme_bw() +
theme(title=element_text(size=20),
legend.title = element_text(size=16),
axis.title = element_text(size=12))
p
# histograms for each platform
p <- ggplot(data = time_info, aes(round(gameDuration/60))) +
geom_histogram(aes(y = ..count../1000),breaks = c(seq(0,maxDuration, by=1)),fill = "#56B4E9") +
scale_x_continuous(breaks=seq(0,maxDuration,5)) +
labs(x = "Match Duration", y = "#matches in thousands") +
theme_bw() +
theme(title=element_text(size=20),
legend.title = element_text(size=16),
axis.title = element_text(size=12)) +
facet_grid(platformId~.) +
scale_fill_brewer(palette = "Blues")
p
# brazil has more than double the amount of remakes
# (matches shorter than 5 minutes counted per region)
time_info[gameDuration/60 < 5] %>% group_by(platformId) %>% summarise(count = n())
# Quantile summaries of match duration, overall and per region.
time_info %>%
summarise(minDuration = min(gameDuration/60),
"10qDuration" = quantile(gameDuration/60 , .10),
"25qDuration" = quantile(gameDuration/60 , .25),
"Median" = quantile(gameDuration/60 , .5),
"75Duration" = quantile(gameDuration/60 , .75),
"90qDuration" = quantile(gameDuration/60 , .90),
maxDuration = max(gameDuration/60)
) -> time_info_overall
time_info_overall
time_info %>%
group_by(platformId) %>%
summarise(minDuration = min(gameDuration/60),
"25qDuration" = quantile(gameDuration/60 , .25),
meanDuration = mean(gameDuration/60),
"75Duration" = quantile(gameDuration/60 , .75),
maxDuration = max(gameDuration/60)
) -> time_info_agg
time_info_agg
# Boxplots of duration per region.
p <- ggplot(data = time_info, aes(x = platformId, y=round(gameDuration/60), group=platformId)) +
geom_boxplot() +
labs(x = "Match Duration", y = "#matches",
title = "Distribution of Match Durations") +
theme_bw() +
theme(title=element_text(size=20),
legend.title = element_text(size=16),
axis.title = element_text(size=12)) +
#facet_wrap(~platformId, nrow=2)+
scale_fill_brewer(palette = "Blues")
p
# time by tier
# Duration per skill tier (quantile table + tier-colored boxplots).
time_by_tier <- data.table(dbGetQuery(connection, "SELECT tier, patch, gameDuration, gameCreation from playerdetails"))
time_by_tier %>%
group_by(tier) %>%
summarise(minDuration = min(gameDuration/60),
"25qDuration" = quantile(gameDuration/60 , .25),
meanDuration = mean(gameDuration/60),
"75Duration" = quantile(gameDuration/60 , .75),
maxDuration = max(gameDuration/60)
) -> time_by_tier_agg
p <- ggplot(data = time_by_tier, aes(x = tier, y=round(gameDuration/60), group=tier, fill=tier)) +
geom_boxplot() +
labs(x = "Skill Tier", y = "Match Duration in Minutes") +
scale_x_discrete(limits=tiers_order) +
scale_y_continuous(breaks=seq(0,maxDuration, 5)) +
theme_bw() +
theme(legend.position = "None",
title=element_text(size=20),
legend.title = element_text(size=16),
axis.title = element_text(size=12)) +
scale_fill_manual("Skill Tier", values=c("CHALLENGER"="orange",
"MASTER"="#966F33",
"DIAMOND"="#cbe3f0",
"PLATINUM"="#A0BFB4",
"GOLD"="#e6c200",
"SILVER"="#c0c0c0",
"BRONZE"="#cd7f32",
"UNRANKED"="black"))
p
# time by tier and region
# Duration broken down by skill tier AND region.
time_by_tier_region <- data.table(dbGetQuery(connection, "SELECT platformId, tier, patch, gameDuration, gameCreation from playerdetails"))
time_by_tier_region %>%
group_by(tier, platformId) %>%
summarise(minDuration = min(gameDuration/60),
"25qDuration" = quantile(gameDuration/60 , .25),
meanDuration = mean(gameDuration/60),
"75Duration" = quantile(gameDuration/60 , .75),
maxDuration = max(gameDuration/60)
) -> time_by_tier_agg
# Flag exact 20:00 / 21:00 durations (1200/1260 s) -- surrender marks,
# presumably; note the "clr" column is not used by the plots below.
time_by_tier_region$clr = ifelse(time_by_tier_region$gameDuration %in% c(1200,1260), "red", "white")
# Tier boxplots, one facet per region.
p <- ggplot(data = time_by_tier_region, aes(x = tier, y=round(gameDuration/60), group=tier, fill=tier)) +
geom_boxplot() +
facet_wrap(~platformId, nrow=2) +
labs(x = "Skill Tier", y = "Match Duration") +
scale_x_discrete(limits=tiers_order) +
scale_y_continuous(breaks=seq(0,maxDuration, 10)) +
theme_bw() +
theme(legend.position = "None",
title=element_text(size=20),
legend.title = element_text(size=16),
axis.title = element_text(size=12),
axis.text.x = element_blank()) +
scale_fill_manual("Skill Tier", values=c("CHALLENGER"="orange",
"MASTER"="#966F33",
"DIAMOND"="#cbe3f0",
"PLATINUM"="#A0BFB4",
"GOLD"="#e6c200",
"SILVER"="#c0c0c0",
"BRONZE"="#cd7f32",
"UNRANKED"="black"))
p
# Duration histograms, one facet per tier, colored by tier.
p <- ggplot(data = time_by_tier_region, aes(round(gameDuration/60),
group=factor(tier, levels=tiers_order),
fill=factor(tier, levels=tiers_order))) +
geom_histogram(aes(y=..count../1000), breaks = c(seq(0,maxDuration, by=1))) +
scale_x_continuous(breaks=seq(0,maxDuration,10)) +
facet_wrap(~factor(tier, levels=tiers_order), ncol=2) +
labs(x = "Skill Tier", y = "Match Duration") +
theme_bw() +
theme(legend.position = "None",
axis.title = element_text(size=12),
axis.text.x = element_text(size=6)) +
scale_fill_manual("Skill Tier", values=c("CHALLENGER"="orange",
"MASTER"="#966F33",
"DIAMOND"="#cbe3f0",
"PLATINUM"="#A0BFB4",
"GOLD"="#e6c200",
"SILVER"="#c0c0c0",
"BRONZE"="#cd7f32",
"UNRANKED"="black"))
p
# time by patch
# Histogram of match durations, faceted by patch.
p <- ggplot(data = time_by_tier, aes(x=round(gameDuration/60))) +
geom_histogram() +
theme_bw() +
facet_wrap(~factor(patch, levels=patchOrder))
p
# Number of matches per patch.
# FIX: the original used sum(count), but time_by_tier (selected at the top
# of this section) has no "count" column, so that call errored at runtime;
# n() counts the rows per patch directly.
time_by_tier %>%
group_by(patch) %>%
summarise(count = n())
## game Creation
time_info$gameDates <- as.Date(as.POSIXct(time_info$gameCreation/1000, origin="1970-01-01"))
minDate <- min(time_info$gameDates)
maxDate <- max(time_info$gameDates)
p <- ggplot(data = time_info, aes(gameDates)) +
geom_histogram(breaks = c(seq(as.numeric(minDate),as.numeric(maxDate),by=1))) +
scale_x_date(date_labels = "%b %d", breaks = date_breaks("1 months"), date_minor_breaks = "1 day") +
labs(x = "Match Date", y = "#matches",
title = "Distribution of Matches played through the year") +
theme_bw() +
theme(title=element_text(size=20),
legend.title = element_text(size=16),
axis.title = element_text(size=12),
axis.text.x = element_text(angle = 90)) +
scale_fill_brewer(palette = "Blues")
p
# Order patches chronologically (patchOrder is the canonical patch sequence).
time_info$patch2 <- factor(time_info$patch, levels = patchOrder)
# Matches per day and patch.
time_info %>%
group_by(gameDates, patch) %>%
summarise(Frequency = n()) -> time_info_patch
# Matches per day, patch and region.
time_info %>%
group_by(gameDates, patch, platformId) %>%
summarise(Frequency = n()) -> time_info_regionPatch
# Stacked-area timeline of daily match counts, faceted by patch.
# NOTE(review): geom_area() has no `breaks` parameter; ggplot2 ignores it
# with a warning -- consider removing it.
p <- ggplot(data = time_info_patch, aes(gameDates, Frequency, group=patch)) +
facet_grid(.~factor(time_info_patch$patch, levels = patchOrder),scale="free", space="free", switch = "x") +
geom_area(breaks = c(seq(as.numeric(minDate),as.numeric(maxDate),by=1))) +
scale_x_date(date_labels = "%b %d", breaks = date_breaks("22 days"), date_minor_breaks = "1 day", expand = c(0,0)) +
labs(x = "Match Date and Patch", y = "#matches",
title = "Distribution of Matches played through the year") +
theme_bw() +
theme(title=element_text(size=20),
legend.title = element_text(size=16),
axis.title = element_text(size=12),
axis.text.x = element_text(size= 16, angle = 90),
axis.text.y = element_text(size= 16),
panel.spacing.x = unit(0,"line"),
strip.placement = "outside") +
scale_fill_manual(values = c("blue"))
p
# Same timeline, but semi-transparent areas overlaid per region.
p <- ggplot(data = time_info_regionPatch, aes(gameDates, Frequency, group=platformId, fill=platformId)) +
facet_grid(.~factor(time_info_regionPatch$patch, levels = patchOrder),scale="free", space="free", switch = "x") +
geom_area(position='identity', alpha=0.5, breaks = c(seq(as.numeric(minDate),as.numeric(maxDate),by=1))) +
scale_x_date(date_labels = "%b %d", breaks = date_breaks("22 days"), date_minor_breaks = "1 day", expand = c(0,0)) +
labs(x = "Match Date and Patch", y = "#matches",
title = "Distribution of Matches played through the year") +
theme_bw() +
theme(title=element_text(size=20),
legend.title = element_text(size=16),
axis.title = element_text(size=12),
axis.text.x = element_text(size= 16, angle = 90),
axis.text.y = element_text(size= 16),
panel.spacing.x = unit(0,"line"),
strip.placement = "outside") #+
#scale_fill_manual(values = c("blue"))
p
# NOTE(review): exact duplicate of the per-patch area plot above.
p <- ggplot(data = time_info_patch, aes(gameDates, Frequency, group=patch)) +
facet_grid(.~factor(time_info_patch$patch, levels = patchOrder),scale="free", space="free", switch = "x") +
geom_area(breaks = c(seq(as.numeric(minDate),as.numeric(maxDate),by=1))) +
scale_x_date(date_labels = "%b %d", breaks = date_breaks("22 days"), date_minor_breaks = "1 day", expand = c(0,0)) +
labs(x = "Match Date and Patch", y = "#matches",
title = "Distribution of Matches played through the year") +
theme_bw() +
theme(title=element_text(size=20),
legend.title = element_text(size=16),
axis.title = element_text(size=12),
axis.text.x = element_text(size= 16, angle = 90),
axis.text.y = element_text(size= 16),
panel.spacing.x = unit(0,"line"),
strip.placement = "outside") +
scale_fill_manual(values = c("blue"))
p
# Per-region view of daily match counts; the commented subset restricts to one patch.
time_info_patch2 <- time_info_regionPatch#[which(time_info_regionPatch$patch=="7.2"),]
# Free-scale facet per patch, counts in thousands, one coloured area per region.
# NOTE(review): referencing time_info_patch2$patch inside facet_wrap (instead of
# the bare column name) is fragile if ggplot reorders rows -- works here, though.
p <- ggplot(data = time_info_patch2, aes(gameDates, Frequency/1000, group=platformId, fill=platformId)) +
facet_wrap(~factor(time_info_patch2$patch, levels = patchOrder),scale="free", nrow=4) +
geom_area(alpha=0.6) +
scale_x_date(date_labels = "%a %m-%d", breaks = date_breaks("7 days"), date_minor_breaks = "1 day", expand = c(0,0)) +
scale_y_continuous(limits = c(0, 4)) +
scale_fill_discrete(name = "Region", labels = c("Brazil", "Europe West", "Korea", "North America")) +
labs(x = "Match Date and Patch", y = "#matches",
title = "Distribution of Matches played through season 2017 in thousands") +
#facet_wrap(~factor(time_info_patch2$patch, levels = patchOrder), nrow=4) +
theme_bw() +
theme(title=element_text(size=20),
legend.title = element_text(size=16),
axis.title = element_text(size=12),
axis.text.x = element_text(size=6),
axis.text.y = element_text(size=8),
panel.spacing.x = unit(1,"line"),
strip.placement = "outside")
p
# Daily match counts broken down by ranked skill tier.
time_info_tier <- adc[,list(platformId, patch, tier, gameCreation)]
# gameCreation is epoch milliseconds -> convert to Date.
time_info_tier$gameDates <- as.Date(as.POSIXct(time_info_tier$gameCreation/1000, origin="1970-01-01"))
time_info_tier$order <- factor(time_info_tier$tier, levels = tiers_order)
time_info_tier$patch2 <- factor(time_info_tier$patch, levels = patchOrder)
time_info_tier %>%
group_by(gameDates, patch2, order) %>%
summarise(Frequency = n()) -> time_info_tier
# Stacked area per tier, coloured with the familiar rank colours.
p <- ggplot(data = time_info_tier, aes(gameDates, Frequency, group=order, fill=order)) +
#facet_grid(.~factor(time_info_patch$patch, levels = patchOrder),scale="free", space="free", switch = "x") +
geom_area() +
scale_x_date(date_labels = "%a %b %d", breaks = date_breaks("4 days"), date_minor_breaks = "1 day", expand = c(0,0), position="top") +
labs(x = "Match Date and Patch", y = "#matches",
title = "Distribution of Matches played through the year") +
theme_bw() +
theme(title=element_text(size=20),
legend.title = element_text(size=16),
axis.title = element_text(size=12),
axis.text.x = element_text(size= 16, angle = 90),
axis.text.y = element_text(size= 16),
panel.spacing.x = unit(0,"line"),
strip.placement = "outside") +
scale_fill_manual("Skill Tier", values=c("CHALLENGER"="orange",
"MASTER"="#966F33",
"DIAMOND"="#cbe3f0",
"PLATINUM"="#A0BFB4",
"GOLD"="#e6c200",
"SILVER"="#c0c0c0",
"BRONZE"="#cd7f32",
"UNRANKED"="black"))
p
############################ TOP ####################################
### Distribution in Season 7
# Overall pick counts per champion, sorted descending.
# NOTE(review): `order(by=gamesPlayed, decreasing = T)` passes `by=` into
# order()'s `...`; it works but `by=` is meaningless here, and T should be TRUE.
p <- ggplot(data=top.distribution,aes(x=names, y=gamesPlayed)) +
geom_bar(stat='identity') +
scale_x_discrete(limits=top.distribution[order(by=gamesPlayed,decreasing = T)]$names) +
theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(size=6))
p
# Top-20 picks per patch, one bar chart per patch arranged in a grid.
p <- list()
for(i in 1:length(patchOrder)){
df= top.distribution.patch[patch==patchOrder[i]][order(by=gamesPlayed,decreasing = T)][1:20]
p[[i]] <- ggplot(df, aes(x=name, y=gamesPlayed)) +
geom_bar(stat = "identity") +
scale_x_discrete(limits=df[order(by=gamesPlayed,decreasing = T)]$name) +
scale_y_continuous(limits=c(0,2000)) +
theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(size=6)) +
ggtitle("Patch",paste0(patchOrder[i]))
}
do.call("grid.arrange", c(p, ncol= 5))
# Same data faceted by patch instead of arranged manually.
p <- ggplot(data=top.distribution.patch,aes(x=names, y=gamesPlayed)) +
geom_bar(stat='identity') +
scale_x_discrete(limits=top.distribution[order(by=gamesPlayed,decreasing = T)]$names) +
theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(size=6)) +
facet_wrap(.~patch, scales="free")
p
# Raw pick counts per patch via champLookUp join.
p <- ggplot(data=merge(top[,c("championId", "patch")], champLookUp, "championId") , aes(names)) +
geom_bar() + facet_wrap(~ patch, scales="free")
p
####PER PATCH AND REGION
p <- ggplot(data = top.relevant) +
geom_bar(aes(x=as.vector(champ[as.character(top.relevant$championId)]), fill = championId)) +
facet_wrap( ~ platformId+factor(patch, levels = patchOrder), nrow=4) +
theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(size=6)) +
coord_flip() +
guides(fill=F)
p+ ggtitle("Toplane Picks per Patch and Region")
# NOTE(review): duplicate of the overall distribution plot above.
p <- ggplot(data=top.distribution,aes(x=names, y=gamesPlayed)) +
geom_bar(stat='identity') +
scale_x_discrete(limits=top.distribution[order(by=gamesPlayed,decreasing = T)]$names) +
theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(size=6))
p
#PER CHAMP
# Pick-count trend per champion and region across patches.
p <- ggplot(data = top.relevant, aes(x = patch, group=platformId, color=platformId)) +
geom_line(stat = "count") +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~ championId, ncol = 5) +
theme_igray() + scale_colour_tableau("colorblind10")
p
# Pick-count trend for a single champion (championId 39).
# FIX: the original used top.relevant[, championId == 39], which in data.table
# evaluates the expression in j and yields a logical vector, not a row subset;
# the filter belongs in i. It also had geom_line(y = mean(win)), which
# evaluates mean(win) eagerly in the calling environment (no `win` object
# exists there), so that layer errored and has been removed.
p <- ggplot(data = top.relevant[championId == 39], aes(x = patch, group=platformId, color=platformId)) +
geom_line(stat = "count") +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~ championId, ncol = 5) +
theme_dark() +
theme(plot.background = element_rect("grey"))
p
# Winrate (dashed) and play count (scaled by 1/2000, solid) per champion/region.
# NOTE(review): the /2000 rescaling puts games on the winrate axis; there is no
# secondary axis, so the scale of the solid line is implicit.
p <- ggplot(data = top.performance, aes(x = patch, y=winrate, group=platformId, color=platformId)) +
geom_line(linetype = 2) +
geom_line(data = top.performance , aes(y = games/2000)) +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~ name, ncol = 5) +
theme_dark() +
theme(plot.background = element_rect("grey"))
p + ggtitle("top Picks per Patch and Region")
########################### JUNGLE ##################################
#overall distribution in season 7
# Jungle pick counts sorted descending.
p <- ggplot(data=jungle.distribution,aes(x=names, y=gamesPlayed)) +
geom_bar(stat='identity') +
scale_x_discrete(limits=jungle.distribution[order(by=gamesPlayed,decreasing = T)]$names) +
theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(size=6))
p
#PER PATCH AND REGION
p <- ggplot(data = jungle.relevant) +
geom_bar(aes(x=as.vector(champ[as.character(jungle.relevant$championId)]), fill = championId)) +
facet_wrap( ~ platformId+factor(patch, levels = patchOrder), nrow=4) +
theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(size=6)) +
coord_flip() +
guides(fill=F)
p+ ggtitle("Jungle Picks per Patch and Region")
# Winrate (dashed) plus games/2000 (solid) per jungle champion and region.
p <- ggplot(data = jungle.performance, aes(x = patch, y=winrate, group=platformId, color=platformId)) +
geom_line(linetype = 2) +
geom_line(data = jungle.performance , aes(y = games/2000)) +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~ name, ncol = 5) +
theme_dark() +
theme(plot.background = element_rect("grey"))
p + ggtitle("jungle Picks per Patch and Region")
########################### MID ##################################
#overall distribution in season 7
# Mid-lane pick counts sorted descending.
p <- ggplot(data=mid.distribution,aes(x=names, y=gamesPlayed)) +
geom_bar(stat='identity') +
scale_x_discrete(limits=mid.distribution[order(by=gamesPlayed,decreasing = T)]$names) +
theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(size=6))
p
#PER PATCH AND REGION
# Mid-lane pick counts per patch and region.
# FIX: the title said "Jungle Picks" -- copy-paste leftover from the JUNGLE
# section above; this plot is built from mid.relevant.
p <- ggplot(data = mid.relevant) +
geom_bar(aes(x=as.vector(champ[as.character(mid.relevant$championId)]), fill = championId)) +
facet_wrap( ~ platformId+factor(patch, levels = patchOrder), nrow=4) +
theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(size=6)) +
coord_flip() +
guides(fill=F)
p+ ggtitle("Mid Lane Picks per Patch and Region")
# Winrate (dashed) plus games/2000 (solid) per mid-lane champion and region.
p <- ggplot(data = mid.performance, aes(x = patch, y=winrate, group=platformId, color=platformId)) +
geom_line(linetype = 2) +
geom_line(data = mid.performance , aes(y = games/2000)) +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~ name, ncol = 5) +
theme_dark() +
theme(plot.background = element_rect("grey"))
p + ggtitle("Mid Lane Picks per Patch and Region")
############################ ADC ####################################
####DISTRIBUTIONS OF CHAMPIONS
#overall distribution in season 7
# ADC pick counts sorted descending.
p <- ggplot(data=adc.distribution,aes(x=names, y=gamesPlayed)) +
geom_bar(stat='identity') +
scale_x_discrete(limits=adc.distribution[order(by=gamesPlayed,decreasing = T)]$names) +
theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(size=6))
p
#Barchart
# Playrate in percent (games/2000 games per region-patch cell * 100).
# NOTE(review): stat="Identity" happens to resolve to StatIdentity, but the
# conventional spelling is stat="identity".
p <- ggplot(data = adc.performance, aes(x=name, y=games/2000 *100, fill = name)) +
geom_bar(stat="Identity") +
facet_wrap( ~ platformId+factor(patch, levels = patchOrder), nrow=4) +
theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(size=6)) +
labs(x = "Champion", y = "Playrate in Percentages") +
coord_flip() +
guides(fill=F)
p+ ggtitle("ADC Picks per Patch and Region")
#Linechart
# Playrate trend for the relevant ADCs only (note the /10000 scaling here,
# vs /2000 in the bar chart above -- presumably aggregated over regions).
p <- ggplot(data = adc.performance[championId %in% relchamps.adc], aes(x = patch, y=games/10000 * 100, group=platformId, color=platformId)) +
geom_line(linetype = 1) +
#geom_line(data = adc.performance , aes(y = winrate), linetype = 2) +
scale_x_discrete(limits=patchOrder) +
theme(axis.text.x = element_text(size=5)) +
labs(x = "Patch", y = "Playrate in Percentage") +
facet_wrap(~ name, ncol = 4)
p + ggtitle("ADC Picks per Patch and Region")
# Top-20 ADC picks per patch, arranged in a grid.
p <- list()
for(i in 1:length(patchOrder)){
df= adc.distribution.patch[patch==patchOrder[i]][order(by=gamesPlayed,decreasing = T)][1:20]
p[[i]] <- ggplot(df, aes(x=name, y=gamesPlayed)) +
geom_bar(stat = "identity") +
scale_x_discrete(limits=df[order(by=gamesPlayed,decreasing = T)]$name) +
scale_y_continuous(limits=c(0,5000)) +
theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(size=6)) +
ggtitle("Patch",paste0(patchOrder[i]))
}
do.call("grid.arrange", c(p, ncol= 5))
# Hand-picked set of meta ADCs used for the item analyses below.
adc.set1 <- c("Ashe","Caitlyn", "Draven", "Ezreal", "Kog'Maw", "Lucian", "Jhin", "Tristana", "Vayne", "Varus", "Xayah")
adc.set <- adc.set1
# Item pick counts per champion/patch/item (only the first four champions).
df <- items.adc[championName %in% adc.set[1:4]][, list(count=.N), by = c("championName", "patch", "itemName")]
setkeyv(df, c("championName", "patch"))
# Total games per champion/patch, to turn counts into per-game frequencies.
championCount <- items.adc[championName %in% adc.set,list(championName, patch)][,list(gamesPlayed = .N), by = c("championName", "patch")]
setkeyv(championCount, c("championName", "patch"))
df <- merge(df, championCount, by= c("championName","patch"))
df$perc <- df$count/df$gamesPlayed
# Item frequency per champion (rows) and patch (columns).
p <- ggplot(data = df) +
geom_bar(stat= "identity", aes(x=itemName, y=perc)) +
facet_grid(championName ~ factor(patch, levels = patchOrder)) +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size = 6), axis.text.y = element_text(size=6)) +
scale_x_discrete(limits=relItems.ADC) +
coord_flip() +
guides(fill=FALSE)
p
# just an idea, but unfortunately nothing is really discernible here.
p <- ggplot(data = df[itemName %in% relItems.ADC],aes(x=factor(patch, levels = patchOrder), fill = factor(itemName))) +
geom_bar(width=0.9, position="fill") +
facet_grid(. ~ championName) +
theme_igray() + scale_fill_manual(values = colorRampPalette(brewer.pal(8, "Accent"))(length(relItems.ADC)),
guide = guide_legend(nrow=2)) +
theme(legend.position="bottom")
p + coord_polar(theta = "y")
### attempt to illustrate specific traits of adcs
# OVERALL
# Select and rename the per-champion/patch performance metrics of interest.
dfprep = adc.performance.patch %>%
mutate(DPS = totalDamageToChampions/gameDuration) %>%
select(
name, patch,
games,
summoners,
winrate,
DPS,
DmgDealt = totalDamageToChampions,
kills,
assists,
deaths,
DmgTaken = totalDamageTaken,
cs = csPerGame,
gold = goldEarned
)
dfprep = data.table(dfprep)
# Rescale every metric to [0,1] and melt to long format for radar/bar charts.
# NOTE(review): mutate_each()/funs() are deprecated in modern dplyr; the
# replacement is mutate(across(...)).
df = dfprep %>%
rownames_to_column( var = "champ" ) %>%
mutate_each(funs(rescale), -c(champ,name,patch)) %>%
melt(id.vars=c('champ','name','patch'), measure.vars=colnames(dfprep[,-c("name","patch")])) %>%
arrange(champ)
df = data.table(df)
#radar charts: better filter out some champs
# FIX: the original chain ended at a bare geom_polygon(), which terminated the
# statement; the following geom_polygon(fill=NA) + ... was a dangling expression
# that errored. Rebuilt as one chain with the intended hollow polygons.
df[name %in% c(adc.set1,adc.set2)] %>%
ggplot(aes(x=variable, y=value, group=name, color=name)) +
geom_polygon(fill=NA) +
coord_radar() + theme_bw() + facet_grid(name~factor(patch, levels=patchOrder)) +
#scale_x_discrete(labels = abbreviate) +
theme(axis.text.x = element_text(size = 5), legend.position="none")
#bar chart perspective
# Same rescaled metrics as stacked bars, dashed reference line at 0.5.
df %>%
ggplot(aes(x=variable, y=value, group= name, fill = name)) +
geom_bar(stat="identity") +
geom_line(y = 0.5, linetype =2, color = "black") +
facet_grid(factor(patch, levels=patchOrder)~name) +
coord_flip() +
theme_igray() + scale_fill_manual(values = colorRampPalette(brewer.pal(8, "Accent"))(length(unique(df$name)))) +
theme(axis.text.y = element_text(size = 5), legend.position="none")
# ggRadar variant (ggiraphExtra): one radar per patch, arranged in a grid.
# NOTE(review): mutate_each() is deprecated, and rescale = F should be FALSE.
dfprep %>%
mutate_each(funs(rescale), -c(name, patch)) %>% data.table() -> df_radar
p <- list()
for(j in 1:length(patchOrder))
p[[j]] <- ggRadar(df_radar[name %in% adc.set & patch == patchOrder[j]], aes(color=name), rescale = F, ylim=c(0,1))
do.call("grid.arrange", c(p, nrow = 4 ))
## attempt to scale into 3d and illustrate
# Classical MDS (cmdscale) of the numeric performance metrics into 3D,
# clustered with k-means and shown interactively via rgl.
adc.performance.patch %>%
rownames_to_column(var="id") -> adc.cmd
adc.cmd.numerics = adc.cmd[,-c("name", "championId", "lane", "role", "patch")]
setkey(adc.cmd.numerics, "id")
adc.cmd.dist <- dist(adc.cmd.numerics[,-1])
# FIX: the original `colnames(adc.cmd.dist) = rownames` assigned the base
# function `rownames` and errors on a dim-less "dist" object. Attach the row
# ids as the dist Labels instead (data.tables do not carry rownames, so the
# old `rownames(adc.cmd.numerics) = ...` line was a no-op and was dropped).
attr(adc.cmd.dist, "Labels") <- as.character(adc.cmd.numerics$id)
fit <- cmdscale(adc.cmd.dist, k = 3)
data.table(fit) %>%
rownames_to_column(var = "id") %>%
merge(adc.cmd[,c("id","name", "patch")], by="id") -> fit2
fit2$detailedName = paste0(fit2$name, " ", fit2$patch)
# Try several cluster counts for visual comparison.
kmeans3 = kmeans(x = fit2[,2:4], centers = 3)
kmeans4 = kmeans(x = fit2[,2:4], centers = 4)
kmeans5 = kmeans(x = fit2[,2:4], centers = 5)
kmeans6 = kmeans(x = fit2[,2:4], centers = 6)
fit2$cluster3 = kmeans3$cluster
fit2$cluster4 = kmeans4$cluster
fit2$cluster5 = kmeans5$cluster
fit2$cluster6 = kmeans6$cluster
plot3d(fit2[,2:4], size = 10, col = fit2$cluster6)
text3d(fit2[,2:4], texts = fit2$detailedName, size=2)
## GAME DURATION, DPS AND DAMAGE DONE
# duration of games where champions participated
adc.performance.patch[championId %in% relchamps.adc] %>%
ggplot(aes(y = gameDuration, x = patch, group=name)) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
# damage done to champions
adc.performance.patch[championId %in% relchamps.adc] %>%
ggplot(aes(y = totalDamageToChampions, x = patch, group=name)) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
# dps (damage per minute; gameDuration is in seconds)
adc.performance.patch[championId %in% relchamps.adc] %>%
ggplot(aes(y = totalDamageToChampions/(gameDuration/60), x = patch, group=name)) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
# dps by win/loss
df = adc.performance.patch.win[championId %in% relchamps.adc]
ggplot(df,aes(y = totalDamageToChampions/(gameDuration/60), x = patch, group=as.factor(win), color = as.factor(win))) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
## gold earned and gold per min
adc.performance.patch[championId %in% relchamps.adc] %>%
ggplot(aes(y = goldEarned, x = patch, group=name)) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
adc.performance.patch[championId %in% relchamps.adc] %>%
ggplot(aes(y = goldEarned/(gameDuration/60), x = patch, group=name)) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
# win and loss
df = adc.performance.patch.win[championId %in% relchamps.adc]
ggplot(df,aes(y = goldEarned/(gameDuration/60), x = patch, group=as.factor(win), color = as.factor(win))) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
### cs and xp development
#cs diffs (creep-score differential vs lane opponent at 10/20/30 min)
df = adc.performance.patch[championId %in% relchamps.adc, list(name, patch, csDiffPerMinTen, csDiffPerMinTwenty, csDiffPerMinThirty)]
melt(df, id.vars=c('name','patch'), measure.vars=colnames(df[,-c("name","patch")])) %>%
ggplot(aes(y = value, x = variable, group = name)) +
geom_line() + scale_x_discrete(labels=c("10","20","30")) +
facet_grid(name~factor(patch, levels = patchOrder)) +
ggtitle("Creep Score Differential for each Champion and Patch")
#xp Diffs
df = adc.performance.patch[championId %in% relchamps.adc, list(name, patch, xpDiffPerMinTen, xpDiffPerMinTwenty, xpDiffPerMinThirty)]
melt(df, id.vars=c('name','patch'), measure.vars=colnames(df[,-c("name","patch")])) %>%
ggplot(aes(y = value, x = variable, group = name)) +
geom_line() + scale_x_discrete(labels=c("10","20","30")) +
facet_grid(name~factor(patch, levels = patchOrder)) +
ggtitle("Experience Differential for each Champion and Patch")
# cs deltas (per-minute creep score within each 10-minute window)
df = adc.performance.patch[championId %in% relchamps.adc, list(name, patch, csPerMinDeltaTen, csPerMinDeltaTwenty, csPerMinDeltaThirty)]
melt(df, id.vars=c('name','patch'), measure.vars=colnames(df[,-c("name","patch")])) %>%
ggplot(aes(y = value, x = variable, group = name)) +
geom_line() + scale_x_discrete(labels=c("0-10","10-20","20-30")) +
facet_grid(name~factor(patch, levels = patchOrder)) +
ggtitle("Creep Score Per Minute Delta for each Champion and Patch")
## first blood
# Combined first-blood kill+assist rate per champion across patches.
adc.performance.patch[championId %in% relchamps.adc] %>%
ggplot(aes(y = firstBloodKill + firstBloodAssist, x = patch, group=name)) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
adc.performance.patch.win[championId %in% relchamps.adc] %>%
ggplot(aes(y = firstBloodKill + firstBloodAssist, x = patch, group=as.factor(win), color=as.factor(win))) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
# first tower
adc.performance.patch[championId %in% relchamps.adc] %>%
ggplot(aes(y = firstTowerKill + firstTowerAssist, x = patch, group=name)) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
#first inhib
adc.performance.patch[championId %in% relchamps.adc] %>%
ggplot(aes(y = firstInhibitorKill + firstInhibitorAssist, x = patch, group=name)) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
## winrate by game duration
# Winrate bucketed by game length; variableNew maps bucket names to minutes
# (0 flags the overall winrate, which is excluded from the plot below).
df = adc.performance.patch[,list(name, patch, winrate, winrateAt25, winrateAt30, winrateAt35, winrateAt40, winrateAt45, winrateOver45)]
df1 = df %>%
rownames_to_column(var = "champ") %>%
melt(id.vars=c('champ', 'name', 'patch'), measure.vars = colnames(df[,-c("name", "patch")]))
df1$variableNew <- c(0,25,30,35,40,45,50)[match(df1$variable, c("winrate", "winrateAt25", "winrateAt30", "winrateAt35", "winrateAt40","winrateAt45", "winrateOver45"))]
winrate_scale = c("winrateAt25", "winrateAt30", "winrateAt35", "winrateAt40","winrateAt45", "winrateOver45")
p = ggplot(data = df1[variableNew!=0], aes(x = variableNew, y=value, color = name)) +
geom_point() +
geom_smooth(method="auto") +
facet_grid(name ~ factor(patch, levels = patchOrder)) +
scale_y_continuous(limits=c(0,1)) +
scale_x_continuous(limits = c(20,55), breaks = seq(25,50,5), labels=c( "25", "30", "35" ,"40", "45", ">45")) +
theme(legend.position="none")
p
############################ SUP ####################################
#overall distribution in season 7
# Support pick counts sorted descending.
p <- ggplot(data=sup.distribution,aes(x=names, y=gamesPlayed)) +
geom_bar(stat='identity') +
scale_x_discrete(limits=sup.distribution[order(by=gamesPlayed,decreasing = T)]$names) +
theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(size=6))
p
#Barchart
# Playrate in percent per region/patch cell.
p <- ggplot(data = sup.performance, aes(x=name, y=games/2000 *100, fill = name)) +
geom_bar(stat="Identity") +
facet_wrap( ~ platformId+factor(patch, levels = patchOrder), nrow=4) +
theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(size=6)) +
labs(x = "Champion", y = "Playrate in Percentages") +
coord_flip() +
guides(fill=F)
p+ ggtitle("Support Picks per Patch and Region")
#Linechart
p <- ggplot(data = sup.performance[championId %in% relchamps.sup], aes(x = patch, y=games/2000 * 100, group=platformId, color=platformId)) +
geom_line(linetype = 1) +
#geom_line(data = adc.performance , aes(y = winrate), linetype = 2) +
scale_x_discrete(limits=patchOrder) +
theme(axis.text.x = element_text(size=5)) +
labs(x = "Patch", y = "Playrate in Percentage") +
facet_wrap(~ name, ncol = 4)
p + ggtitle("Support Picks per Patch and Region")
#botlane sup and adc combined:
# For each ADC: which supports are paired with it, as a share of that ADC's games.
botlane %>%
filter(sup.Id %in% relchamps.sup) %>%
group_by(ad,sup,patch) %>%
summarise(count =n()) %>%
left_join(botlane %>% group_by(ad, patch) %>% summarise(adCount = n())) %>%
mutate(perc = count/adCount) %>%
ggplot(aes(x = sup, y = perc)) + geom_bar(stat="identity") +
theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(size=6)) +
coord_flip() +
facet_grid(ad~factor(patch, levels=patchOrder))
###############################################################################
###############################################################################
############################ PRO GAMES #######################################
###############################################################################
###############################################################################
###adc
####DISTRIBUTIONS OF CHAMPIONS
#Barchart
# Tournament-realm platform ids of the major professional leagues.
majorRegions = c("TRLH1", #naLCS
"TRLH3", #euLCS
"TRTW", #lms
"ESPORTSTMNT06", #lck
"ESPORTSTMNT03" #cbLoL and TCL
)
# Pro-play ADC playrates per league and patch.
p <- ggplot(data = adc.pro.performance[platformId %in% majorRegions], aes(x=name, y=playrate *100, fill = name)) +
geom_bar(stat="Identity") +
facet_wrap( ~ platformId+factor(patch, levels = patchOrder), nrow=5) +
theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(size=6)) +
labs(x = "Champion", y = "Playrate in Percentages") +
coord_flip() +
guides(fill=F)
p+ ggtitle("ADC Picks per Patch and Region")
#Linechart
p <- ggplot(data = adc.pro.performance[platformId %in% majorRegions], aes(x = patch, y=playrate * 100, group=platformId, color=platformId)) +
geom_line(linetype = 1) +
#geom_line(data = adc.performance , aes(y = winrate), linetype = 2) +
scale_x_discrete(limits=patchOrder) +
theme(axis.text.x = element_text(size=5)) +
labs(x = "Patch", y = "Playrate in Percentage") +
facet_wrap(~ name, ncol = 4)
p + ggtitle("ADC Picks per Patch and Region")
# NOTE(review): from here on the PRO GAMES section repeats the ranked-ADC
# analysis above verbatim -- it still reads adc.distribution.patch, items.adc
# and adc.performance.patch rather than any pro-game tables. Presumably meant
# to be adapted to the pro data; verify before relying on these plots.
p <- list()
for(i in 1:length(patchOrder)){
df= adc.distribution.patch[patch==patchOrder[i]][order(by=gamesPlayed,decreasing = T)][1:20]
p[[i]] <- ggplot(df, aes(x=name, y=gamesPlayed)) +
geom_bar(stat = "identity") +
scale_x_discrete(limits=df[order(by=gamesPlayed,decreasing = T)]$name) +
scale_y_continuous(limits=c(0,5000)) +
theme(axis.text.x = element_text(angle = 90, hjust = 1), axis.text.y = element_text(size=6)) +
ggtitle("Patch",paste0(patchOrder[i]))
}
do.call("grid.arrange", c(p, ncol= 5))
adc.set1 <- c("Ashe","Caitlyn", "Draven", "Ezreal", "Kog'Maw", "Lucian", "Jhin", "Tristana", "Vayne", "Varus", "Xayah")
adc.set <- adc.set1
df <- items.adc[championName %in% adc.set[1:4]][, list(count=.N), by = c("championName", "patch", "itemName")]
setkeyv(df, c("championName", "patch"))
championCount <- items.adc[championName %in% adc.set,list(championName, patch)][,list(gamesPlayed = .N), by = c("championName", "patch")]
setkeyv(championCount, c("championName", "patch"))
df <- merge(df, championCount, by= c("championName","patch"))
df$perc <- df$count/df$gamesPlayed
p <- ggplot(data = df) +
geom_bar(stat= "identity", aes(x=itemName, y=perc)) +
facet_grid(championName ~ factor(patch, levels = patchOrder)) +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size = 6), axis.text.y = element_text(size=6)) +
scale_x_discrete(limits=relItems.ADC) +
coord_flip() +
guides(fill=FALSE)
p
# just an idea, but unfortunately nothing is really discernible here.
p <- ggplot(data = df[itemName %in% relItems.ADC],aes(x=factor(patch, levels = patchOrder), fill = factor(itemName))) +
geom_bar(width=0.9, position="fill") +
facet_grid(. ~ championName) +
theme_igray() + scale_fill_manual(values = colorRampPalette(brewer.pal(8, "Accent"))(length(relItems.ADC)),
guide = guide_legend(nrow=2)) +
theme(legend.position="bottom")
p + coord_polar(theta = "y")
### attempt to illustrate specific traits of adcs
# OVERALL
dfprep = adc.performance.patch %>%
mutate(DPS = totalDamageToChampions/gameDuration) %>%
select(
name, patch,
games,
summoners,
winrate,
DPS,
DmgDealt = totalDamageToChampions,
kills,
assists,
deaths,
DmgTaken = totalDamageTaken,
cs = csPerGame,
gold = goldEarned
)
dfprep = data.table(dfprep)
df = dfprep %>%
rownames_to_column( var = "champ" ) %>%
mutate_each(funs(rescale), -c(champ,name,patch)) %>%
melt(id.vars=c('champ','name','patch'), measure.vars=colnames(dfprep[,-c("name","patch")])) %>%
arrange(champ)
df = data.table(df)
#radar charts: better filter out some champs
# FIX: same broken chain as in the ranked section -- a bare geom_polygon()
# terminated the statement and left the rest dangling. Rebuilt as one chain.
df[name %in% c(adc.set1,adc.set2)] %>%
ggplot(aes(x=variable, y=value, group=name, color=name)) +
geom_polygon(fill=NA) +
coord_radar() + theme_bw() + facet_grid(name~factor(patch, levels=patchOrder)) +
#scale_x_discrete(labels = abbreviate) +
theme(axis.text.x = element_text(size = 5), legend.position="none")
#bar chart perspective
# Rescaled metrics as stacked bars, dashed reference line at 0.5.
df %>%
ggplot(aes(x=variable, y=value, group= name, fill = name)) +
geom_bar(stat="identity") +
geom_line(y = 0.5, linetype =2, color = "black") +
facet_grid(factor(patch, levels=patchOrder)~name) +
coord_flip() +
theme_igray() + scale_fill_manual(values = colorRampPalette(brewer.pal(8, "Accent"))(length(unique(df$name)))) +
theme(axis.text.y = element_text(size = 5), legend.position="none")
# ggRadar variant: one radar per patch (mutate_each() is deprecated; F -> FALSE).
dfprep %>%
mutate_each(funs(rescale), -c(name, patch)) %>% data.table() -> df_radar
p <- list()
for(j in 1:length(patchOrder))
p[[j]] <- ggRadar(df_radar[name %in% adc.set & patch == patchOrder[j]], aes(color=name), rescale = F, ylim=c(0,1))
do.call("grid.arrange", c(p, nrow = 4 ))
## attempt to scale into 3d and illustrate
# Classical MDS into 3D plus k-means clustering; rgl visualisation.
adc.performance.patch %>%
rownames_to_column(var="id") -> adc.cmd
adc.cmd.numerics = adc.cmd[,-c("name", "championId", "lane", "role", "patch")]
setkey(adc.cmd.numerics, "id")
adc.cmd.dist <- dist(adc.cmd.numerics[,-1])
# FIX: same bug as the ranked section -- `colnames(adc.cmd.dist) = rownames`
# assigned the base function `rownames` and errors on a dim-less "dist"
# object. Attach the ids as the dist Labels instead (the rownames assignment
# on a data.table was a no-op and was dropped).
attr(adc.cmd.dist, "Labels") <- as.character(adc.cmd.numerics$id)
fit <- cmdscale(adc.cmd.dist, k = 3)
data.table(fit) %>%
rownames_to_column(var = "id") %>%
merge(adc.cmd[,c("id","name", "patch")], by="id") -> fit2
fit2$detailedName = paste0(fit2$name, " ", fit2$patch)
# Try several cluster counts for visual comparison.
kmeans3 = kmeans(x = fit2[,2:4], centers = 3)
kmeans4 = kmeans(x = fit2[,2:4], centers = 4)
kmeans5 = kmeans(x = fit2[,2:4], centers = 5)
kmeans6 = kmeans(x = fit2[,2:4], centers = 6)
fit2$cluster3 = kmeans3$cluster
fit2$cluster4 = kmeans4$cluster
fit2$cluster5 = kmeans5$cluster
fit2$cluster6 = kmeans6$cluster
plot3d(fit2[,2:4], size = 10, col = fit2$cluster6)
text3d(fit2[,2:4], texts = fit2$detailedName, size=2)
## GAME DURATION, DPS AND DAMAGE DONE
# NOTE(review): this whole remainder duplicates the ranked-ADC plots verbatim
# (still reading adc.performance.patch / adc.performance.patch.win).
# duration of games where champions participated
adc.performance.patch[championId %in% relchamps.adc] %>%
ggplot(aes(y = gameDuration, x = patch, group=name)) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
# damage done to champions
adc.performance.patch[championId %in% relchamps.adc] %>%
ggplot(aes(y = totalDamageToChampions, x = patch, group=name)) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
# dps
adc.performance.patch[championId %in% relchamps.adc] %>%
ggplot(aes(y = totalDamageToChampions/(gameDuration/60), x = patch, group=name)) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
# dps by win/loss
df = adc.performance.patch.win[championId %in% relchamps.adc]
ggplot(df,aes(y = totalDamageToChampions/(gameDuration/60), x = patch, group=as.factor(win), color = as.factor(win))) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
## gold earned and gold per min
adc.performance.patch[championId %in% relchamps.adc] %>%
ggplot(aes(y = goldEarned, x = patch, group=name)) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
adc.performance.patch[championId %in% relchamps.adc] %>%
ggplot(aes(y = goldEarned/(gameDuration/60), x = patch, group=name)) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
# win and loss
df = adc.performance.patch.win[championId %in% relchamps.adc]
ggplot(df,aes(y = goldEarned/(gameDuration/60), x = patch, group=as.factor(win), color = as.factor(win))) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
### cs and xp development
#cs diffs
df = adc.performance.patch[championId %in% relchamps.adc, list(name, patch, csDiffPerMinTen, csDiffPerMinTwenty, csDiffPerMinThirty)]
melt(df, id.vars=c('name','patch'), measure.vars=colnames(df[,-c("name","patch")])) %>%
ggplot(aes(y = value, x = variable, group = name)) +
geom_line() + scale_x_discrete(labels=c("10","20","30")) +
facet_grid(name~factor(patch, levels = patchOrder)) +
ggtitle("Creep Score Differential for each Champion and Patch")
#xp Diffs
df = adc.performance.patch[championId %in% relchamps.adc, list(name, patch, xpDiffPerMinTen, xpDiffPerMinTwenty, xpDiffPerMinThirty)]
melt(df, id.vars=c('name','patch'), measure.vars=colnames(df[,-c("name","patch")])) %>%
ggplot(aes(y = value, x = variable, group = name)) +
geom_line() + scale_x_discrete(labels=c("10","20","30")) +
facet_grid(name~factor(patch, levels = patchOrder)) +
ggtitle("Experience Differential for each Champion and Patch")
# cs deltas
df = adc.performance.patch[championId %in% relchamps.adc, list(name, patch, csPerMinDeltaTen, csPerMinDeltaTwenty, csPerMinDeltaThirty)]
melt(df, id.vars=c('name','patch'), measure.vars=colnames(df[,-c("name","patch")])) %>%
ggplot(aes(y = value, x = variable, group = name)) +
geom_line() + scale_x_discrete(labels=c("0-10","10-20","20-30")) +
facet_grid(name~factor(patch, levels = patchOrder)) +
ggtitle("Creep Score Per Minute Delta for each Champion and Patch")
## first blood
adc.performance.patch[championId %in% relchamps.adc] %>%
ggplot(aes(y = firstBloodKill + firstBloodAssist, x = patch, group=name)) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
adc.performance.patch.win[championId %in% relchamps.adc] %>%
ggplot(aes(y = firstBloodKill + firstBloodAssist, x = patch, group=as.factor(win), color=as.factor(win))) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
# first tower
adc.performance.patch[championId %in% relchamps.adc] %>%
ggplot(aes(y = firstTowerKill + firstTowerAssist, x = patch, group=name)) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
#first inhib
adc.performance.patch[championId %in% relchamps.adc] %>%
ggplot(aes(y = firstInhibitorKill + firstInhibitorAssist, x = patch, group=name)) +
geom_line() +
scale_x_discrete(limits=patchOrder) +
facet_wrap(~name, ncol = 4)
## winrate by game duration
df = adc.performance.patch[,list(name, patch, winrate, winrateAt25, winrateAt30, winrateAt35, winrateAt40, winrateAt45, winrateOver45)]
df1 = df %>%
rownames_to_column(var = "champ") %>%
melt(id.vars=c('champ', 'name', 'patch'), measure.vars = colnames(df[,-c("name", "patch")]))
df1$variableNew <- c(0,25,30,35,40,45,50)[match(df1$variable, c("winrate", "winrateAt25", "winrateAt30", "winrateAt35", "winrateAt40","winrateAt45", "winrateOver45"))]
winrate_scale = c("winrateAt25", "winrateAt30", "winrateAt35", "winrateAt40","winrateAt45", "winrateOver45")
p = ggplot(data = df1[variableNew!=0], aes(x = variableNew, y=value, color = name)) +
geom_point() +
geom_smooth(method="auto") +
facet_grid(name ~ factor(patch, levels = patchOrder)) +
scale_y_continuous(limits=c(0,1)) +
scale_x_continuous(limits = c(20,55), breaks = seq(25,50,5), labels=c( "25", "30", "35" ,"40", "45", ">45")) +
theme(legend.position="none")
p
|
library(ibelief)
### Name: decisionDST
### Title: Decision Rules
### Aliases: decisionDST

### ** Examples

# Three mass functions over an 8-element power set (frame of 3 elements);
# each vector assigns mass to one subset of the frame.
m1=c(0,0.4, 0.1, 0.2, 0.2, 0, 0, 0.1);
m2=c(0,0.2, 0.3, 0.1, 0.1, 0, 0.2, 0.1);
m3=c(0.1,0.2, 0, 0.1, 0.1, 0.1, 0, 0.3);
# Discount m3 with reliability factor 0.95 before combining.
m3d=discounting(m3,0.95);
# Combine masses; per the variable names, criterion 1 is Smets' rule and
# criterion 8 is PCR6 (see ?DST for the full criterion list).
M_comb_Smets=DST(cbind(m1,m2,m3d),1);
M_comb_PCR6=DST(cbind(m1,m2),8);
# Decide a class from each combined mass (see ?decisionDST for criterion
# codes); criterion 5 takes an extra threshold parameter (0.5 here).
class_fusion=decisionDST(M_comb_Smets,1)
class_fusion=decisionDST(M_comb_PCR6,1)
class_fusion=decisionDST(M_comb_Smets,5,0.5)
# Several combined masses can be decided in one call (one per column).
class_fusion=decisionDST(cbind(M_comb_Smets,M_comb_PCR6),1)
|
/data/genthat_extracted_code/ibelief/examples/decisionDST.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 520
|
r
|
library(ibelief)
### Name: decisionDST
### Title: Decision Rules
### Aliases: decisionDST

### ** Examples

# Three mass functions over an 8-element power set (frame of 3 elements);
# each vector assigns mass to one subset of the frame.
m1=c(0,0.4, 0.1, 0.2, 0.2, 0, 0, 0.1);
m2=c(0,0.2, 0.3, 0.1, 0.1, 0, 0.2, 0.1);
m3=c(0.1,0.2, 0, 0.1, 0.1, 0.1, 0, 0.3);
# Discount m3 with reliability factor 0.95 before combining.
m3d=discounting(m3,0.95);
# Combine masses; per the variable names, criterion 1 is Smets' rule and
# criterion 8 is PCR6 (see ?DST for the full criterion list).
M_comb_Smets=DST(cbind(m1,m2,m3d),1);
M_comb_PCR6=DST(cbind(m1,m2),8);
# Decide a class from each combined mass (see ?decisionDST for criterion
# codes); criterion 5 takes an extra threshold parameter (0.5 here).
class_fusion=decisionDST(M_comb_Smets,1)
class_fusion=decisionDST(M_comb_PCR6,1)
class_fusion=decisionDST(M_comb_Smets,5,0.5)
# Several combined masses can be decided in one call (one per column).
class_fusion=decisionDST(cbind(M_comb_Smets,M_comb_PCR6),1)
|
## Add tidyverse
library(tidyverse)

# Create the subset of lines and TOS that are to have extra data taken.
# sel_subset starts as one comma-separated string and is split/trimmed
# into a character vector of variety names.
sel_subset <- "Axe, Beaufort, Beckom, Cutlass, Gregory, Kittyhawk, Lancer, Mace, Manning, Scepter,
Trojan, Suntop, Commander, Compass, Fathom, Planet, Spartacus, Urambie, EGA_Gregory, RGT Planet, Spartacus CL"
sel_subset <- str_trim(unlist(str_split(sel_subset, ",")))

# Times of sowing included in the subset; two sites use extended sets.
tos_subset <- c('1','4','8')
tos_subset_cal <- c('1','2','4','8') #Callingon requested TOS2 added due to animal damage in TOS1
tos_subset_yanyean <- c('1','4','5','8') #Yan Yean requested TOS5 added due to uneven irrigation in TOS4

# Read a trial design, flag the plots belonging to the extra-data subset,
# and write the result out for the field app.
#   in_path   path of the raw design CSV
#   out_path  destination CSV
#   tos       character vector of times-of-sowing included in the subset
#   drop_x1   drop the spurious "X1" row-number column some designs carry
# NOTE(review): the flag value "subset\\u0007" (literal backslash-u, not a
# control character) is what the original wrote -- confirm downstream
# tooling expects exactly this string.
tag_subset <- function(in_path, out_path, tos, drop_x1 = FALSE) {
  design <- read_csv(in_path)
  if (drop_x1) {
    design <- select(design, -(X1))
  }
  design <- mutate(design,
    subset = if_else(Variety %in% sel_subset & TOS %in% tos, "subset\\u0007", ""))
  write_csv(design, file.path(out_path))
}

#Wagga
tag_subset("raw_data/WaggaWagga2019_Design - updated 05032019 for TOS1 sowing error.csv",
           "fp_files/2019_npi_waggawagga.csv", tos_subset)
#Callington
tag_subset("raw_data/Callington2019_Design.csv",
           "fp_files/2019_npi_callington.csv", tos_subset_cal, drop_x1 = TRUE)
#Merridin
tag_subset("raw_data/Merridin_New_Design.csv",
           "fp_files/2019_npi_merridin.csv", tos_subset)
#Yan Yean
tag_subset("raw_data/Yan Yean2019_Design.csv",
           "fp_files/2019_npi_yanyean.csv", tos_subset_yanyean, drop_x1 = TRUE)
#Sandbox
tag_subset("raw_data/WaggaWagga2019_Design.csv",
           "fp_files/NPIsandbox.csv", tos_subset, drop_x1 = TRUE)
#Dale
tag_subset("raw_data/Dale_2019.csv",
           "fp_files/2019_npi_dale.csv", tos_subset)
|
/NPI_sandbox.R
|
no_license
|
EPLeyne/NPI
|
R
| false
| false
| 2,359
|
r
|
## Add tidyverse
library(tidyverse)
# Create the subset of lines and TOS that are to have extra data taken.
# sel_subset starts as one comma-separated string and is split/trimmed
# into a character vector of variety names below.
sel_subset <- "Axe, Beaufort, Beckom, Cutlass, Gregory, Kittyhawk, Lancer, Mace, Manning, Scepter,
Trojan, Suntop, Commander, Compass, Fathom, Planet, Spartacus, Urambie, EGA_Gregory, RGT Planet, Spartacus CL"
sel_subset <- str_split(sel_subset,",")
sel_subset <- unlist(sel_subset)
sel_subset <- str_trim(sel_subset)
# Times of sowing included in the subset; two sites use extended sets.
tos_subset <- c('1','4','8')
tos_subset_cal <- c('1','2','4','8') #Callingon requested TOS2 added due to animal damage in TOS1
tos_subset_yanyean <- c('1','4','5','8') #Yan Yean requested TOS5 added due to uneven irrigation in TOS4
# For each site below: read the raw design, (at some sites) drop the
# spurious X1 row-number column, flag subset plots in a new `subset`
# column, and write the result for the field app.
# NOTE(review): "subset\\u0007" is a literal backslash-u string, not a
# control character -- confirm downstream tooling expects exactly this.
#Wagga
wagga <- read_csv("raw_data/WaggaWagga2019_Design - updated 05032019 for TOS1 sowing error.csv")
wagga <- wagga %>%
  mutate(subset=if_else((wagga$Variety %in% sel_subset & wagga$TOS %in% tos_subset), "subset\\u0007", ""))
write_csv(wagga, file.path("fp_files/2019_npi_waggawagga.csv"))
#Callington
callington <- read_csv("raw_data/Callington2019_Design.csv")
callington <- callington %>%
  select(-(X1)) %>%
  mutate(subset=if_else(callington$Variety %in% sel_subset & callington$TOS %in% tos_subset_cal, "subset\\u0007", ""))
write_csv(callington, file.path("fp_files/2019_npi_callington.csv"))
#Merridin
merridin <- read_csv("raw_data/Merridin_New_Design.csv")
merridin <- merridin %>%
  mutate(subset=if_else(merridin$Variety %in% sel_subset & merridin$TOS %in% tos_subset, "subset\\u0007", ""))
write_csv(merridin, file.path("fp_files/2019_npi_merridin.csv"))
#Yan Yean
yanyean <- read_csv("raw_data/Yan Yean2019_Design.csv")
yanyean <- yanyean %>%
  select(-(X1)) %>%
  mutate(subset=if_else(yanyean$Variety %in% sel_subset & yanyean$TOS %in% tos_subset_yanyean, "subset\\u0007", ""))
write_csv(yanyean, file.path("fp_files/2019_npi_yanyean.csv"))
#Sandbox
NPIsandbox <- read_csv("raw_data/WaggaWagga2019_Design.csv")
NPIsandbox <- NPIsandbox %>%
  select(-(X1)) %>%
  mutate(subset=if_else(NPIsandbox$Variety %in% sel_subset & NPIsandbox$TOS %in% tos_subset, "subset\\u0007", ""))
write_csv(NPIsandbox, file.path("fp_files/NPIsandbox.csv"))
#Dale
dale <- read_csv("raw_data/Dale_2019.csv")
dale <- dale %>%
  mutate(subset=if_else(dale$Variety %in% sel_subset & dale$TOS %in% tos_subset, "subset\\u0007", ""))
write_csv(dale, file.path("fp_files/2019_npi_dale.csv"))
|
## Reading the data
NEI <- readRDS("summarySCC_PM25.rds") # National Emissions Inventory (NEI)
SCC <- readRDS("Source_Classification_Code.rds") # not used below; kept for reference
# How have emissions from motor vehicle sources
# changed from 1999-2008 in Baltimore City?
# Type: ON-ROAD, Fips = "24510" Baltimore Motor Vehicle PM[2.5]* Emissions
baltimore_data <- subset(NEI, fips == "24510" & type == 'ON-ROAD')
# Total emissions per year. tapply() returns its sums in sorted year
# order, so x must come from the same (sorted, named) result -- pairing
# it with plain unique() (first-appearance order) could mis-match x and y.
yearly_total <- tapply(baltimore_data$Emissions, baltimore_data$year, sum)
# making the plot
png(filename = "plot5.png")
plot(x = as.numeric(names(yearly_total)),
     y = yearly_total,
     pch = 19, type = 'b',
     xlab = "Year",
     ylab = "Total emissions of PM2.5 (tons)",
     main = "Baltimore Motor Vehicle PM[2.5] Emissions From 1999 to 2008")
dev.off()
|
/4- Exploratory Data Analysis/Week 4/plot5.R
|
no_license
|
jrreda/JHU-Data-Science
|
R
| false
| false
| 732
|
r
|
## Reading the data
NEI <- readRDS("summarySCC_PM25.rds") # National Emissions Inventory (NEI)
SCC <- readRDS("Source_Classification_Code.rds") # not used below; kept for reference
# How have emissions from motor vehicle sources
# changed from 1999-2008 in Baltimore City?
# Type: ON-ROAD, Fips = "24510" Baltimore Motor Vehicle PM[2.5]* Emissions
baltimore_data <- subset(NEI, fips == "24510" & type == 'ON-ROAD')
# making the plot
# NOTE(review): tapply() returns yearly sums in sorted year order while
# unique() keeps first-appearance order -- these pair up correctly only
# if the data is ordered by year; verify (or derive x from the names of
# the tapply result).
png(filename = "plot5.png")
plot(x = unique(baltimore_data$year),
     y = tapply(baltimore_data$Emissions, baltimore_data$year, sum),
     pch = 19, type = 'b',
     xlab = "Year",
     ylab = "Total emissions of PM2.5 (tons)",
     main = "Baltimore Motor Vehicle PM[2.5] Emissions From 1999 to 2008")
dev.off()
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/filterFeatures.R
\name{filterFeatures}
\alias{filterFeatures}
\title{Filter features by thresholding filter values.}
\usage{
filterFeatures(task, method = "rf.importance", fval = NULL, perc = NULL,
abs = NULL, threshold = NULL, mandatory.feat = NULL, ...)
}
\arguments{
\item{task}{[\code{\link{Task}}]\cr
The task.}
\item{method}{[\code{character(1)}]\cr
See \code{\link{listFilterMethods}}.
Default is \dQuote{rf.importance}.}
\item{fval}{[\code{\link{FilterValues}}]\cr
Result of \code{\link{getFilterValues}}.
If you pass this, the filter values in the object are used for feature filtering.
\code{method} and \code{...} are ignored then.
Default is \code{NULL} and not used.}
\item{perc}{[\code{numeric(1)}]\cr
If set, select \code{perc}*100 top scoring features.
Mutually exclusive with arguments \code{abs} and \code{threshold}.}
\item{abs}{[\code{numeric(1)}]\cr
If set, select \code{abs} top scoring features.
Mutually exclusive with arguments \code{perc} and \code{threshold}.}
\item{threshold}{[\code{numeric(1)}]\cr
If set, select features whose score exceeds \code{threshold}.
Mutually exclusive with arguments \code{perc} and \code{abs}.}
\item{mandatory.feat}{[\code{character}]\cr
Mandatory features which are always included regardless of their scores}
\item{...}{[any]\cr
Passed down to selected filter method.}
}
\value{
[\code{\link{Task}}].
}
\description{
First, calls \code{\link{getFilterValues}}.
Features are then selected via \code{perc}, \code{abs}, or \code{threshold}.
}
\seealso{
Other filter: \code{\link{FilterValues}};
\code{\link{getFilterValues}};
\code{\link{getFilteredFeatures}};
\code{\link{makeFilterWrapper}}
}
|
/man/filterFeatures.Rd
|
no_license
|
elephann/mlr
|
R
| false
| false
| 1,739
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/filterFeatures.R
\name{filterFeatures}
\alias{filterFeatures}
\title{Filter features by thresholding filter values.}
\usage{
filterFeatures(task, method = "rf.importance", fval = NULL, perc = NULL,
abs = NULL, threshold = NULL, mandatory.feat = NULL, ...)
}
\arguments{
\item{task}{[\code{\link{Task}}]\cr
The task.}
\item{method}{[\code{character(1)}]\cr
See \code{\link{listFilterMethods}}.
Default is \dQuote{rf.importance}.}
\item{fval}{[\code{\link{FilterValues}}]\cr
Result of \code{\link{getFilterValues}}.
If you pass this, the filter values in the object are used for feature filtering.
\code{method} and \code{...} are ignored then.
Default is \code{NULL} and not used.}
\item{perc}{[\code{numeric(1)}]\cr
If set, select \code{perc}*100 top scoring features.
Mutually exclusive with arguments \code{abs} and \code{threshold}.}
\item{abs}{[\code{numeric(1)}]\cr
If set, select \code{abs} top scoring features.
Mutually exclusive with arguments \code{perc} and \code{threshold}.}
\item{threshold}{[\code{numeric(1)}]\cr
If set, select features whose score exceeds \code{threshold}.
Mutually exclusive with arguments \code{perc} and \code{abs}.}
\item{mandatory.feat}{[\code{character}]\cr
Mandatory features which are always included regardless of their scores}
\item{...}{[any]\cr
Passed down to selected filter method.}
}
\value{
[\code{\link{Task}}].
}
\description{
First, calls \code{\link{getFilterValues}}.
Features are then selected via \code{perc}, \code{abs}, or \code{threshold}.
}
\seealso{
Other filter: \code{\link{FilterValues}};
\code{\link{getFilterValues}};
\code{\link{getFilteredFeatures}};
\code{\link{makeFilterWrapper}}
}
|
#' Report function
#'
#' \code{report} is a general function that returns Markdown code of a statistical test in 6th edition APA style.
#'
#' @param results A tidy stats list.
#' @param identifier A character string identifying the model.
#' @param group A character string identifying the group.
#' @param term A character string indicating which term you want to report the statistics of.
#' @param term_nr A number indicating which term you want to report the statistics of.
#' @param statistic A character string of a statistic you want to extract from a model.
#' @param var A character string identifying the variable.
#'
#' @details \code{report} calls a specific report function dependent on the type of statistical test that is supplied. The 'method' column of the statistical test is used to determine which report function to run.
#'
#' @examples
#' # Read in a list of results
#' results <- read_stats(system.file("results.csv", package = "tidystats"))
#'
#' # Set the list as the default list
#' options(tidystats_list = results)
#'
#' # Example: t-test
#' report("t_test_one_sample")
#' report("t_test_welch")
#'
#' # Example: correlation
#' report("correlation_pearson")
#' report("correlation_spearman")
#'
#' # Example: ANOVA
#' report("aov_two_way", term = "condition")
#' report("aov_two_way", term = "sex")
#'
#' # Example: Linear models
#' report("lm_simple", term = "conditionmortality salience")
#' report("lm_simple", term_nr = 2)
#' report("lm_simple", group = "model")
#'
#' @export
report <- function(identifier, term = NULL, term_nr = NULL, var = NULL,
  group = NULL, statistic = NULL, results = getOption("tidystats_list")) {
  # Check whether the results list is provided
  if (is.null(results)) {
    stop("No results found; did you specify a results list?")
  }
  # Check whether the identifier exists, otherwise extract it
  if (!identifier %in% names(results)) {
    stop("Identifier not found.")
  } else {
    res <- results[[identifier]]
  }
  output <- NULL
  # Check whether a single statistic is requested, or a full line of output
  if (is.null(statistic)) {
    # Run the appropriate reporting function based on the method
    # information stored in the results (res$method holds the test name)
    if ("method" %in% names(res)) {
      method <- res$method[1]
      if (stringr::str_detect(method, "t-test")) {
        output <- report_t_test(identifier, results = results)
      } else if (stringr::str_detect(method, "Chi-squared")) {
        output <- report_chi_squared(identifier, results = results)
      } else if (stringr::str_detect(method, "Wilcoxon")) {
        output <- report_wilcoxon(identifier, results = results)
      } else if (stringr::str_detect(method, "Fisher")) {
        output <- report_fisher(identifier, results = results)
      } else if (stringr::str_detect(method, "correlation")) {
        output <- report_correlation(identifier, term, term_nr,
          results = results)
      # Check for GLMs before checking for LMs: "Generalized linear
      # model" would also match the "(L|l)inear model" pattern below
      } else if (stringr::str_detect(method, "Generalized linear model")) {
        output <- report_glm(identifier, group, term, term_nr,
          results = results)
      } else if (stringr::str_detect(method, "(L|l)inear model")) {
        output <- report_lm(identifier, group, term, term_nr, results = results)
      } else if (stringr::str_detect(method, "(L|l)inear mixed model")) {
        output <- report_lmm(identifier, group, term, term_nr,
          results = results)
      } else if (stringr::str_detect(method, "ANOVA|ANCOVA")) {
        output <- report_anova(identifier, group, term, term_nr,
          results = results)
      }
    }
  }
  # If output is NULL, either the method is not yet supported or a single
  # statistic was requested. In that case we can still report a single
  # statistic if enough information is provided.
  if (is.null(output)) {
    # Each block below copies the argument into a differently named local
    # (e.g. res_term) so dplyr::filter() compares the column against the
    # function argument rather than against the column itself.
    # Filter: term
    if (!is.null(term)) {
      res_term <- term
      if (!res_term %in% unique(res$term)) {
        stop("Term not found.")
      }
      res <- dplyr::filter(res, term == res_term)
    }
    # Filter: term_nr
    if (!is.null(term_nr)) {
      res_term_nr <- term_nr
      if (!res_term_nr %in% unique(res$term_nr)) {
        stop("Term number not found.")
      }
      res <- dplyr::filter(res, term_nr == res_term_nr)
    }
    # Filter: statistic
    if (!is.null(statistic)) {
      res_statistic <- statistic
      if (!res_statistic %in% unique(res$statistic)) {
        stop("Statistic not found.")
      }
      res <- dplyr::filter(res, statistic == res_statistic)
    }
    # Filter: group
    if (!is.null(group)) {
      res_group <- group
      if (!res_group %in% unique(res$group)) {
        stop("Group not found.")
      }
      res <- dplyr::filter(res, group == res_group)
    }
    # Filter: var
    if (!is.null(var)) {
      res_var <- var
      if (!res_var %in% unique(res$var)) {
        stop("Variable not found.")
      }
      res <- dplyr::filter(res, var == res_var)
    }
    # Check if enough information is provided: after filtering, every
    # identifying column must be down to a single unique value, otherwise
    # the requested statistic is ambiguous.
    info <- dplyr::select(res, contains("var"), contains("group"),
      contains("statistic"), contains("term"))
    for (column in names(info)) {
      if (length(unique(dplyr::pull(info, column))) > 1) {
        stop(paste("Not enough information provided. Please provide", column,
          "information."))
      }
    }
    # Extract statistic: p-values get APA-style formatting; other values
    # are printed whole if integral, otherwise with two decimals.
    if (res$statistic[1] == "p") {
      output <- report_p_value(res$value[1])
    } else {
      # Check if the value is an integer, else return a string with 2
      # significant digits
      if (res$value[1] %% 1 == 0) {
        output <- prettyNum(res$value[1])
      } else {
        output <- format(res$value[1], digits = 2, nsmall = 2)
      }
    }
  }
  return(output)
}
|
/R/report.R
|
permissive
|
ikbentimkramer/tidystats-v0.3
|
R
| false
| false
| 5,762
|
r
|
#' Report function
#'
#' \code{report} is a general function that returns Markdown code of a statistical test in 6th edition APA style.
#'
#' @param results A tidy stats list.
#' @param identifier A character string identifying the model.
#' @param group A character string identifying the group.
#' @param term A character string indicating which term you want to report the statistics of.
#' @param term_nr A number indicating which term you want to report the statistics of.
#' @param statistic A character string of a statistic you want to extract from a model.
#' @param var A character string identifying the variable.
#'
#' @details \code{report} calls a specific report function dependent on the type of statistical test that is supplied. The 'method' column of the statistical test is used to determine which report function to run.
#'
#' @examples
#' # Read in a list of results
#' results <- read_stats(system.file("results.csv", package = "tidystats"))
#'
#' # Set the list as the default list
#' options(tidystats_list = results)
#'
#' # Example: t-test
#' report("t_test_one_sample")
#' report("t_test_welch")
#'
#' # Example: correlation
#' report("correlation_pearson")
#' report("correlation_spearman")
#'
#' # Example: ANOVA
#' report("aov_two_way", term = "condition")
#' report("aov_two_way", term = "sex")
#'
#' # Example: Linear models
#' report("lm_simple", term = "conditionmortality salience")
#' report("lm_simple", term_nr = 2)
#' report("lm_simple", group = "model")
#'
#' @export
report <- function(identifier, term = NULL, term_nr = NULL, var = NULL,
  group = NULL, statistic = NULL, results = getOption("tidystats_list")) {
  # Check whether the results list is provided
  if (is.null(results)) {
    stop("No results found; did you specify a results list?")
  }
  # Check whether the identifier exists, otherwise extract it
  if (!identifier %in% names(results)) {
    stop("Identifier not found.")
  } else {
    res <- results[[identifier]]
  }
  output <- NULL
  # Check whether a single statistic is requested, or a full line of output
  if (is.null(statistic)) {
    # Run the appropriate reporting function based on the method
    # information stored in the results (res$method holds the test name)
    if ("method" %in% names(res)) {
      method <- res$method[1]
      if (stringr::str_detect(method, "t-test")) {
        output <- report_t_test(identifier, results = results)
      } else if (stringr::str_detect(method, "Chi-squared")) {
        output <- report_chi_squared(identifier, results = results)
      } else if (stringr::str_detect(method, "Wilcoxon")) {
        output <- report_wilcoxon(identifier, results = results)
      } else if (stringr::str_detect(method, "Fisher")) {
        output <- report_fisher(identifier, results = results)
      } else if (stringr::str_detect(method, "correlation")) {
        output <- report_correlation(identifier, term, term_nr,
          results = results)
      # Check for GLMs before checking for LMs: "Generalized linear
      # model" would also match the "(L|l)inear model" pattern below
      } else if (stringr::str_detect(method, "Generalized linear model")) {
        output <- report_glm(identifier, group, term, term_nr,
          results = results)
      } else if (stringr::str_detect(method, "(L|l)inear model")) {
        output <- report_lm(identifier, group, term, term_nr, results = results)
      } else if (stringr::str_detect(method, "(L|l)inear mixed model")) {
        output <- report_lmm(identifier, group, term, term_nr,
          results = results)
      } else if (stringr::str_detect(method, "ANOVA|ANCOVA")) {
        output <- report_anova(identifier, group, term, term_nr,
          results = results)
      }
    }
  }
  # If output is NULL, either the method is not yet supported or a single
  # statistic was requested. In that case we can still report a single
  # statistic if enough information is provided.
  if (is.null(output)) {
    # Each block below copies the argument into a differently named local
    # (e.g. res_term) so dplyr::filter() compares the column against the
    # function argument rather than against the column itself.
    # Filter: term
    if (!is.null(term)) {
      res_term <- term
      if (!res_term %in% unique(res$term)) {
        stop("Term not found.")
      }
      res <- dplyr::filter(res, term == res_term)
    }
    # Filter: term_nr
    if (!is.null(term_nr)) {
      res_term_nr <- term_nr
      if (!res_term_nr %in% unique(res$term_nr)) {
        stop("Term number not found.")
      }
      res <- dplyr::filter(res, term_nr == res_term_nr)
    }
    # Filter: statistic
    if (!is.null(statistic)) {
      res_statistic <- statistic
      if (!res_statistic %in% unique(res$statistic)) {
        stop("Statistic not found.")
      }
      res <- dplyr::filter(res, statistic == res_statistic)
    }
    # Filter: group
    if (!is.null(group)) {
      res_group <- group
      if (!res_group %in% unique(res$group)) {
        stop("Group not found.")
      }
      res <- dplyr::filter(res, group == res_group)
    }
    # Filter: var
    if (!is.null(var)) {
      res_var <- var
      if (!res_var %in% unique(res$var)) {
        stop("Variable not found.")
      }
      res <- dplyr::filter(res, var == res_var)
    }
    # Check if enough information is provided: after filtering, every
    # identifying column must be down to a single unique value, otherwise
    # the requested statistic is ambiguous.
    info <- dplyr::select(res, contains("var"), contains("group"),
      contains("statistic"), contains("term"))
    for (column in names(info)) {
      if (length(unique(dplyr::pull(info, column))) > 1) {
        stop(paste("Not enough information provided. Please provide", column,
          "information."))
      }
    }
    # Extract statistic: p-values get APA-style formatting; other values
    # are printed whole if integral, otherwise with two decimals.
    if (res$statistic[1] == "p") {
      output <- report_p_value(res$value[1])
    } else {
      # Check if the value is an integer, else return a string with 2
      # significant digits
      if (res$value[1] %% 1 == 0) {
        output <- prettyNum(res$value[1])
      } else {
        output <- format(res$value[1], digits = 2, nsmall = 2)
      }
    }
  }
  return(output)
}
|
library(ggthemes)
# Shared ggplot2 styling for the team's figures: clean white panel with
# black axis lines, Times typography, and a colour-blind-safe palette
# (ggthemes::scale_colour_colorblind). Returns a list so it can be added
# to a plot with `+`.
team_theme <- function() {
  base_theme <- theme(
    axis.line        = element_line(color = "black"),
    text             = element_text(size = 8, family = "Times"),
    panel.background = element_rect(fill = 'white', colour = 'black'),
    panel.grid.minor = element_blank(),
    panel.grid.major = element_blank(),
    plot.title       = element_text(colour = "black", size = 14, hjust = 0.5),
    legend.text      = element_text(size = 12, family = "Times")
  )
  list(base_theme, scale_colour_colorblind())
}
|
/R/graph_theme.R
|
permissive
|
UofTCoders/eeb430.2017.Python
|
R
| false
| false
| 499
|
r
|
library(ggthemes)
# Shared ggplot2 styling for the team's figures: clean white panel with
# black axis lines, Times typography, and a colour-blind-safe palette
# (ggthemes::scale_colour_colorblind). Returns a list so it can be added
# to a plot with `+`.
team_theme <- function() {list(
  theme(axis.line = element_line(color = "black"),
        text = element_text(size = 8, family = "Times"),
        panel.background = element_rect(fill = 'white', colour = 'black'),
        panel.grid.minor = element_blank(),
        panel.grid.major = element_blank(),
        plot.title = element_text(colour = "black", size = 14, hjust = 0.5),
        legend.text = element_text(size = 12, family = "Times")),
  scale_colour_colorblind())
}
|
# Remember the starting directory so it can be restored before plotting.
defaultdir <- getwd()
if (!file.exists("data")){
  dir.create("data")
}
setwd("data")
# Download and extract the dataset only if it is not already present.
if (!file.exists("household_power_consumption.txt")){
  fileUrl <-"https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  download.file(fileUrl,"temp.zip")
  unzip("temp.zip", "household_power_consumption.txt")
}
#memory required = no. of column * no. of rows * 8 bytes/numeric
#The dataset has 2,075,259 rows and 9 columns
# (the value below is only displayed when the script is run interactively)
paste("Memory required is: ",2075259*9*8,"bytes")
# Read the full dataset; "?" marks missing values. Explicit colClasses
# speeds up parsing of the ~2M rows.
my_data <- read.table("household_power_consumption.txt", header=TRUE,sep=";",comment.char = "",na.strings="?",
                      colClasses = c("character", "character", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric" ))
# Keep only the two target days (1-2 Feb 2007).
x <- subset(my_data,my_data$Date=="1/2/2007"|my_data$Date=="2/2/2007")
x$datetime <- as.Date(x$Date, "%d/%m/%Y")
# Combine date and time into one timestamp. POSIXct (not POSIXlt) is used
# because list-based POSIXlt values are discouraged in data frame
# columns; plotting behaviour is unchanged.
x$datetime <- as.POSIXct(paste(x$datetime,x$Time))
setwd(defaultdir)
# Plot the three sub-metering series on shared axes and save to PNG.
png(filename = "plot3.png",width = 480, height = 480)
with(x,plot(datetime,Sub_metering_1,type="l",xlab="",ylab ="Energy sub metering"))
with(x,points(datetime,Sub_metering_2,type="l",col="red"))
with(x,points(datetime,Sub_metering_3,type="l",col="blue"))
legend('topright', c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=1, col=c('black','red', 'blue'))
dev.off()
|
/plot3.R
|
no_license
|
VJ911/ExData_Plotting1
|
R
| false
| false
| 1,318
|
r
|
# Remember the starting directory so it can be restored before plotting.
defaultdir <- getwd()
if (!file.exists("data")){
  dir.create("data")
}
setwd("data")
# Download and extract the dataset only if it is not already present.
if (!file.exists("household_power_consumption.txt")){
  fileUrl <-"https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  download.file(fileUrl,"temp.zip")
  unzip("temp.zip", "household_power_consumption.txt")
}
#memory required = no. of column * no. of rows * 8 bytes/numeric
#The dataset has 2,075,259 rows and 9 columns
# (the value below is only displayed when the script is run interactively)
paste("Memory required is: ",2075259*9*8,"bytes")
# Read the full dataset; "?" marks missing values. Explicit colClasses
# speeds up parsing of the ~2M rows.
my_data <- read.table("household_power_consumption.txt", header=TRUE,sep=";",comment.char = "",na.strings="?",
                      colClasses = c("character", "character", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric" ))
# Keep only the two target days (1-2 Feb 2007).
x <- subset(my_data,my_data$Date=="1/2/2007"|my_data$Date=="2/2/2007")
x$datetime <- as.Date(x$Date, "%d/%m/%Y")
# NOTE(review): POSIXlt is a list-based class and is discouraged in data
# frame columns -- as.POSIXct would be the conventional choice here.
x$datetime <- as.POSIXlt(paste(x$datetime,x$Time))
setwd(defaultdir)
# Plot the three sub-metering series on shared axes and save to PNG.
png(filename = "plot3.png",width = 480, height = 480)
with(x,plot(datetime,Sub_metering_1,type="l",xlab="",ylab ="Energy sub metering"))
with(x,points(datetime,Sub_metering_2,type="l",col="red"))
with(x,points(datetime,Sub_metering_3,type="l",col="blue"))
legend('topright', c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=1, col=c('black','red', 'blue'))
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/s4_architecture.R
\name{plotSF}
\alias{plotSF}
\title{Method for plotting the Survival Function of a Curve object}
\usage{
plotSF(theObject, ...)
}
\arguments{
\item{theObject}{The name of the RCurve Object}
\item{...}{Pass-through arguments}
}
\description{
This plots a Curve Survival Function
}
\examples{
plotSF(Weibull(100,1))
plotSF(Weibull(100,1),xlab="Test x label",maxT=60)
plotSF(Weibull(80,0.8),overlay=TRUE,col=2,lty=2)
}
|
/man/plotSF.Rd
|
no_license
|
cran/gestate
|
R
| false
| true
| 513
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/s4_architecture.R
\name{plotSF}
\alias{plotSF}
\title{Method for plotting the Survival Function of a Curve object}
\usage{
plotSF(theObject, ...)
}
\arguments{
\item{theObject}{The name of the RCurve Object}
\item{...}{Pass-through arguments}
}
\description{
This plots a Curve Survival Function
}
\examples{
plotSF(Weibull(100,1))
plotSF(Weibull(100,1),xlab="Test x label",maxT=60)
plotSF(Weibull(80,0.8),overlay=TRUE,col=2,lty=2)
}
|
# Figure dimensions (inches) shared by both output formats.
fig_width <- 15
fig_height <- 10

# Render R/hs_fig.R to the given graphics device. on.exit() guarantees
# the device is closed even if sourcing the plot script fails, so a
# half-open device cannot leak into later plots.
#   open_device  device-opening function, e.g. pdf or svg
#   path         output file path
save_hs_fig <- function(open_device, path) {
  open_device(path, width = fig_width, height = fig_height)
  on.exit(dev.off(), add = TRUE)
  source("R/hs_fig.R")
}

save_hs_fig(pdf, "R/hs_fig.pdf")
save_hs_fig(svg, "R/hs_fig.svg")
|
/R/hs_fig-make-svg-pdf.R
|
permissive
|
fboehm/QTLfigs
|
R
| false
| false
| 218
|
r
|
# Figure dimensions (inches) shared by both output formats.
fig_width <- 15
fig_height <- 10
# Render R/hs_fig.R to PDF...
pdf("R/hs_fig.pdf", width = fig_width, height = fig_height)
source("R/hs_fig.R")
dev.off()
# ...and render the same figure again to SVG.
svg("R/hs_fig.svg", width = fig_width, height = fig_height)
source("R/hs_fig.R")
dev.off()
|
# Coursera JHU Exploratory Data Analysis
# Course Assignment 1. Plot 3.
library(dplyr)
source("common.R") # Code for loading data is factored out to common.R
# Running
# Note: Set your working directory accordingly.
# Step 1. Source this file. ie. source("plot3.R")
# Step 2. Load the data via function LoadData(<path.to.data.file>)
# Step 3. Generate graph via function DoPlot3(pcdata)
# Example:
# > source("plot3.R")
# > pcdata <- LoadData(file.path("data", "household_power_consumption.txt"))
# > DoPlot3(pcdata)
# Draw the three sub-metering series against time and save to plot3.png.
#   pcdata: data frame with a plottable Date.Time column and numeric
#           Sub_metering_1..3 columns (presumably built by LoadData in
#           common.R -- not visible here).
DoPlot3 <- function(pcdata) {
  png(file="plot3.png")
  plot(x=pcdata$Date.Time, y=pcdata$Sub_metering_1,
       main = "", xlab = "",
       ylab = "Energy sub metering", type="l")
  # Overlay the second and third meters on the same axes.
  lines(x=pcdata$Date.Time, y=pcdata$Sub_metering_2, col="red")
  lines(x=pcdata$Date.Time, y=pcdata$Sub_metering_3, col="blue")
  legend("topright", lwd=2, col = c("black", "red", "blue"),
         legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
  dev.off()
}
|
/plot3.R
|
no_license
|
subwarp/ExData_Plotting1
|
R
| false
| false
| 990
|
r
|
# Coursera JHU Exploratory Data Analysis
# Course Assignment 1. Plot 3.
library(dplyr)
source("common.R") # Code for loading data is factored out to common.R
# Running
# Note: Set your working directory accordingly.
# Step 1. Source this file. ie. source("plot3.R")
# Step 2. Load the data via function LoadData(<path.to.data.file>)
# Step 3. Generate graph via function DoPlot3(pcdata)
# Example:
# > source("plot3.R")
# > pcdata <- LoadData(file.path("data", "household_power_consumption.txt"))
# > DoPlot3(pcdata)
# Draw the three sub-metering series against time and save to plot3.png.
#   pcdata: data frame with a plottable Date.Time column and numeric
#           Sub_metering_1..3 columns (presumably built by LoadData in
#           common.R -- not visible here).
DoPlot3 <- function(pcdata) {
  png(file="plot3.png")
  plot(x=pcdata$Date.Time, y=pcdata$Sub_metering_1,
       main = "", xlab = "",
       ylab = "Energy sub metering", type="l")
  # Overlay the second and third meters on the same axes.
  lines(x=pcdata$Date.Time, y=pcdata$Sub_metering_2, col="red")
  lines(x=pcdata$Date.Time, y=pcdata$Sub_metering_3, col="blue")
  legend("topright", lwd=2, col = c("black", "red", "blue"),
         legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
  dev.off()
}
|
library(MCI)
### Name: huff.shares
### Title: Huff model market share/market area simulations
### Aliases: huff.shares

### ** Examples

# Loads the data
data(Freiburg1)
data(Freiburg2) # not used by the call below
# Huff market-share simulation on the Freiburg1 interaction data:
# submarkets = "district", suppliers = "store", attraction variable =
# "salesarea", transport cost variable = "distance".
# Standard weighting (power function with gamma=1 and lambda=-2)
huff.shares (Freiburg1, "district", "store", "salesarea", "distance")
|
/data/genthat_extracted_code/MCI/examples/huff.shares.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 327
|
r
|
library(MCI)
### Name: huff.shares
### Title: Huff model market share/market area simulations
### Aliases: huff.shares

### ** Examples

# Loads the data
data(Freiburg1)
data(Freiburg2) # not used by the call below
# Huff market-share simulation on the Freiburg1 interaction data:
# submarkets = "district", suppliers = "store", attraction variable =
# "salesarea", transport cost variable = "distance".
# Standard weighting (power function with gamma=1 and lambda=-2)
huff.shares (Freiburg1, "district", "store", "salesarea", "distance")
|
# NOTE(review): console transcript (lines keep their ">" prompt), kept
# verbatim -- strip the prompts before source()-ing this file.
> library(datasets); attach(anscombe)
# What do these plots have in common??
> par(mfrow=c(2,2)); plot(y1); plot(y2); plot(y3); plot(y4)
# Computing their means...
> apply(cbind(y1,y2,y3,y4),2,mean)
# ...they turn out to be practically equal (to the second decimal place)
> plot(sort(y1)); plot(sort(y2)); plot(sort(y3)); plot(sort(y4))
# even though the distributional shapes are quite different
|
/snippets/anscombe.R
|
no_license
|
SunnyWangECNU/StatisticaDoc
|
R
| false
| false
| 382
|
r
|
# NOTE(review): console transcript (lines keep their ">" prompt), kept
# verbatim -- strip the prompts before source()-ing this file.
> library(datasets); attach(anscombe)
# What do these plots have in common??
> par(mfrow=c(2,2)); plot(y1); plot(y2); plot(y3); plot(y4)
# Computing their means...
> apply(cbind(y1,y2,y3,y4),2,mean)
# ...they turn out to be practically equal (to the second decimal place)
> plot(sort(y1)); plot(sort(y2)); plot(sort(y3)); plot(sort(y4))
# even though the distributional shapes are quite different
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hello.R
\name{get_last_sunday}
\alias{get_last_sunday}
\title{Return the date for last Sunday}
\usage{
get_last_sunday(now = Sys.Date())
}
\arguments{
\item{now}{Today's date in ymd format. Defaults to the output of \code{Sys.Date()}.}
}
\value{
Last Sunday's date in POSIXct format
}
\description{
Return the date for last Sunday
}
\examples{
get_last_sunday()
get_last_sunday("2014-02-13")
}
|
/scheduler/man/get_last_sunday.Rd
|
no_license
|
finchnSNPs/InspirationDisseminationSchedule
|
R
| false
| true
| 473
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hello.R
\name{get_last_sunday}
\alias{get_last_sunday}
\title{Return the date for last Sunday}
\usage{
get_last_sunday(now = Sys.Date())
}
\arguments{
\item{now}{Today's date in ymd format. Defaults to the output of \code{Sys.Date()}.}
}
\value{
Last Sunday's date in POSIXct format
}
\description{
Return the date for last Sunday
}
\examples{
get_last_sunday()
get_last_sunday("2014-02-13")
}
|
## Example for Programming Assignment 2: Lexical Scoping
# Return the mean of the data held in a "cached vector" object `x`
# (a list exposing get(), setmean(), getmean()). The mean is computed
# once, written back into the object, and served from the cache on
# subsequent calls. Extra arguments in `...` are forwarded to mean().
cachemean <- function(x, ...) {
  cached <- x$getmean()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  fresh <- mean(x$get(), ...)
  x$setmean(fresh)
  fresh
}
|
/cachemean.R
|
no_license
|
kllontop/ProgrammingAssignment2
|
R
| false
| false
| 307
|
r
|
## Example for Programming Assignment 2: Lexical Scoping
# Compute (or fetch) the mean stored in the cache-aware object `x`.
# `x` is a list of closures: get() yields the data, getmean() yields
# the cached mean (NULL if unset), setmean() stores a new mean.
cachemean <- function(x, ...) {
  result <- x$getmean()
  if (is.null(result)) {
    # Cache miss: compute and persist into the object.
    result <- mean(x$get(), ...)
    x$setmean(result)
  } else {
    message("getting cached data")
  }
  result
}
|
# ============================================================
# App install/uninstall + event-log analysis (interview task).
# Loads pre-cached RDS snapshots of the raw CSV exports, then
# derives retention, day-parting and purchase summaries below.
# ============================================================
# NOTE(review): hard-coded setwd() ties this script to one machine;
# prefer relative paths or running from the project root.
setwd("D://R//job Assign//install//data_for_question_uninstall//interview")
getwd()
# Library
# NOTE(review): several packages are attached twice (lattice, ggplot2,
# data.table, lubridate, readr) -- harmless but redundant.
library(lattice)
library(plyr)
library(dplyr)
library(ggplot2)#
library(readr)
library(lubridate)
library(data.table)
library(lattice)
library(ggplot2)
library(reshape)
library(scales)#
library(data.table)#
library(lubridate)
library(readr)
# change date to date format
# coltypes <- list(event_timestamp= col_datetime("%d-%m-%Y %H:%M:%S"),properties.timeStamp=col_datetime("%d-%m-%Y %H:%M:%S"))
# Read A file (one-time CSV -> RDS conversion, kept for reference)
# file<-read_csv("Formatted Data.csv", col_types=coltypes)
# saveRDS(file,"rowfile.rds")
eventlogfile<-readRDS("rowfile.rds")
# Reading second file (one-time CSV -> RDS conversion, kept for reference)
# coltypes <- list(creation_date= col_datetime("%Y-%m-%d %H:%M:%S"))
#
# uifile<-read_csv("uicycles.csv", col_types=coltypes)
# saveRDS( uifile,"uifile.rds")
# uifile<-readRDS("uifile.rds")
# Year- 2017 Month - 1-12 Day- 0-31 Hour- 0-23 Minute- 0-59 Second- 0-59 dayDate - DATE
# DayParting Day time divided into interval of three hours.
# Late Night - 0:3
# Early Morning - 3:6
# Morning - 6:12
# AfterNoon - 12:15
# Evening - 15:18
# Night - 18:21
# Mid Night - 21:24
# DayOfWeek: week days
# check unique user counts in both files
# NOTE(review): uifile is used here but its readRDS() above is commented
# out -- the script relies on uifile already being in the workspace
# (it is only loaded from uifile.rds further below).
str(unique(uifile$uuid)) #118242
str(unique(eventlogfile$uuid)) #72805
#### Transform
# Derive calendar features on both tables: factorised ids/events,
# year/month/day/hour/minute/second, day-rounded timestamp, weekday,
# and a 3-hour "day parting" bucket (cut with right = FALSE so each
# break belongs to the later bucket).
uifile <-
  uifile %>%
  mutate(uuid =factor(uuid),
         os =factor(os),
         event_type =factor(event_type),
         uiYear = factor(year(creation_date), levels=2015:2017),
         uiMonth = factor(month(creation_date), levels=1:12),
         uiDay = day(creation_date),
         uiHour = factor(hour(creation_date), levels=0:23),
         uiMinute = factor(minute(creation_date), levels=0:59),
         uiSecond = factor(second(creation_date), levels=0:59),
         uidayDate = as.POSIXct(round(creation_date, units = "days")),
         uiDayOfWeek = weekdays(as.Date(uidayDate)),
         uiDayParting = cut(as.numeric(as.character(factor(hour(creation_date), levels=0:23))),
                            breaks = c(0 , 3, 6, 9,12, 15, 18, 21, 24),
                            labels = c("Late Night", "Early Morning","Morning","Late Morning", "AfterNoon", "Evening", "Night","Mid Night"),
                            right = FALSE)
  )
saveRDS( uifile,"uifile.rds")
eventlogfile <-
  eventlogfile %>%
  mutate(uuid =factor(uuid),
         event =factor(event),
         elfYear = factor(year(event_timestamp), levels=2015:2017),
         elfMonth = factor(month(event_timestamp), levels=1:12),
         elfDay = day(event_timestamp),
         elfHour = factor(hour(event_timestamp), levels=0:23),
         elfMinute = factor(minute(event_timestamp), levels=0:59),
         elfSecond = factor(second(event_timestamp), levels=0:59),
         elfdayDate = as.POSIXct(round(event_timestamp, units = "days")),
         elfDayOfWeek = weekdays(as.Date(elfdayDate)),
         elfDayParting = cut(as.numeric(as.character(factor(hour(event_timestamp), levels=0:23))),
                             breaks = c(0 , 3, 6,9, 12, 15, 18, 21, 24),
                             labels = c("Late Night", "Early Morning","Morning","Late Morning", "AfterNoon", "Evening", "Night","Mid Night"),
                             right = FALSE)
  )
# saveRDS(eventlogfile,"rowfile.rds")
##################
# NOTE(review): reloading rowfile.rds here discards the elf* columns
# just derived on eventlogfile (its saveRDS above is commented out);
# the day-parting column is re-derived later where needed.
eventlogfile<-readRDS("rowfile.rds")
uifile<-readRDS("uifile.rds")
####################################################################
# Customer retention trends
# Stack bar Graph: daily counts per event_type (install/re-install/uninstall)
retentionInstall<-data.table(uifile)
retentionInstall<- retentionInstall[ , list(ActivityCount = sum(.N)), by = list(event_type,uidayDate)]
# Stack bar graph of daily event install, reinstall, uninstall
ggplot(retentionInstall, aes(x = uidayDate, y = ActivityCount, fill = event_type)) +
  geom_bar(stat = "identity")+
  #scale_x_date(date_breaks="1 week", date_minor_breaks = "1 day",date_labels = "%d-%b") +
  scale_y_continuous(label=comma)+
  xlab(" Date") +
  ylab("Activity Count") +
  ggtitle("Daily Activity Chart ")
# Time series of event type
ggplot(data=retentionInstall, aes(x = uidayDate, y = ActivityCount, group = event_type, colour = event_type)) +
  geom_line() +
  geom_point( size=4)+
  xlab(" Date") +
  ylab("Activity Count") +
  ggtitle("Daily Activity Time Series ")
# Customer retention trends from their install-uninstall cycles
# [frequency chart or histogram plot]
dt<-data.table(uifile)
# using setkey or setkeyv to set the key
setkeyv(dt, c('uidayDate','event_type'))
# converting date to week number to find weekly activity
# (data covers ISO weeks 31-36; labels give them friendly names)
dt$weekNumber<-week(dt$uidayDate)
dt<-transform(dt, weekNumber = factor(weekNumber,
                                      levels = c("31", "32", "33", "34", "35","36"),
                                      labels = c("1 FirstWeek", "2 Second Week", "3 Third Week",
                                                 "4 Forth Week", "5 Fifth Week", "6 Sixth Week")))
# distinct users per (week, event_type)
weeklyFile<-dt[, .(NumberofDistinctCoustomer = length(unique(uuid))), by =list(weekNumber,event_type)]
# Weekly Activity
ggplot(weeklyFile, aes(x = weekNumber, y = NumberofDistinctCoustomer, fill = event_type)) +
  geom_bar(stat = "identity")+
  scale_y_continuous(label=comma)+
  xlab(" Weeks") +
  ylab("Activity Count") +
  ggtitle("Weekly Activity ")
# line graph
ggplot(data=weeklyFile, aes(x=weekNumber, y=NumberofDistinctCoustomer, group = event_type, colour = event_type)) +
  geom_line() +
  geom_point( size=4)+
  xlab(" Weeks") +
  ylab("Activity Count") +
  ggtitle("Weekly Activity ")
# Transpose event_type into columns: install, re-install, uninstall
weeklyFile<-cast(weeklyFile,weekNumber~event_type)
# Sum install + re-install (columns 2:3 of the cast table)
weeklyFile$Install_ReInstall<-rowSums(weeklyFile[,2:3])
# Summary report
View(weeklyFile)
rm(dt,weeklyFile,retentionInstall)
###################################################################
## Find out the time of day when the customers are most active in the day
## [use your own discretion for time of day bucketing]
# Year- 2017 Month - 1-12 Day- 0-31 Hour- 0-23
# Minute- 0-59 Second- 0-59 dayDate - DATE
# DayParting Day time divided into interval of three hours.
# Late Night - 0:3
# Early Morning - 3:6
# Morning - 6:12
# AfterNoon - 12:15
# Evening - 15:18
# Night - 18:21
# Mid Night - 21:24
# DayOfWeek: week days
# - Find out the time of day when the customers are most active in the day
# [use your own discretion for time of day bucketing]
# Customer Activity on the basis of day parting
# (re-derives elfDayParting since eventlogfile was reloaded from disk)
eventlogfile <-
  eventlogfile %>%
  mutate(elfDayParting = cut(as.numeric(as.character(factor(hour(event_timestamp), levels=0:23))),
                             breaks = c(0 , 3, 6, 9,12, 15, 18, 21, 24),
                             labels = c("Late Night", "Early Morning","Morning","Late Morning", "AfterNoon", "Evening", "Night","Mid Night"),
                             right = FALSE))
ggplot(
  data = eventlogfile,
  aes(x = elfDayParting)) +
  geom_bar(fill="light blue") +
  xlab(" Day Parting") +
  ylab("User Activity Count") +
  scale_y_continuous(label=comma)+
  coord_flip() +
  ggtitle("Dayparting ~ User Activity ")
# Purchase value buckets
# [find purchase events from event logs and parse the 'properties' column to get
# money/cart value associated and generate a simple bucketed frequency chart/histogram plot]
# convert to numeric (non-numeric entries become NA)
eventlogfile$`properties.Cart Value`<-as.numeric(eventlogfile$`properties.Cart Value`)
# Histogram and frequency chart
ggplot(eventlogfile, aes(`properties.Cart Value`)) + # NA values dropped by ggplot
  geom_histogram(binwidth = 500,fill="sky blue")+
  geom_freqpoly(binwidth =500, color = "dark blue")+
  xlab(" Cart Value( in Rupee)") +
  ylab("No of users") +
  ggtitle("Cart Value Histogram ")
# custom-width buckets (doubling breaks up to 64000)
temp<-as.numeric(eventlogfile$`properties.Cart Value`)
temp<-as.data.frame(temp)
names(temp)<-c("properties.Cart Value")
temp<-na.omit(temp) # remove the NA values
temp <-
  temp %>%
  mutate(customBin=cut( `properties.Cart Value`,
                        breaks = c(0 , 1000, 2000, 4000, 8000, 16000, 32000, 64000),
                        labels = c("0-1000", "1000-2000","2000-4000", "4000-8000", "8000-16000",
                                   "16000-32000","32000-64000"),
                        right = FALSE)
  )
ggplot(temp, aes(customBin)) + # NA values already removed above
  geom_histogram(stat = "count",fill="sky blue")+
  xlab(" Cart Value( in Rupee)") +
  ylab("No of Users") +
  ggtitle("Cart Value Histogram ")
# remove temp
rm(temp)
# Filter Checkout activity by matching any of these event strings
checkoutevent<- c("Checkout is completed by PG",
                  "Checkout is completed by null",
                  "Checkout is completed by Credit Cards / Debit Cards / Net Banking",
                  "Checkout is completed by Cash On Delivery","Guest checkout initiated",
                  "Checkout is completed by Paid using zCoins")
# filtered rows have an empty cart value, so the histogram below is skipped
checkoutFilter<-filter(eventlogfile, grepl(paste(checkoutevent, collapse="|"), event))
## these rows carry no information about cart value
# Histogram and frequency chart (kept for reference)
# ggplot(checkoutFilter, aes(`properties.Cart Value`)) + # removing na values
# geom_histogram(binwidth = 500,fill="sky blue")+
# geom_freqpoly(binwidth =500, color = "dark blue")+
# xlab(" Cart Value( in Rupee)") +
# ylab("Count") +
# ggtitle("Cart Value Histogram ")
rm(checkoutevent,checkoutFilter)
## Behavior of purchasing and non-purchasing customers
## [something along the lines of their in-app event path in a given install-uninstall cycle]
## User file
PNCustomer<-uifile
# order by creation date
PNCustomer<-PNCustomer %>%
  group_by(uuid) %>%
  arrange(creation_date)
## Collapse each user's install/uninstall events into one ordered string
PNCustomer<-PNCustomer %>%
  group_by(uuid) %>%
  summarise(eventSeries = paste(event_type, collapse = ", ")
            ,eventdate = paste(as.character(creation_date), collapse = ", "))
View(PNCustomer)
# frequency table of the distinct event sequences
PNCustomertable<-(table(PNCustomer$eventSeries))
PNCustomertable<-as.data.frame(PNCustomertable)
# one row per distinct install/re-install/uninstall pattern, with its count
View(PNCustomertable)
## Graph for customer cycle of install and uninstall
GraphPNCustomertable<-PNCustomertable
# reduce each sequence to "firstEvent,lastEvent" via regex on the string
GraphPNCustomertable$CyclicEvent<-paste(sub('\\s*,.*', '', as.character(GraphPNCustomertable$Var1))
                                        ,sub('.*,\\s*', '', as.character(GraphPNCustomertable$Var1)), sep = ",")
GraphPNCustomertable<-data.table(GraphPNCustomertable)
GraphPNCustomertable<-
  GraphPNCustomertable[ , list(ActivityCount = sum(as.numeric(Freq))), by = CyclicEvent]
GraphPNCustomertable<-GraphPNCustomertable[order(ActivityCount),]
# GraphPNCustomertable: similar sequences merged by keeping only the
# first and last activity of each install/re-install/uninstall cycle.
View(GraphPNCustomertable)
# Bar graph of customer cycles of install and uninstall
ggplot(GraphPNCustomertable, aes(x = CyclicEvent, y = ActivityCount)) +
  geom_bar(stat = "identity")+
  scale_y_continuous(label=comma)+
  coord_flip()+
  xlab(" Cyclic Event") +
  ylab("Customer Activity Count") +
  ggtitle("Customer cycle of install and uninstall ")
#rm(PNCustomertable,GraphPNCustomertable)
# same idea on the event-log file (first three columns only)
efltemp<-eventlogfile[,1:3]
## drop push-token noise events
efltemp<-efltemp[- grep("uio_push_token", efltemp$event),]
# order by event_timestamp
efltemp<-efltemp %>%
  group_by(uuid) %>%
  arrange(event_timestamp)
## Collapse each user's in-app events into one ordered string
efltemp<-efltemp %>%
  group_by(uuid) %>%
  summarise(eventSeries = paste(event, collapse = ", ")
            ,eventdate = paste(as.character(event_timestamp), collapse = ", "))
# one row per user describing their full in-app event path
View(efltemp)
# join: per-user in-app event path alongside their
# install/re-install/uninstall cycle.
CombineActivityInfo<-full_join(efltemp, PNCustomer,by = "uuid")
View(CombineActivityInfo)
# How are their purchases distributed post install?
# Customer behaviour summary per uuid:
#   first Activity / last activity / number of orders / total purchase amount
# use data.table for aggregation because of the large data volume
dt<-data.table(eventlogfile)
a<- dt[ , list(FirstActivity = min(event_timestamp)), by = uuid]
b<- dt[ , list(LastActivity = max(event_timestamp)), by = uuid]
c<- dt[ , list(TotalCartValue = sum(`properties.Cart Value`,na.rm = TRUE)), by = uuid]
d<- dt[ , list(productCount = sum(as.numeric(`properties.No Of Products`),na.rm = TRUE)), by = uuid]
data_raw <- cbind(a,b,c,d)
# Customer Activity Summary: keep uuid plus the four aggregate columns
# (columns 3,5,7 are the duplicated uuid keys from cbind)
data_raw <-data_raw[,c(1,2,4,6,8)]
# ProductCount ~ TotalCartValue scatter with linear fit
ggplot(data_raw,aes(productCount,TotalCartValue))+
  geom_point(color="blue")+
  geom_smooth(method=lm)+
  scale_y_continuous(label=comma)+
  xlab(" Product purchase Count") +
  ylab("Total Amount Spend") +
  ggtitle("User Purchase Summary ")
# ProductCount ~ TotalNSCartValue -- dropped: it does not match product count
# ggplot(data_raw,aes(productCount,TotalNSCartValue))+
#   geom_point(color="blue")+
#   geom_smooth(method=lm)+
#   scale_y_continuous(label=comma)+
#   xlab(" Product purchase Count") +
#   ylab("Total Amount Spend") +
#   ggtitle("User Purchase Summary ")
data_raw <-data_raw[,c(1,2,3,4,6)]
# Customer behaviour derived from activity (event log file)
View(data_raw)
rm(data_raw)
# Do they perform purchases in the 2nd, 3rd etc. weeks post install?
# Filter users who have installed or re-installed the app.
uisubset<-subset(uifile, event_type == "install" | event_type == "re-install")
uisubset<-as.data.frame(uisubset[,1])
# keep only the columns of interest from the event log
elfsubfile<-eventlogfile[
  ,c("uuid","event","event_timestamp","properties.No Of Products", "properties.Cart Value","properties.category","properties.ns_cart_amount","elfdayDate")]
## drop push-token noise events
elfsubfile<-elfsubfile[- grep("uio_push_token", elfsubfile$event),]
# converting date to week number to find weekly activity
elfsubfile$weekNumber<-week(elfsubfile$elfdayDate)
elfsubfile<-transform(elfsubfile, weekNumber = factor(weekNumber,
                                                      levels = c("34", "35","36"),
                                                      labels = c("1 FirstWeek", "2 Second Week","3 Third Week")))
# join with installer list to keep only installed/re-installed users' activity
EventJoinData<-inner_join(x=elfsubfile,y=uisubset,by="uuid")
dt<-data.table(EventJoinData)
# using setkey or setkeyv to set the key
setkeyv(dt, c('uuid', 'weekNumber'))
# per-user, per-week aggregates
# NOTE(review): transform() mangled the backtick names to dotted form
# (properties.Cart.Value etc.), hence the different spellings below.
a<- dt[ , list(FirstActivity = min(event_timestamp)), list(uuid, weekNumber)]
b<- dt[ , list(LastActivity = max(event_timestamp)), list(uuid, weekNumber)]
c<- dt[ , list(TotalCartValue = sum(as.numeric(`properties.Cart.Value`),na.rm = TRUE)), by = list(uuid, weekNumber)]
d<- dt[ , list(TotalCartValue = sum(as.numeric(`properties.ns_cart_amount`),na.rm = TRUE)), by = list(uuid, weekNumber)]
e<- dt[ , list(productCount = sum(as.numeric(`properties.No.Of.Products`),na.rm = TRUE)), by = list(uuid, weekNumber)]
f<- dt[ , list(ActivityCount = sum(.N)), by = list(uuid, weekNumber)]
g<- dt[ , .(eventTrack = paste(event, collapse=",")), by = list(uuid, weekNumber)]
h<- dt[ , .(dayDateTrack = paste(elfdayDate, collapse=",")), by = list(uuid, weekNumber)]
data_raw <- cbind(a,b,c,d,e,f,g,h)
# drop the duplicated key columns introduced by cbind
data_raw<-data_raw[, which(duplicated(names(data_raw))) := NULL]
View(data_raw)
rm(a,b,c,d,e,f,g,h)
# - Is there a steady inflow of revenue as a customer's retention increases?
# [growth can decline but is it still a positive gradient?]
# large data volume is handled better in data.table
dt<-data.table(uifile)
# using setkey or setkeyv to set the key
setkeyv(dt, c('uidayDate','event_type'))
# converting date to week number to find weekly activity
dt$weekNumber<-week(dt$uidayDate)
dt<-transform(dt, weekNumber = factor(weekNumber,
                                      levels = c("31", "32", "33", "34", "35","36"),
                                      labels = c("1 FirstWeek", "2 Second Week", "3 Third Week",
                                                 "4 Forth Week", "5 Fifth Week", "6 Sixth Week")))
weeklyFile<-dt[, .(NumberofDistinctCoustomer = length(unique(uuid))), by =list(weekNumber,event_type)]
# Weekly Activity
ggplot(weeklyFile, aes(x = weekNumber, y = NumberofDistinctCoustomer, fill = event_type)) +
  geom_bar(stat = "identity")+
  scale_y_continuous(label=comma)+
  xlab(" Week") +
  ylab("Customer Activity") +
  ggtitle("Weekly Activity ")
# line graph
ggplot(data=weeklyFile, aes(x=weekNumber, y=NumberofDistinctCoustomer, group = event_type, colour = event_type)) +
  geom_line() +
  geom_point( size=4)+
  xlab(" Week") +
  ylab("Customer Activity") +
  ggtitle("Weekly Activity ")
# Transpose event_type into columns: install, re-install, uninstall
weeklyFile<-cast(weeklyFile,weekNumber~event_type)
# Sum install + re-install (columns 2:3 of the cast table)
weeklyFile$Install_New<-rowSums(weeklyFile[,2:3])
View(weeklyFile)
# rm(checkoutFilter,checkoutevent,data_raw)
# rm(CombineActivityInfo,dt,efltemp,elfsubfile,EventJoinData)
## Customer Age: days between (re-)install and uninstall per user
retention<-uifile[,c(1,3,4)]
retention<-data.table(retention)
retention<-retention[order(uuid,creation_date),]
retentionInstall<-subset(retention, event_type == "install" | event_type == "re-install")
retentionUninstall<-subset(retention, event_type == "uninstall")
a<- retentionInstall[ , list(FirstActivity = min(creation_date)), by=uuid]
b<- retentionUninstall[ , list(LastActivity = max(creation_date)), by=uuid]
retention<-full_join(retentionInstall,retentionUninstall,by="uuid")
names(retention)<-c("uuid","Install","InstallDate" ,"Uninstall","UninstallDate")
retention<-setorder(retention, uuid,InstallDate,UninstallDate)
# NOTE(review): hard-coded row slice -- drops the first two rows and
# anything past row 121723; brittle if the data changes size.
retention<-retention[3:121723,]
minimum<-min(retention$InstallDate,na.rm = TRUE)
minimum<-as.POSIXct(minimum, "%m-%d-%Y-%X")
maximum<-max(retention$UninstallDate,na.rm = TRUE)
maximum<-as.POSIXct(maximum, "%m-%d-%Y-%X")
# replace NA install dates with the earliest date and NA uninstall
# dates with the latest date so an age can always be computed
retention$InstallDate<- ymd_hms(ifelse(is.na(retention$InstallDate),paste(minimum),paste(retention$InstallDate)))
retention$UninstallDate<- ymd_hms(ifelse(is.na(retention$UninstallDate),paste(maximum),paste(retention$UninstallDate)))
retention$AgeInDay<-as.Date(as.character(retention$UninstallDate), format="%Y-%m-%d %H:%M:%S")-
  as.Date(as.character(retention$InstallDate), format="%Y-%m-%d %H:%M:%S")
retention<-retention[,c(1,3,5,6)]
# a negative age means the user has not uninstalled yet; cap at 30 days
retention$AgeInDay<-ifelse(retention$AgeInDay<0,30,retention$AgeInDay)
# Create box plot of Customer Age
ggplot(
  data = retention,
  aes(x = AgeInDay, y = AgeInDay)) +
  geom_boxplot() +
  coord_flip() +
  ggtitle("Distribution of Customer Age") +
  xlab("") +
  ylab("Customer Age(Days)") +
  scale_y_continuous(minor_breaks = seq(1, 30, 1))+
  theme(
    axis.text.y = element_blank(),
    axis.ticks.y = element_blank())
summary(retention$AgeInDay)
|
/Company C/Script.R
|
no_license
|
BhanuPratapSinghSikarwar/Assignments
|
R
| false
| false
| 20,019
|
r
|
setwd("D://R//job Assign//install//data_for_question_uninstall//interview")
getwd()
# Library
library(lattice)
library(plyr)
library(dplyr)
library(ggplot2)#
library(readr)
library(lubridate)
library(data.table)
library(lattice)
library(ggplot2)
library(reshape)
library(scales)#
library(data.table)#
library(lubridate)
library(readr)
# change date to date format
# coltypes <- list(event_timestamp= col_datetime("%d-%m-%Y %H:%M:%S"),properties.timeStamp=col_datetime("%d-%m-%Y %H:%M:%S"))
# Read A file
# file<-read_csv("Formatted Data.csv", col_types=coltypes)
# saveRDS(file,"rowfile.rds")
eventlogfile<-readRDS("rowfile.rds")
# Reading second file
# coltypes <- list(creation_date= col_datetime("%Y-%m-%d %H:%M:%S"))
#
# uifile<-read_csv("uicycles.csv", col_types=coltypes)
# saveRDS( uifile,"uifile.rds")
# uifile<-readRDS("uifile.rds")
# Year- 2017 Month - 1-12 Day- 0-31 Hour- 0-23 Minute- 0-59 Second- 0-59 dayDate - DATE
# DayParting Day time divided into interval of three hours.
# Late Night - 0:3
# Early Morning - 3:6
# Morning - 6:12
# AfterNoon - 12:15
# Evening - 15:18
# Night - 18:21
# Mid Night - 21:24
# DayOfWeek: week days
# check user unique user info in both file
str(unique(uifile$uuid)) #118242
str(unique(eventlogfile$uuid)) #72805
#### Transform
uifile <-
uifile %>%
mutate(uuid =factor(uuid),
os =factor(os),
event_type =factor(event_type),
uiYear = factor(year(creation_date), levels=2015:2017),
uiMonth = factor(month(creation_date), levels=1:12),
uiDay = day(creation_date),
uiHour = factor(hour(creation_date), levels=0:23),
uiMinute = factor(minute(creation_date), levels=0:59),
uiSecond = factor(second(creation_date), levels=0:59),
uidayDate = as.POSIXct(round(creation_date, units = "days")),
uiDayOfWeek = weekdays(as.Date(uidayDate)),
uiDayParting = cut(as.numeric(as.character(factor(hour(creation_date), levels=0:23))),
breaks = c(0 , 3, 6, 9,12, 15, 18, 21, 24),
labels = c("Late Night", "Early Morning","Morning","Late Morning", "AfterNoon", "Evening", "Night","Mid Night"),
right = FALSE)
)
saveRDS( uifile,"uifile.rds")
eventlogfile <-
eventlogfile %>%
mutate(uuid =factor(uuid),
event =factor(event),
elfYear = factor(year(event_timestamp), levels=2015:2017),
elfMonth = factor(month(event_timestamp), levels=1:12),
elfDay = day(event_timestamp),
elfHour = factor(hour(event_timestamp), levels=0:23),
elfMinute = factor(minute(event_timestamp), levels=0:59),
elfSecond = factor(second(event_timestamp), levels=0:59),
elfdayDate = as.POSIXct(round(event_timestamp, units = "days")),
elfDayOfWeek = weekdays(as.Date(elfdayDate)),
elfDayParting = cut(as.numeric(as.character(factor(hour(event_timestamp), levels=0:23))),
breaks = c(0 , 3, 6,9, 12, 15, 18, 21, 24),
labels = c("Late Night", "Early Morning","Morning","Late Morning", "AfterNoon", "Evening", "Night","Mid Night"),
right = FALSE)
)
# saveRDS(eventlogfile,"rowfile.rds")
##################
eventlogfile<-readRDS("rowfile.rds")
uifile<-readRDS("uifile.rds")
####################################################################
# Customer retention trends
# Stack bar Graph
retentionInstall<-data.table(uifile)
retentionInstall<- retentionInstall[ , list(ActivityCount = sum(.N)), by = list(event_type,uidayDate)]
# Stack bar graph of daily event install, reinstall, uninstall
ggplot(retentionInstall, aes(x = uidayDate, y = ActivityCount, fill = event_type)) +
geom_bar(stat = "identity")+
#scale_x_date(date_breaks="1 week", date_minor_breaks = "1 day",date_labels = "%d-%b") +
scale_y_continuous(label=comma)+
xlab(" Date") +
ylab("Activity Count") +
ggtitle("Daily Activity Chart ")
# Time series of event type
ggplot(data=retentionInstall, aes(x = uidayDate, y = ActivityCount, group = event_type, colour = event_type)) +
geom_line() +
geom_point( size=4)+
xlab(" Date") +
ylab("Activity Count") +
ggtitle("Daily Activity Time Series ")
# Customer retention trends from their install-uninstall cycles
# [frequency chart or histogram plot]
dt<-data.table(uifile)
# using setkey or setkeyv to set the key
setkeyv(dt, c('uidayDate','event_type'))
# converting date to week number to find weekly activity
dt$weekNumber<-week(dt$uidayDate)
dt<-transform(dt, weekNumber = factor(weekNumber,
levels = c("31", "32", "33", "34", "35","36"),
labels = c("1 FirstWeek", "2 Second Week", "3 Third Week",
"4 Forth Week", "5 Fifth Week", "6 Sixth Week")))
weeklyFile<-dt[, .(NumberofDistinctCoustomer = length(unique(uuid))), by =list(weekNumber,event_type)]
# Weekly Activity
ggplot(weeklyFile, aes(x = weekNumber, y = NumberofDistinctCoustomer, fill = event_type)) +
geom_bar(stat = "identity")+
scale_y_continuous(label=comma)+
xlab(" Weeks") +
ylab("Activity Count") +
ggtitle("Weekly Activity ")
# line graph
ggplot(data=weeklyFile, aes(x=weekNumber, y=NumberofDistinctCoustomer, group = event_type, colour = event_type)) +
geom_line() +
geom_point( size=4)+
xlab(" Weeks") +
ylab("Activity Count") +
ggtitle("Weekly Activity ")
# Transpose to colomn install, reinstall, uninstall
weeklyFile<-cast(weeklyFile,weekNumber~event_type)
# Add re-install install
weeklyFile$Install_ReInstall<-rowSums(weeklyFile[,2:3])
# Summary report
View(weeklyFile)
rm(dt,weeklyFile,retentionInstall)
###################################################################
## Find out the time of day when the customers are most active in the day
## [use your own discretion for time of day bucketing
# Year- 2017 Month - 1-12 Day- 0-31 Hour- 0-23
# Minute- 0-59 Second- 0-59 dayDate - DATE
# DayParting Day time divided into interval of three hours.
# Late Night - 0:3
# Early Morning - 3:6
# Morning - 6:12
# AfterNoon - 12:15
# Evening - 15:18
# Night - 18:21
# Mid Night - 21:24
# DayOfWeek: week days
# - Find out the time of day when the customers are most active in the day
# [use your own discretion for time of day bucketing]
# Customer Activity on the basis of day parting
eventlogfile <-
eventlogfile %>%
mutate(elfDayParting = cut(as.numeric(as.character(factor(hour(event_timestamp), levels=0:23))),
breaks = c(0 , 3, 6, 9,12, 15, 18, 21, 24),
labels = c("Late Night", "Early Morning","Morning","Late Morning", "AfterNoon", "Evening", "Night","Mid Night"),
right = FALSE))
ggplot(
data = eventlogfile,
aes(x = elfDayParting)) +
geom_bar(fill="light blue") +
xlab(" Day Parting") +
ylab("User Activity Count") +
scale_y_continuous(label=comma)+
coord_flip() +
ggtitle("Dayparting ~ User Activity ")
# Purchase value buckets
# [find purchase events from event logs and parse the 'properties' column to get
# money/cart value associated and generate a simple bucketed frequency chart/histogram plot]
# convert to numeric
eventlogfile$`properties.Cart Value`<-as.numeric(eventlogfile$`properties.Cart Value`)
# Histogram and frequency chart
ggplot(eventlogfile, aes(`properties.Cart Value`)) + # removing na values
geom_histogram(binwidth = 500,fill="sky blue")+
geom_freqpoly(binwidth =500, color = "dark blue")+
xlab(" Cart Value( in Rupee)") +
ylab("No of users") +
ggtitle("Cart Value Histogram ")
#custom width
temp<-as.numeric(eventlogfile$`properties.Cart Value`)
temp<-as.data.frame(temp)
names(temp)<-c("properties.Cart Value")
temp<-na.omit(temp) # removed the NA value
temp <-
temp %>%
mutate(customBin=cut( `properties.Cart Value`,
breaks = c(0 , 1000, 2000, 4000, 8000, 16000, 32000, 64000),
labels = c("0-1000", "1000-2000","2000-4000", "4000-8000", "8000-16000",
"16000-32000","32000-64000"),
right = FALSE)
)
ggplot(temp, aes(customBin)) + # removing na values
geom_histogram(stat = "count",fill="sky blue")+
xlab(" Cart Value( in Rupee)") +
ylab("No of Users") +
ggtitle("Cart Value Histogram ")
# remove temp
rm(temp)
# Filter Checkout activity
checkoutevent<- c("Checkout is completed by PG",
"Checkout is completed by null",
"Checkout is completed by Credit Cards / Debit Cards / Net Banking",
"Checkout is completed by Cash On Delivery","Guest checkout initiated",
"Checkout is completed by Paid using zCoins")
# filtered value has empty cart value
checkoutFilter<-filter(eventlogfile, grepl(paste(checkoutevent, collapse="|"), event))
## this have no information about cart value
# Histogram and frequency chart
# ggplot(checkoutFilter, aes(`properties.Cart Value`)) + # removing na values
# geom_histogram(binwidth = 500,fill="sky blue")+
# geom_freqpoly(binwidth =500, color = "dark blue")+
# xlab(" Cart Value( in Rupee)") +
# ylab("Count") +
# ggtitle("Cart Value Histogram ")
rm(checkoutevent,checkoutFilter)
## Behavior of purchasing and non-purchasing customers
##[something along the lines of their in-app event path in a given install-uninstall cycle]
## User file
PNCustomer<-uifile
# order by creation date
PNCustomer<-PNCustomer %>%
group_by(uuid) %>%
arrange(creation_date)
## Create summary report of activity
PNCustomer<-PNCustomer %>%
group_by(uuid) %>%
summarise(eventSeries = paste(event_type, collapse = ", ")
,eventdate = paste(as.character(creation_date), collapse = ", "))
View(PNCustomer)
# table have user information on his activity and frequecy count
PNCustomertable<-(table(PNCustomer$eventSeries))
PNCustomertable<-as.data.frame(PNCustomertable)
# file has information about diffrent pattern of install reinstall uninstall cycle
View(PNCustomertable)
## Graph for coustomer cycle of install and uninstall
GraphPNCustomertable<-PNCustomertable
GraphPNCustomertable$CyclicEvent<-paste(sub('\\s*,.*', '', as.character(GraphPNCustomertable$Var1))
,sub('.*,\\s*', '', as.character(GraphPNCustomertable$Var1)), sep = ",")
GraphPNCustomertable<-data.table(GraphPNCustomertable)
GraphPNCustomertable<-
GraphPNCustomertable[ , list(ActivityCount = sum(as.numeric(Freq))), by = CyclicEvent]
GraphPNCustomertable<-GraphPNCustomertable[order(ActivityCount),]
#GraphPNCustomertable: Similar Activity merge into install reinstall uninstall cycle by just keeping first and last activity.
View(GraphPNCustomertable)
# Bar graph coustomer cycle of install and uninstall
ggplot(GraphPNCustomertable, aes(x = CyclicEvent, y = ActivityCount)) +
geom_bar(stat = "identity")+
scale_y_continuous(label=comma)+
coord_flip()+
xlab(" Cyclic Event") +
ylab("Customer Activity Count") +
ggtitle("Customer cycle of install and uninstall ")
#rm(PNCustomertable,GraphPNCustomertable)
# on Event file
efltemp<-eventlogfile[,1:3]
## removing uio_push_token
efltemp<-efltemp[- grep("uio_push_token", efltemp$event),]
# order by event_timestamp
efltemp<-efltemp %>%
group_by(uuid) %>%
arrange(event_timestamp)
## Create summary report of activity
efltemp<-efltemp %>%
group_by(uuid) %>%
summarise(eventSeries = paste(event, collapse = ", ")
,eventdate = paste(as.character(event_timestamp), collapse = ", "))
# file has information about diffrent Activity of customer
View(efltemp)
# join data have information of all the activity he has done over a period of time and
# install reinstall uninstall cycle.
CombineActivityInfo<-full_join(efltemp, PNCustomer,by = "uuid")
View(CombineActivityInfo)
# How are their purchases distributed post install?
# Coustome behaviour
# uuid
# first Activity
# last activity
# number of order
# total amount of purchase
# use data table for aggregation because of large data
# Per-customer activity summary: first/last event timestamp, total cart value
# and total product count. data.table is used for speed on the large event log.
dt<-data.table(eventlogfile)
a<- dt[ , list(FirstActivity = min(event_timestamp)), by = uuid]
b<- dt[ , list(LastActivity = max(event_timestamp)), by = uuid]
c<- dt[ , list(TotalCartValue = sum(`properties.Cart Value`,na.rm = TRUE)), by = uuid]
d<- dt[ , list(productCount = sum(as.numeric(`properties.No Of Products`),na.rm = TRUE)), by = uuid]
# cbind repeats the uuid key column (columns 1, 3, 5, 7); the selection below
# keeps a single uuid plus the four aggregates (-> 5 columns)
data_raw <- cbind(a,b,c,d)
# Customer Activity Summary
data_raw <-data_raw[,c(1,2,4,6,8)]
# productCount ~ TotalCartValue scatter with a linear trend line
ggplot(data_raw,aes(productCount,TotalCartValue))+
  geom_point(color="blue")+
  geom_smooth(method=lm)+
  scale_y_continuous(label=comma)+
  xlab(" Product purchase Count") +
  ylab("Total Amount Spend") +
  ggtitle("User Purchase Summary ")
# ProductCount ~ TotalNSCartValue -- droping this information as this is not mactching with product count
# ggplot(data_raw,aes(productCount,TotalNSCartValue))+
#   geom_point(color="blue")+
#   geom_smooth(method=lm)+
#   scale_y_continuous(label=comma)+
#   xlab(" Product purchase Count") +
#   ylab("Total Amount Spend") +
#   ggtitle("User Purchase Summary ")
# NOTE(review): data_raw only has 5 columns at this point, so index 6 looks
# like a leftover from when a TotalNSCartValue column still existed -- verify
data_raw <-data_raw[,c(1,2,3,4,6)]
# Customer behaviour derived from the event log file
View(data_raw)
rm(data_raw)
# Do they perform purchases in the 2nd,3rd etc weeks post install?
# Filter user who have installed and reinstalled the app.
uisubset<-subset(uifile, event_type == "install" | event_type == "re-install")
uisubset<-as.data.frame(uisubset[,1])
# filter data from event log file
elfsubfile<-eventlogfile[
,c("uuid","event","event_timestamp","properties.No Of Products", "properties.Cart Value","properties.category","properties.ns_cart_amount","elfdayDate")]
## removing uio_push_token
elfsubfile<-elfsubfile[- grep("uio_push_token", elfsubfile$event),]
# converting date to week number to find weekly activity
elfsubfile$weekNumber<-week(elfsubfile$elfdayDate)
elfsubfile<-transform(elfsubfile, weekNumber = factor(weekNumber,
levels = c("34", "35","36"),
labels = c("1 FirstWeek", "2 Second Week","3 Third Week")))
# join with event log to find out user's activity
EventJoinData<-inner_join(x=elfsubfile,y=uisubset,by="uuid")
dt<-data.table(EventJoinData)
# using setkey or setkeyv to set the key
setkeyv(dt, c('uuid', 'weekNumber'))
# self
a<- dt[ , list(FirstActivity = min(event_timestamp)), list(uuid, weekNumber)]
b<- dt[ , list(LastActivity = max(event_timestamp)), list(uuid, weekNumber)]
c<- dt[ , list(TotalCartValue = sum(as.numeric(`properties.Cart.Value`),na.rm = TRUE)), by = list(uuid, weekNumber)]
d<- dt[ , list(TotalCartValue = sum(as.numeric(`properties.ns_cart_amount`),na.rm = TRUE)), by = list(uuid, weekNumber)]
e<- dt[ , list(productCount = sum(as.numeric(`properties.No.Of.Products`),na.rm = TRUE)), by = list(uuid, weekNumber)]
f<- dt[ , list(ActivityCount = sum(.N)), by = list(uuid, weekNumber)]
g<- dt[ , .(eventTrack = paste(event, collapse=",")), by = list(uuid, weekNumber)]
h<- dt[ , .(dayDateTrack = paste(elfdayDate, collapse=",")), by = list(uuid, weekNumber)]
data_raw <- cbind(a,b,c,d,e,f,g,h)
data_raw<-data_raw[, which(duplicated(names(data_raw))) := NULL]
View(data_raw)
rm(a,b,c,d,e,f,g,h)
# - Is there a steady inflow of revenue as a customer's retention increases?
# [growth can decline but is it still a positive gradient?]
# large volume of data handle better in data.table
dt<-data.table(uifile)
# using setkey or setkeyv to set the key
setkeyv(dt, c('uidayDate','event_type'))
# converting date to week number to find weekly activity
dt$weekNumber<-week(dt$uidayDate)
dt<-transform(dt, weekNumber = factor(weekNumber,
levels = c("31", "32", "33", "34", "35","36"),
labels = c("1 FirstWeek", "2 Second Week", "3 Third Week",
"4 Forth Week", "5 Fifth Week", "6 Sixth Week")))
weeklyFile<-dt[, .(NumberofDistinctCoustomer = length(unique(uuid))), by =list(weekNumber,event_type)]
# Weekly Activity
ggplot(weeklyFile, aes(x = weekNumber, y = NumberofDistinctCoustomer, fill = event_type)) +
geom_bar(stat = "identity")+
scale_y_continuous(label=comma)+
xlab(" Week") +
ylab("Customer Activity") +
ggtitle("Weekly Activity ")
# line graph
ggplot(data=weeklyFile, aes(x=weekNumber, y=NumberofDistinctCoustomer, group = event_type, colour = event_type)) +
geom_line() +
geom_point( size=4)+
xlab(" Week") +
ylab("Customer Activity") +
ggtitle("Weekly Activity ")
# Transpose to colomn install, reinstall, uninstall
weeklyFile<-cast(weeklyFile,weekNumber~event_type)
# Add re-install install
weeklyFile$Install_New<-rowSums(weeklyFile[,2:3])
View(weeklyFile)
# rm(checkoutFilter,checkoutevent,data_raw)
# rm(CombineActivityInfo,dt,efltemp,elfsubfile,EventJoinData)
## Customer Age
# Customer age (retention): days between first (re-)install and last uninstall.
retention<-uifile[,c(1,3,4)]
retention<-data.table(retention)
retention<-retention[order(uuid,creation_date),]
# split install-type events from uninstall events
retentionInstall<-subset(retention, event_type == "install" | event_type == "re-install")
retentionUninstall<-subset(retention, event_type == "uninstall")
a<- retentionInstall[ , list(FirstActivity = min(creation_date)), by=uuid]
b<- retentionUninstall[ , list(LastActivity = max(creation_date)), by=uuid]
# pair every install record with the user's uninstall record; a full join keeps
# users who only ever installed or only ever uninstalled (NA on the other side)
retention<-full_join(retentionInstall,retentionUninstall,by="uuid")
names(retention)<-c("uuid","Install","InstallDate" ,"Uninstall","UninstallDate")
retention<-setorder(retention, uuid,InstallDate,UninstallDate)
# NOTE(review): hard-coded row range drops the first two rows and everything
# past 121723 -- presumably data-set-specific cleanup; verify against the data
retention<-retention[3:121723,]
# observation-window bounds, used to fill missing install/uninstall timestamps
minimum<-min(retention$InstallDate,na.rm = TRUE)
minimum<-as.POSIXct(minimum, "%m-%d-%Y-%X")
maximum<-max(retention$UninstallDate,na.rm = TRUE)
maximum<-as.POSIXct(maximum, "%m-%d-%Y-%X")
# replace NA install dates with the earliest observed date and NA uninstall
# dates with the latest observed date
retention$InstallDate<- ymd_hms(ifelse(is.na(retention$InstallDate),paste(minimum),paste(retention$InstallDate)))
retention$UninstallDate<- ymd_hms(ifelse(is.na(retention$UninstallDate),paste(maximum),paste(retention$UninstallDate)))
# customer age in whole days (calendar-date arithmetic, time of day dropped)
retention$AgeInDay<-as.Date(as.character(retention$UninstallDate), format="%Y-%m-%d %H:%M:%S")-
  as.Date(as.character(retention$InstallDate), format="%Y-%m-%d %H:%M:%S")
retention<-retention[,c(1,3,5,6)]
# a negative age means the app has not been uninstalled yet; cap it at the
# 30-day observation window
retention$AgeInDay<-ifelse(retention$AgeInDay<0,30,retention$AgeInDay)
# Box plot of the customer-age distribution
ggplot(
  data = retention,
  aes(x = AgeInDay, y = AgeInDay)) +
  geom_boxplot() +
  coord_flip() +
  ggtitle("Distribution of Customer Age") +
  xlab("") +
  ylab("Customer Age(Days)") +
  scale_y_continuous(minor_breaks = seq(1, 30, 1))+
  theme(
    axis.text.y = element_blank(),
    axis.ticks.y = element_blank())
summary(retention$AgeInDay)
|
# create test cases for breast cancer project
#
#
#
# print Odds ratios
PRINTORS= FALSE
# Randomly pick an ethnicity code for a synthetic test case.
# Draws i = round(Unif(0, 3)): i == 1 -> "mao", i == 2 -> "oth",
# anything else (0 or 3) falls through to "eur".
create.eth <- function() {
  draw <- round(runif(1, 0, 3))
  if (draw == 1) {
    return('mao')
  }
  if (draw == 2) {
    return('oth')
  }
  'eur'
}
# Generate one random test case: draw every covariate uniformly from its
# valid range, score the profile with calc.risk(), and return inputs plus
# the risk as a character vector (c() coerces to character because eth is).
create.test <- function() {
  eth   <- create.eth()
  age1  <- round(runif(1, 1, 3))
  age2  <- round(runif(1, 1, 4))
  par   <- round(runif(1, 0, 4))
  con   <- round(runif(1, 0, 1))
  hbcan <- round(runif(1, 0, 2))
  hbben <- round(runif(1, 0, 1))
  htdis <- round(runif(1, 0, 1))
  age   <- round(runif(1, 20, 54))
  risk  <- calc.risk(eth, age1, age2, par, con, hbcan, hbben, htdis, age)
  c(eth, age1, age2, par, con, hbcan, hbben, htdis, age, risk)
}
# Relative risk for one covariate profile: the product of the odds ratios
# of all eight risk factors.
#
# The OR tables are stratified into three age bands: >= 50, 40-49, and < 40.
# Ethnicity: index 1 = European, index 2 = Maori; any ethP other than "mao"
# (including "oth") is scored with the European OR -- TODO confirm that is
# intended for "oth".
# age1P and age2P are 1-based category codes; parP, conP, hbcanP, hbbenP and
# htdisP are 0-based and shifted by +1 before indexing (R vectors start at 1).
# The -1 entries in the age2 tables mark category/age-band combinations that
# are invalid; they deliberately propagate as a negative relative risk so the
# caller can detect an invalid profile.
calc.rr <- function(ethP, age1P, age2P, parP, conP, hbcanP, hbbenP, htdisP, age) {
  # band: 1 = age >= 50, 2 = 40 <= age < 50, 3 = age < 40
  band <- if (age < 40) 3L else if (age < 50) 2L else 1L
  or.tables <- list(
    eth = list(c(1, 1.6933480),
               c(1, 1.6945510),
               c(1, 1.8781130)),
    age1 = list(c(1, 1.4724590, 1.3363460),
                c(1, 1.1611370, 1.2784060),
                c(1, 1.2458240, 1.5706720)),
    age2 = list(c(1, 1.6360440, 1.8564560, 1.6326780),
                c(-1, 2.5666830, 1, 1.3288150),
                c(-1, -1, 1.3860280, 1)),
    par = list(c(2.4877230, 1.8288870, 1.2481120, 1.2545910, 1),
               c(2.1628540, 2.8637210, 1.5404100, 1.4023330, 1),
               c(2.4407610, 1, 2.0563060, 3.1174840, 2.2136740)),
    con = list(c(1, 1.3689630),
               c(1, 1.2328330),
               c(1, 1.1665310)),
    hbcan = list(c(1, 1.1381650, 2.0561460),
                 c(1, 2.3343510, 2.7571180),
                 c(1, 1.2265770, 3.2352410)),
    hbben = list(c(1, 1.6373880),
                 c(1, 1.1701340),
                 c(1, 2.9657820)),
    htdis = list(c(1, 2.3022770),
                 c(1, 1.3971010),
                 c(1, 1.3393520))
  )
  ethIndex <- if (ethP == "mao") 2L else 1L
  ors <- c(
    or.tables$eth[[band]][ethIndex],
    or.tables$age1[[band]][age1P],
    or.tables$age2[[band]][age2P],
    # 0-based inputs: shift by one for R's 1-based indexing
    or.tables$par[[band]][parP + 1],
    or.tables$con[[band]][conP + 1],
    or.tables$hbcan[[band]][hbcanP + 1],
    or.tables$hbben[[band]][hbbenP + 1],
    or.tables$htdis[[band]][htdisP + 1]
  )
  # optional debug trace of the individual odds ratios (global flag PRINTORS)
  if (PRINTORS) {
    for (or in ors) print(or)
  }
  prod(ors)
}
# Population attributable-risk factor for the three age bands used
# throughout this file; (1 - AR) scales the baseline incidence in
# calc.risk().
calc.ar <- function(age) {
  if (age < 40) {
    0.761488315
  } else if (age < 50) {
    0.690972837
  } else {
    0.74961775
  }
}
# Map an age in years to a 0-based five-year age-group index:
# < 25 -> 0, 25-29 -> 1, 30-34 -> 2, 35-39 -> 3, 40-44 -> 4,
# 45-49 -> 5, >= 50 -> 6. Used to index the mortality/baseline tables.
# findInterval() counts the cut points <= age, which reproduces the
# original if-cascade in a single vectorized call.
to.agegroup <- function(age) {
  findInterval(age, c(25, 30, 35, 40, 45, 50))
}
# Annual all-cause mortality rate (per person-year) for a 0-based age
# group, by ethnicity. Rates are tabulated per 100,000, hence the 1e-5
# scaling. Any ethnicity other than 'eur' is given the Maori rates.
mortality <- function(eth, age.group) {
  rates.per.100k <- if (eth == 'eur') {
    c(30.79, 29.43, 41.44, 43.83, 79.92, 118.54, 177.30)
  } else {
    c(54.89, 62.69, 83.81, 117.45, 155.05, 291.82, 492.52)
  }
  # age.group is 0-based; R vectors are 1-based
  rates.per.100k[age.group + 1] * 1e-5
}
# Baseline breast-cancer incidence rate (per person-year) for a 0-based
# age group; tabulated per 100,000, hence the 1e-5 scaling.
baseline <- function(age.group) {
  per.100k <- c(1.65, 7.93, 29.97, 64.97, 125.09, 237.35, 215.08)
  per.100k[age.group + 1] * 1e-5
}
# Absolute 5-year breast-cancer risk for one covariate profile,
# integrating a piecewise-constant hazard over the (up to) two 5-year
# age groups the horizon spans, with competing mortality:
#   P = h1/(h1+m1) * (1 - exp(-t1*(h1+m1)))
#     + h2/(h2+m2) * exp(-t1*(h1+m1)) * (1 - exp(-t2*(h2+m2)))
# where h = baseline * (1 - AR) * RR is the cause-specific hazard and m
# the competing mortality hazard of the band. An invalid profile makes
# calc.rr() return a negative RR, which propagates to a negative risk.
calc.risk <- function(eth,age1,age2,par,con,hbcan,hbben,htdis,age) {
  result <- -1   # sentinel; always overwritten below
  # relative risk in the current band and in the band 5 years from now
  rr <- calc.rr(eth,age1,age2,par,con,hbcan,hbben,htdis,age)
  rr2 <- calc.rr(eth,age1,age2,par,con,hbcan,hbben,htdis,age+5)
  #return(rr)
  age.group <- to.agegroup(age)
  next.age.group <- to.agegroup(age + 5)
  # split the 5-year horizon between the two age groups it overlaps
  # (assumes group boundaries fall on multiples of 5 -- TODO confirm for
  # the < 25 open-ended group)
  years.in.age.group <- 5 - age %% 5
  years.in.next.age.group <- 5 - years.in.age.group
  # first term: probability of a cancer event while still in the current group
  numerator <- baseline(age.group) * (1-calc.ar(age)) * rr
  denominator <- numerator + mortality(eth,age.group)
  first.term <- (numerator / denominator ) * (1 - exp(-years.in.age.group * denominator))
  # second term: survive the current group event-free, then an event in the next group
  numerator.2 <- baseline(next.age.group) * (1-calc.ar(age+5)) * rr2
  denominator.2 <- numerator.2 + mortality(eth,next.age.group)
  second <- (numerator.2 / denominator.2) * exp(-years.in.age.group * denominator)
  third <- 1 - exp(-years.in.next.age.group * denominator.2)
  second.term <- second * third
  result <- first.term + second.term
  return(result)
}
create.test()
eth = "eur"
age1 = 3
age2 = 3
par = 0
con = 1
hbcan = 2
hbben = 1
htdis = 1
age = 50
# example 1
calc.rr("eur",3,3,2,1,2,1,1,30)
calc.risk("eur",3,3,2,1,2,1,1,30)
# example 2
calc.rr("eur",3,3,2,1,2,1,1,33)
calc.risk("eur",3,3,2,1,2,1,1,33)
# example 3
calc.rr("eur",3,3,1,1,2,1,1,40)
calc.risk("eur",3,3,1,1,2,1,1,40)
# example 4
eth = "eur"
age1 = 3
age2 = 3
par = 1
con = 1
hbcan = 2
hbben = 1
htdis = 1
age = 47
calc.rr("eur",3,3,1,1,2,1,1,47)
calc.risk("eur",3,3,1,1,2,1,1,47)
#example 5
eth = "eur"
age1 = 3
age2 = 3
par = 0
con = 1
hbcan = 2
hbben = 1
htdis = 1
age = 50
calc.rr("eur",3,3,0,1,2,1,1,50)
calc.risk("eur",3,3,0,1,2,1,1,50)
# example 2
calc.rr("eur",3,3,0,1,2,1,1,50)
calc.rr("mao",2,3,1,1,1,0,1,50)
{
result <- data.frame(eth,age1,age2,par,con,hbcan,hbben,htdis,age,absrisk,
stringsAsFactors=FALSE)
N <- 1000
for (i in 1:N) {
eth <- "eur"
if (rbinom(1,1,prob=0.5)==1) { eth = "mao"}
age <- trunc(runif(1,20,51))
age1 <- sample(c(1,2,3),size=1)
age2 <- sample(c(1,2,3,4),size=1)
par <- sample(c(0,1,2,3,4),size=1)
con <- sample(c(0,1),size=1)
hbcan <- sample(c(0,1,2),size=1)
hbben <- sample(c(0,1),size=1)
htdis <- sample(c(0,1),size=1)
age <- trunc(runif(1,20,51))
absrisk <- calc.risk(eth,
age1,
age2,
par,
con,
hbcan,
hbben,
htdis,
age)
print(paste(i,absrisk))
#if (absrisk < 0) {
# test cases with negative absrisk occur if the test case is invalid
# for example, when we have age at menopause > age. The user interface
# will prevent such cases, and the algorithm (web service) will produce
# a negative number to show something is amiss. However, we can leave them
# in here, because the output (the negative number) should still be the same
# in both implemtations (this one here and the web service)
absrisk <- formatC( round( absrisk, 4 ), format='f', digits=4 )
testcase <- c(eth,
age1,
age2,
par,
con,
hbcan,
hbben,
htdis,
age,
absrisk)
result <- rbind(testcase,result)
#} else {
# next;
#}
#rr <- calc.risk(testcase)
#calc.risk("eur",3,3,2,1,2,1,1,30)
}
}
# drop the last line, because -inexplicably- it can contain negative risk
result <- result[-dim(result)[1],]
tail(result)
write.csv(result,'/home/michel/Documents/breast cancer calculator/testdata/mytest02.csv',row.names=F,quote=F)
x <- 0.00034
formatC( round( x, 4 ), format='f', digits=4 )
calc.rr("mao",2,2,1,1,0,1,1,47)
calc.risk("mao",2,2,1,1,0,1,1,47)
PRINTORS = F
calc.rr("mao",3,3, 0, 1, 2, 1, 1, 20)
calc.risk("mao",3,3, 0, 1, 2, 1, 1, 20)
PRINTORS = T
absrisk = calc.rr("mao", 3, 1, 1, 0, 1, 1, 1, 37)
absrisk = calc.risk("mao", 3, 1, 1, 0, 1, 1, 1, 37)
|
/ bctest.R
|
no_license
|
OldMortality/bctestR
|
R
| false
| false
| 7,727
|
r
|
# create test cases for breast cancer project
#
#
#
# print Odds ratios
PRINTORS= FALSE
create.eth <- function() {
i <- round(runif(1,0,3))
eth = 'eur'
if (i==0) {
eth <- 'eur'
}
if (i==1) {
eth <- 'mao'
}
if (i==2) {
eth <- 'oth'
}
return(eth)
}
create.test <- function() {
eth <- create.eth()
age1 <- round(runif(1,1,3))
age2 <- round(runif(1,1,4))
par <- round(runif(1,0,4))
con <- round(runif(1,0,1))
hbcan <- round(runif(1,0,2))
hbben <- round(runif(1,0,1))
htdis <- round(runif(1,0,1))
age <- round(runif(1,20,54))
rr <- calc.risk(eth,age1,age2,par,con,hbcan,hbben,htdis,age)
return(c(eth,age1,age2,par,con,hbcan,hbben,htdis,age,rr))
}
calc.rr <- function(ethP,age1P,age2P,parP,conP,hbcanP,hbbenP,htdisP,age) {
result <- 0.1
# ethnicity
eth <- c(1,1.6933480)
if (age < 50) {
eth <- c(1,1.6945510)
}
if (age < 40) {
eth <- c(1,1.8781130)
}
# age1
age1 <- c(1,1.4724590,1.3363460)
if (age < 50) {
age1 <- c(1,1.1611370,1.2784060)
}
if (age < 40) {
age1 <- c(1,1.2458240,1.5706720)
}
# age2
age2 <- c(1,1.6360440,1.8564560,1.6326780)
if (age < 50) {
age2 <- c(-1,2.5666830,1,1.3288150)
}
if (age < 40) {
age2 <- c(-1,-1,1.3860280,1)
}
# parity
par <- c(2.4877230,1.8288870,1.2481120,1.2545910,1)
if (age < 50) {
par <- c(2.1628540,2.8637210,1.5404100,1.4023330,1)
}
if (age < 40) {
par <- c(2.4407610,1,2.0563060,3.1174840,2.2136740)
}
# oral contraceptives
con <- c(1,1.3689630)
if (age < 50) {
con <- c(1,1.2328330)
}
if (age < 40) {
con <- c(1,1.1665310)
}
# fam hist bc hbcan
hbcan <- c(1,1.1381650,2.0561460)
if (age < 50) {
hbcan <- c(1,2.3343510,2.7571180)
}
if (age < 40) {
hbcan <- c(1,1.2265770,3.2352410)
}
# hbben benign breast disease
hbben <- c(1,1.6373880)
if (age < 50) {
hbben <- c(1,1.1701340)
}
if (age < 40) {
hbben <- c(1,2.9657820)
}
# history of theroid disease
htdis <- c(1,2.3022770)
if (age < 50) {
htdis <- c(1,1.3971010)
}
if (age < 40) {
htdis <- c(1,1.3393520)
}
ethIndex <- 1
if (ethP == "mao") {
ethIndex = 2
}
or.eth <- eth[ethIndex]
or.age1 <- age1[age1P]
or.age2 <- age2[age2P]
# add 1 to index of those who can be zero,
# because R arrays start at 1
or.par <- par[parP+1]
or.con <- con[conP+1]
or.hbcan <- hbcan[hbcanP+1]
or.hbben <- hbben[hbbenP+1]
or.htdis <- htdis[htdisP+1]
if (PRINTORS) {
print(or.eth)
print(or.age1)
print(or.age2)
print(or.par)
print(or.con)
print(or.hbcan)
print(or.hbben)
print(or.htdis)
}
result <- or.eth * or.age1 * or.age2 * or.par * or.con * or.hbcan * or.hbben * or.htdis
return(result)
}
calc.ar <- function(age) {
ar <- 0.74961775
if (age < 50) {
ar <- 0.690972837
}
if (age < 40) {
ar <- 0.761488315
}
return(ar)
}
to.agegroup <- function(age) {
result <- 6
if (age < 50) { result <- 5}
if (age < 45) { result <- 4}
if (age < 40) { result <- 3}
if (age < 35) { result <- 2}
if (age < 30) { result <- 1}
if (age < 25) { result <- 0}
return(result)
}
mortality <- function(eth,age.group) {
eur.mortality <- c(30.79,29.43,41.44,43.83,79.92,118.54,177.30)
mao.mortality <- c(54.89,62.69,83.81,117.45,155.05,291.82,492.52)
if (eth == 'eur') {
# add 1 because R starts arrays at index 1
result <- eur.mortality[age.group + 1]
} else {
result <- mao.mortality[age.group + 1]
}
return(1e-5 *result)
}
baseline <- function(age.group) {
base <- 1e-5 * c(1.65,7.93,29.97,64.97,125.09,237.35,215.08)
return(base[age.group+1])
}
calc.risk <- function(eth,age1,age2,par,con,hbcan,hbben,htdis,age) {
result <- -1
rr <- calc.rr(eth,age1,age2,par,con,hbcan,hbben,htdis,age)
rr2 <- calc.rr(eth,age1,age2,par,con,hbcan,hbben,htdis,age+5)
#return(rr)
age.group <- to.agegroup(age)
next.age.group <- to.agegroup(age + 5)
years.in.age.group <- 5 - age %% 5
years.in.next.age.group <- 5 - years.in.age.group
numerator <- baseline(age.group) * (1-calc.ar(age)) * rr
denominator <- numerator + mortality(eth,age.group)
first.term <- (numerator / denominator ) * (1 - exp(-years.in.age.group * denominator))
numerator.2 <- baseline(next.age.group) * (1-calc.ar(age+5)) * rr2
denominator.2 <- numerator.2 + mortality(eth,next.age.group)
second <- (numerator.2 / denominator.2) * exp(-years.in.age.group * denominator)
third <- 1 - exp(-years.in.next.age.group * denominator.2)
second.term <- second * third
result <- first.term + second.term
return(result)
}
create.test()
eth = "eur"
age1 = 3
age2 = 3
par = 0
con = 1
hbcan = 2
hbben = 1
htdis = 1
age = 50
# example 1
calc.rr("eur",3,3,2,1,2,1,1,30)
calc.risk("eur",3,3,2,1,2,1,1,30)
# example 2
calc.rr("eur",3,3,2,1,2,1,1,33)
calc.risk("eur",3,3,2,1,2,1,1,33)
# example 3
calc.rr("eur",3,3,1,1,2,1,1,40)
calc.risk("eur",3,3,1,1,2,1,1,40)
# example 4
eth = "eur"
age1 = 3
age2 = 3
par = 1
con = 1
hbcan = 2
hbben = 1
htdis = 1
age = 47
calc.rr("eur",3,3,1,1,2,1,1,47)
calc.risk("eur",3,3,1,1,2,1,1,47)
#example 5
eth = "eur"
age1 = 3
age2 = 3
par = 0
con = 1
hbcan = 2
hbben = 1
htdis = 1
age = 50
calc.rr("eur",3,3,0,1,2,1,1,50)
calc.risk("eur",3,3,0,1,2,1,1,50)
# example 2
calc.rr("eur",3,3,0,1,2,1,1,50)
calc.rr("mao",2,3,1,1,1,0,1,50)
{
result <- data.frame(eth,age1,age2,par,con,hbcan,hbben,htdis,age,absrisk,
stringsAsFactors=FALSE)
N <- 1000
for (i in 1:N) {
eth <- "eur"
if (rbinom(1,1,prob=0.5)==1) { eth = "mao"}
age <- trunc(runif(1,20,51))
age1 <- sample(c(1,2,3),size=1)
age2 <- sample(c(1,2,3,4),size=1)
par <- sample(c(0,1,2,3,4),size=1)
con <- sample(c(0,1),size=1)
hbcan <- sample(c(0,1,2),size=1)
hbben <- sample(c(0,1),size=1)
htdis <- sample(c(0,1),size=1)
age <- trunc(runif(1,20,51))
absrisk <- calc.risk(eth,
age1,
age2,
par,
con,
hbcan,
hbben,
htdis,
age)
print(paste(i,absrisk))
#if (absrisk < 0) {
# test cases with negative absrisk occur if the test case is invalid
# for example, when we have age at menopause > age. The user interface
# will prevent such cases, and the algorithm (web service) will produce
# a negative number to show something is amiss. However, we can leave them
# in here, because the output (the negative number) should still be the same
# in both implemtations (this one here and the web service)
absrisk <- formatC( round( absrisk, 4 ), format='f', digits=4 )
testcase <- c(eth,
age1,
age2,
par,
con,
hbcan,
hbben,
htdis,
age,
absrisk)
result <- rbind(testcase,result)
#} else {
# next;
#}
#rr <- calc.risk(testcase)
#calc.risk("eur",3,3,2,1,2,1,1,30)
}
}
# drop the last line, because -inexplicably- it can contain negative risk
result <- result[-dim(result)[1],]
tail(result)
write.csv(result,'/home/michel/Documents/breast cancer calculator/testdata/mytest02.csv',row.names=F,quote=F)
x <- 0.00034
formatC( round( x, 4 ), format='f', digits=4 )
calc.rr("mao",2,2,1,1,0,1,1,47)
calc.risk("mao",2,2,1,1,0,1,1,47)
PRINTORS = F
calc.rr("mao",3,3, 0, 1, 2, 1, 1, 20)
calc.risk("mao",3,3, 0, 1, 2, 1, 1, 20)
PRINTORS = T
absrisk = calc.rr("mao", 3, 1, 1, 0, 1, 1, 1, 37)
absrisk = calc.risk("mao", 3, 1, 1, 0, 1, 1, 1, 37)
|
# exercise 7.1.4
# Compare two classifiers' accuracies with McNemar's test on the paired
# predictions produced by exercise 7.1.1.
rm(list=ls()) # NOTE(review): wipes the whole workspace; acceptable in a course script only
source('setup.R')
# Load results from previous exercise.
source("Scripts/ex7_1_1.R")
alpha = 0.05 # significance level for the confidence interval
rt <- mcnemar(y_true[,1], yhat[,1], yhat[,2], alpha=alpha)
rt$CI # confidence interval of difference theta = thetaA-thetaB
rt$p # p-value of null hypothesis thetaA = thetaB
rt$thetahat # estimated difference in accuracy theta = thetaA - thetaB
|
/DTU_ML_kursus/02450Toolbox_R/Scripts/ex7_1_4.R
|
no_license
|
AnnaLHansen/projects
|
R
| false
| false
| 378
|
r
|
# exercise 7.1.4
rm(list=ls())
source('setup.R')
# Load results from previous exercise.
source("Scripts/ex7_1_1.R")
alpha = 0.05
rt <- mcnemar(y_true[,1], yhat[,1], yhat[,2], alpha=alpha)
rt$CI # confidence interval of difference theta = thetaA-thetaB
rt$p # p-value of null hypothesis thetaA = thetaB
rt$thetahat # estimated difference in accuracy theta = thetaA - thetaB
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/droplevels.R
\name{droplevels.TidySet}
\alias{droplevels.TidySet}
\title{Drop unused elements and sets}
\usage{
\method{droplevels}{TidySet}(x, elements = TRUE, sets = TRUE, relations = TRUE, ...)
}
\arguments{
\item{x}{A TidySet object.}
\item{elements}{Logical value: Should elements be dropped?}
\item{sets}{Logical value: Should sets be dropped?}
\item{relations}{Logical value: Should relations be dropped?}
\item{...}{Other arguments, currently ignored.}
}
\value{
A TidySet object.
}
\description{
Drop elements and sets without any relation.
}
|
/man/droplevels.TidySet.Rd
|
permissive
|
annakrystalli/BaseSet
|
R
| false
| true
| 629
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/droplevels.R
\name{droplevels.TidySet}
\alias{droplevels.TidySet}
\title{Drop unused elements and sets}
\usage{
\method{droplevels}{TidySet}(x, elements = TRUE, sets = TRUE, relations = TRUE, ...)
}
\arguments{
\item{x}{A TidySet object.}
\item{elements}{Logical value: Should elements be dropped?}
\item{sets}{Logical value: Should sets be dropped?}
\item{relations}{Logical value: Should relations be dropped?}
\item{...}{Other arguments, currently ignored.}
}
\value{
A TidySet object.
}
\description{
Drop elements and sets without any relation.
}
|
#---------------------------------------------------------------------------------------------------------------------------------------
# RJMCMC
# functions
#---------------------------------------------------------------------------------------------------------------------------------------
# MH step
# this performs the within models update using a Metropolis-Hastings update
# Metropolis-Hastings within-model update for Bayesian logistic regression.
# Proposes a random-walk move for each currently active (non-zero)
# coefficient in turn and accepts or rejects it against the posterior.
#
# Args:
#   beta_vec        current coefficient vector; zeros mark excluded terms
#   sigma           not used by the update; returned as a fixed placeholder
#   llikhood        log-likelihood at beta_vec
#   prior_beta(_par)  name of the prior family ("norm") and its 2 parameters
#   prop_beta(_par)   name of the proposal family and its scale parameter
#
# NOTE(review): reads the response y and design matrix X from the calling /
# global environment -- confirm both are defined before calling.
# Returns a list with the (possibly updated) beta_vec, sigma and log-likelihood.
updateparam_logit <- function(beta_vec, sigma, llikhood, prior_beta = "norm", prior_beta_par, prop_beta = "norm", prop_beta_par)
{
  # update each beta coefficient that is not equal to 0 at the current iteration
  for(j in 1:length(beta_vec))
  {
    beta_st <- beta_vec
    if(beta_st[j] != 0)
    {
      # random walk proposal centred on the current value; get(paste("r", ...))
      # builds the sampler name, e.g. "rnorm"
      beta_st[j] <- get(paste("r", prop_beta, sep=""))(1, beta_vec[j], prop_beta_par)
      # log-likelihood evaluated at proposed beta values
      newllikhood <- sum(dbinom(y, 1, plogis(X%*%beta_st), log = T))
      # log-numerator / log-denominator of the MH acceptance ratio; the
      # proposal densities cancel for a symmetric random walk but are kept
      # explicit -- presumably to allow asymmetric proposals; verify
      num <- newllikhood + get(paste("d", prior_beta, sep=""))(beta_st[j], prior_beta_par[1], prior_beta_par[2], log=TRUE) + get(paste("d", prop_beta, sep=""))(beta_vec[j], beta_st[j], prop_beta_par, log=TRUE)
      den <- llikhood + get(paste("d", prior_beta, sep=""))(beta_vec[j], prior_beta_par[1], prior_beta_par[2], log=TRUE) + get(paste("d", prop_beta, sep=""))(beta_st[j], beta_vec[j], prop_beta_par, log=TRUE)
      u <- runif(1)
      # interactive debug hook: drop into the browser if the ratio is NaN
      if(is.na(exp(num-den))) browser()
      # accept/reject step: on acceptance keep the proposal and its likelihood
      if (u < exp(num-den))
      {
        #print("accepted")
        beta_vec <- beta_st
        llikhood <- newllikhood
      }
    }
  }
  sigma <- 3 # placeholder -- sigma is not actually updated by this sampler
  # the current beta, sigma and log-likelihood values
  list("beta_vec" = beta_vec, "sigma" = sigma, "llikhood"= llikhood)
}
#---------------------------------------------------------------------------------------------------------------------------------------
# RJ step
# this performs the between models update
# Reversible-jump between-model move for Bayesian logistic regression.
# Picks one non-intercept coefficient at random; if it is currently zero,
# proposes adding it (birth), otherwise proposes dropping it (death).
# Because the proposal distribution is chosen equal to the prior and all
# model prior probabilities are equal, the acceptance ratio reduces to the
# likelihood ratio times a prior/proposal correction for the moved term.
#
# NOTE(review): reads y and X from the calling / global environment, and --
# unlike updateparam_logit -- the returned list does not include sigma.
updatemodel_logit <- function(beta_vec, sigma, llikhood, prior_beta = "norm", prior_beta_par, prop_beta = "norm", prop_beta_par)
{
  beta_st <- beta_vec
  # which coefficient to update (exclude the intercept at position 1)
  r <- sample((1:length(beta_vec))[-1], 1)
  # birth move: the rth coefficient is currently 0, so propose a value for it
  if(beta_vec[r] == 0)
  {
    beta_st[r] <- get(paste("r", prop_beta, sep=""))(1, prop_beta_par[1], prop_beta_par[2])
    # acceptance ratio: likelihood ratio x prior density of the new value /
    # proposal density of the new value (model priors cancel)
    # value of the log-likelihood for the proposed model
    newllikhood <- sum(dbinom(y, 1, plogis(X%*%beta_st), log = T))
    num <- newllikhood + log(get(paste("d", prior_beta, sep=""))(beta_st[r], prior_beta_par[1], prior_beta_par[2]))
    den <- llikhood + log(get(paste("d", prop_beta, sep=""))(beta_st[r], prop_beta_par[1], prop_beta_par[2]))
  }
  # death move: the rth coefficient is non-zero, so propose setting it to 0
  else
  {
    beta_st[r] <- 0
    # acceptance ratio mirrors the birth move with the roles of prior and
    # proposal swapped
    # value of the log-likelihood for the proposed model
    newllikhood <- sum(dbinom(y, 1, plogis(X%*%beta_st), log = T))
    num <- newllikhood + log(get(paste("d", prop_beta, sep=""))(beta_vec[r], prop_beta_par[1], prop_beta_par[2]))
    den <- llikhood + log(get(paste("d", prior_beta, sep=""))(beta_vec[r], prior_beta_par[1], prior_beta_par[2]))
  }
  A <- min(1, exp(num-den))
  u <- runif(1)
  # accept/reject step: on acceptance adopt the proposed model and its likelihood
  if(u <= A)
  {
    llikhood <- newllikhood
    beta_vec <- beta_st
  }
  list("beta_vec" = beta_vec, "llikhood"= llikhood)
}
|
/RJMCMCfunctions_logit.R
|
no_license
|
QuantEcol-ConsLab/Model-Averaging
|
R
| false
| false
| 4,120
|
r
|
#---------------------------------------------------------------------------------------------------------------------------------------
# RJMCMC
# functions
#---------------------------------------------------------------------------------------------------------------------------------------
# MH step
# this performs the within models update using a Metropolis-Hastings update
updateparam_logit <- function(beta_vec, sigma, llikhood, prior_beta = "norm", prior_beta_par, prop_beta = "norm", prop_beta_par)
{
# update each beta coefficient that is not equal to 0 at the current iteration
for(j in 1:length(beta_vec))
{
beta_st <- beta_vec
if(beta_st[j] != 0)
{
# random walk update N(current value, chosen sigma)
beta_st[j] <- get(paste("r", prop_beta, sep=""))(1, beta_vec[j], prop_beta_par)
# log-likelihood evaluated at proposed beta values
newllikhood <- sum(dbinom(y, 1, plogis(X%*%beta_st), log = T))
# log of numerator of acceptance probability
num <- newllikhood + get(paste("d", prior_beta, sep=""))(beta_st[j], prior_beta_par[1], prior_beta_par[2], log=TRUE) + get(paste("d", prop_beta, sep=""))(beta_vec[j], beta_st[j], prop_beta_par, log=TRUE)
den <- llikhood + get(paste("d", prior_beta, sep=""))(beta_vec[j], prior_beta_par[1], prior_beta_par[2], log=TRUE) + get(paste("d", prop_beta, sep=""))(beta_st[j], beta_vec[j], prop_beta_par, log=TRUE)
u <- runif(1)
if(is.na(exp(num-den))) browser()
# accept/reject step
# if move is accepted then set beta to the proposed beta and update the log-likelihood value
if (u < exp(num-den))
{
#print("accepted")
beta_vec <- beta_st
llikhood <- newllikhood
}
}
}
sigma <- 3 # placeholder
# the current beta, sigma and log-likelihood values
list("beta_vec" = beta_vec, "sigma" = sigma, "llikhood"= llikhood)
}
#---------------------------------------------------------------------------------------------------------------------------------------
# RJ step: reversible-jump (between-models) update for Bayesian variable
# selection in a logistic regression.
#
# Picks one non-intercept coefficient at random and proposes either a
# "birth" move (coefficient currently 0: draw a value from the proposal
# distribution) or a "death" move (coefficient currently non-zero: set it
# to 0), accepting with the usual RJ-MCMC probability. All prior model
# probabilities are assumed equal, so they cancel in the ratio.
#
# Arguments:
#   beta_vec       current coefficient vector (0 = variable excluded)
#   sigma          unused in this updater; kept for interface consistency
#   llikhood       current log-likelihood value
#   prior_beta     name of the prior density family (e.g. "norm")
#   prior_beta_par length-2 parameter vector of the prior
#   prop_beta      name of the proposal density family (e.g. "norm")
#   prop_beta_par  length-2 parameter vector of the proposal
#
# NOTE(review): the likelihood uses the response `y` and design matrix `X`
# from the enclosing environment (globals), as in the within-model update.
#
# Returns a list with the (possibly updated) "beta_vec" and "llikhood".
updatemodel_logit <- function(beta_vec, sigma, llikhood, prior_beta = "norm", prior_beta_par, prop_beta = "norm", prop_beta_par)
{
  beta_st <- beta_vec
  # which coefficient to update (exclude the intercept)
  r <- sample((1:length(beta_vec))[-1], 1)
  # if at the current iteration the rth coefficient is equal to 0 propose a new value for it
  if (beta_vec[r] == 0)
  {
    # Birth move: draw a value for the currently-excluded coefficient
    beta_st[r] <- get(paste("r", prop_beta, sep=""))(1, prop_beta_par[1], prop_beta_par[2])
    # value of the log-likelihood for the proposed model
    # (log = TRUE spelled out; T is reassignable and best avoided)
    newllikhood <- sum(dbinom(y, 1, plogis(X%*%beta_st), log = TRUE))
    # acceptance ratio for the birth move: prior density of the new value
    # over its proposal density; the densities are evaluated directly on the
    # log scale (log = TRUE) rather than via log(d...), which avoids
    # underflow to -Inf and matches the within-model update above
    num <- newllikhood + get(paste("d", prior_beta, sep=""))(beta_st[r], prior_beta_par[1], prior_beta_par[2], log = TRUE)
    den <- llikhood + get(paste("d", prop_beta, sep=""))(beta_st[r], prop_beta_par[1], prop_beta_par[2], log = TRUE)
  }
  # if at the current iteration the rth coefficient is not equal to 0 propose to set it equal to 0
  else
  {
    # Death move: drop the coefficient from the model
    beta_st[r] <- 0
    # value of the log-likelihood for the proposed model
    newllikhood <- sum(dbinom(y, 1, plogis(X%*%beta_st), log = TRUE))
    # acceptance ratio for the death move: proposal density of the removed
    # value over its prior density (the reverse of the birth ratio)
    num <- newllikhood + get(paste("d", prop_beta, sep=""))(beta_vec[r], prop_beta_par[1], prop_beta_par[2], log = TRUE)
    den <- llikhood + get(paste("d", prior_beta, sep=""))(beta_vec[r], prior_beta_par[1], prior_beta_par[2], log = TRUE)
  }
  A <- min(1, exp(num - den))
  u <- runif(1)
  # accept/reject step
  # if move is accepted then set the beta parameters to the proposed ones
  # and update the log-likelihood value
  if (u <= A)
  {
    llikhood <- newllikhood
    beta_vec <- beta_st
  }
  list("beta_vec" = beta_vec, "llikhood" = llikhood)
}
|
#
# 01 February 2017, updated on 11 December 2020
#
library(raster)
library(fBasics)
library(maptools)
#
data(wrld_simpl)
#
# plot_data: one row per sPlot releve; plotToRemove: resampling output;
# pca3: the PCA defining the PC1-PC2 environmental space
load("plot_data.RData")
load("plotToRemove.RData")
load("pca3.RData")
#
ls()
#
output[[1]]
#
# 858 cells from the PC1-PC2 space have been resampled with a cutoff value of
# 50 plots maximum per cell with 50 being the median value of the total number
# of plots across all grid cells of the PC1-PC2 space thus also being a good
# compromise between quantity and quality in terms of extracting a subset of
# sPlot that has a balanced sampling effort across the PC1-PC2 space
#
plotToRemove <- output[[2]]
rm(output)
class(plotToRemove)
#
# A list of 100 vectors (100 different resampling iterations) that contains the
# IDs of sPlot releves to remove from the plot_data object
#
length(plotToRemove[[1]])
#
# First iteration containing 700037 IDs
#
length(plotToRemove[[2]])
#
# Second iteration containing 700022 IDs
#
head(plotToRemove[[1]])
head(plotToRemove[[2]])
#
# First ID is NA for each vector in the list which is normal (cf. a property of
# the resampling loop) we have to clean that (see below)
#
for (i in seq_along(plotToRemove)) {
  plotToRemove[[i]] <- na.omit(plotToRemove[[i]])
}
#
# One extraction exemple from the first vector in the plotToRemove list object
#
# NOTE: plots are kept with a logical %in% filter rather than with
# plot_data[-match(...), ]: negative indexing errors when match() returns NA
# (an ID absent from plot_data) and silently drops EVERY row when the index
# vector is empty, whereas the %in% filter is safe in both cases and gives
# the same result otherwise
#
plot_sel <- plot_data$PlotID[!(plot_data$PlotID %in% plotToRemove[[1]])]
length(plot_sel)
#
# A total of 99364 plots seem to be selected which is a bit too much given that
# 50*858 grid cells gives only 42900 plots and even if some grid cells have less
# than 50 plots, the total should not be 99364??? This is far too much and thus
# something is wrong
#
length(which(is.na(plot_data$pc1_val)))
#
# It seems that 42878 plots in the plot_data object have NAs for PC1 and these
# mostly correspond to coastal pixels where data from SoilGrid are unavailable
#
# Keep only the rows with a non-missing PC1 value; logical subsetting is safe
# even when no value is missing (negative indexing with an empty which()
# result would drop every row)
plot_data <- plot_data[!is.na(plot_data$pc1_val), ]
dim(plot_data)[[1]]
#
# After removing rows with NAS for PC1, the plot_data object has 756522 plots
# instead of 799400 plots
#
plot_sel <- plot_data$PlotID[!(plot_data$PlotID %in% plotToRemove[[1]])]
length(plot_sel)
#
# In the end, the true selection from the first resampling iteration is 56486
# plots instead of 99364 plots
#
# Final extraction: for each resampling iteration keep the IDs of the plots
# that were NOT flagged for removal (list preallocated to its final length)
plot_sel <- vector("list", length(plotToRemove))
for (i in seq_along(plotToRemove)) {
  plot_sel[[i]] <- plot_data$PlotID[!(plot_data$PlotID %in% plotToRemove[[i]])]
}
save(plot_sel, file = "plot_sel.RData")
#
|
/_resampling/03_extracting_selected_plots_from_the_sPlot_database.R
|
permissive
|
sPlotOpen/sPlotOpen_Code
|
R
| false
| false
| 2,576
|
r
|
#
# 01 February 2017, updated on 11 December 2020
#
library(raster)
library(fBasics)
library(maptools)
#
data(wrld_simpl)
#
# plot_data: one row per sPlot releve; plotToRemove: resampling output;
# pca3: the PCA defining the PC1-PC2 environmental space
load("plot_data.RData")
load("plotToRemove.RData")
load("pca3.RData")
#
ls()
#
output[[1]]
#
# 858 cells from the PC1-PC2 space have been resampled with a cutoff value of
# 50 plots maximum per cell with 50 being the median value of the total number
# of plots across all grid cells of the PC1-PC2 space thus also being a good
# compromise between quantity and quality in terms of extracting a subset of
# sPlot that has a balanced sampling effort across the PC1-PC2 space
#
plotToRemove <- output[[2]]
rm(output)
class(plotToRemove)
#
# A list of 100 vectors (100 different resampling iterations) that contains the
# IDs of sPlot releves to remove from the plot_data object
#
length(plotToRemove[[1]])
#
# First iteration containing 700037 IDs
#
length(plotToRemove[[2]])
#
# Second iteration containing 700022 IDs
#
head(plotToRemove[[1]])
head(plotToRemove[[2]])
#
# First ID is NA for each vector in the list which is normal (cf. a property of
# the resampling loop) we have to clean that (see below)
#
for (i in seq_along(plotToRemove)) {
  plotToRemove[[i]] <- na.omit(plotToRemove[[i]])
}
#
# One extraction exemple from the first vector in the plotToRemove list object
#
# NOTE: plots are kept with a logical %in% filter rather than with
# plot_data[-match(...), ]: negative indexing errors when match() returns NA
# (an ID absent from plot_data) and silently drops EVERY row when the index
# vector is empty, whereas the %in% filter is safe in both cases and gives
# the same result otherwise
#
plot_sel <- plot_data$PlotID[!(plot_data$PlotID %in% plotToRemove[[1]])]
length(plot_sel)
#
# A total of 99364 plots seem to be selected which is a bit too much given that
# 50*858 grid cells gives only 42900 plots and even if some grid cells have less
# than 50 plots, the total should not be 99364??? This is far too much and thus
# something is wrong
#
length(which(is.na(plot_data$pc1_val)))
#
# It seems that 42878 plots in the plot_data object have NAs for PC1 and these
# mostly correspond to coastal pixels where data from SoilGrid are unavailable
#
# Keep only the rows with a non-missing PC1 value; logical subsetting is safe
# even when no value is missing (negative indexing with an empty which()
# result would drop every row)
plot_data <- plot_data[!is.na(plot_data$pc1_val), ]
dim(plot_data)[[1]]
#
# After removing rows with NAS for PC1, the plot_data object has 756522 plots
# instead of 799400 plots
#
plot_sel <- plot_data$PlotID[!(plot_data$PlotID %in% plotToRemove[[1]])]
length(plot_sel)
#
# In the end, the true selection from the first resampling iteration is 56486
# plots instead of 99364 plots
#
# Final extraction: for each resampling iteration keep the IDs of the plots
# that were NOT flagged for removal (list preallocated to its final length)
plot_sel <- vector("list", length(plotToRemove))
for (i in seq_along(plotToRemove)) {
  plot_sel[[i]] <- plot_data$PlotID[!(plot_data$PlotID %in% plotToRemove[[i]])]
}
save(plot_sel, file = "plot_sel.RData")
#
#### Data preparation ####

#### Libraries ####
# plyr is loaded BEFORE the tidyverse so that dplyr's verbs (mutate,
# summarise, ...) are the ones found first on the search path; loading
# plyr afterwards masks them and silently changes behaviour.
library(plyr)
library(tidyverse)
library(reshape2)

#### Load in data ####
data <- read.delim("Data/Data_Chickens.txt", sep = "")
colnames(data) <- c("Department", "Pen", "Group", "Animal_nr", "Time",
                    "Weight_change")

#### Set up ####
summary(data)
str(data)
# Coerce design variables to factors; Time stays numeric for modelling
data$Group      <- as.factor(data$Group)
data$Animal_nr  <- as.factor(data$Animal_nr)
data$Time       <- as.numeric(data$Time)
data$Department <- as.factor(data$Department)
data$Pen        <- as.factor(data$Pen)
summary(data)

# Creating unique IDs: one integer label per
# Animal_nr x Pen x Department x Group combination.
# group_indices_() is deprecated in dplyr; cur_group_id() inside a grouped
# mutate is the supported replacement and assigns the same group-wise
# integer labels (groups numbered in their sorted order).
# dplyr:: is used explicitly so plyr's mutate() cannot shadow the call.
data <- data %>%
  dplyr::group_by(Animal_nr, Pen, Department, Group) %>%
  dplyr::mutate(ID = dplyr::cur_group_id()) %>%
  dplyr::ungroup() %>%
  dplyr::select(-Animal_nr) %>%
  dplyr::mutate(ID = as.factor(ID))

# save data
# NOTE(review): the input file is "Data_Chickens.txt" but the output is
# "Data_chickens.txt" -- on a case-insensitive file system this OVERWRITES
# the raw data file; confirm the intended output name.
write.table(data, file = "Data/Data_chickens.txt", row.names = F)
############################# End ##############################################
|
/Data_prep.R
|
no_license
|
ilsevb95/LMM_Case_Study
|
R
| false
| false
| 930
|
r
|
#### Data preparation ####

#### Libraries ####
# plyr is loaded BEFORE the tidyverse so that dplyr's verbs (mutate,
# summarise, ...) are the ones found first on the search path; loading
# plyr afterwards masks them and silently changes behaviour.
library(plyr)
library(tidyverse)
library(reshape2)

#### Load in data ####
data <- read.delim("Data/Data_Chickens.txt", sep = "")
colnames(data) <- c("Department", "Pen", "Group", "Animal_nr", "Time",
                    "Weight_change")

#### Set up ####
summary(data)
str(data)
# Coerce design variables to factors; Time stays numeric for modelling
data$Group      <- as.factor(data$Group)
data$Animal_nr  <- as.factor(data$Animal_nr)
data$Time       <- as.numeric(data$Time)
data$Department <- as.factor(data$Department)
data$Pen        <- as.factor(data$Pen)
summary(data)

# Creating unique IDs: one integer label per
# Animal_nr x Pen x Department x Group combination.
# group_indices_() is deprecated in dplyr; cur_group_id() inside a grouped
# mutate is the supported replacement and assigns the same group-wise
# integer labels (groups numbered in their sorted order).
# dplyr:: is used explicitly so plyr's mutate() cannot shadow the call.
data <- data %>%
  dplyr::group_by(Animal_nr, Pen, Department, Group) %>%
  dplyr::mutate(ID = dplyr::cur_group_id()) %>%
  dplyr::ungroup() %>%
  dplyr::select(-Animal_nr) %>%
  dplyr::mutate(ID = as.factor(ID))

# save data
# NOTE(review): the input file is "Data_Chickens.txt" but the output is
# "Data_chickens.txt" -- on a case-insensitive file system this OVERWRITES
# the raw data file; confirm the intended output name.
write.table(data, file = "Data/Data_chickens.txt", row.names = F)
############################# End ##############################################
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PCT_SAFN.R
\docType{data}
\name{PCT_SAFN}
\alias{PCT_SAFN}
\title{Random forest model for percent sands and fines}
\format{A \code{\link[randomForest]{randomForest}} model}
\usage{
PCT_SAFN
}
\description{
Random forest model for percent sands and fines
}
\examples{
data(PCT_SAFN)
}
\keyword{datasets}
|
/man/PCT_SAFN.Rd
|
no_license
|
SCCWRP/PHAB
|
R
| false
| true
| 381
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PCT_SAFN.R
\docType{data}
\name{PCT_SAFN}
\alias{PCT_SAFN}
\title{Random forest model for percent sands and fines}
\format{A \code{\link[randomForest]{randomForest}} model}
\usage{
PCT_SAFN
}
\description{
Random forest model for percent sands and fines
}
\examples{
data(PCT_SAFN)
}
\keyword{datasets}
|
# install.packages("mapr")
library(mapr)  # plotting package for interactive occurrence maps
# NOTE(review): `acaule` is not defined anywhere in this script; it is
# presumably an occurrence object fetched earlier (e.g. with spocc::occ).
# Confirm it exists in the workspace before running this line.
map_leaflet(acaule)  # draw the interactive map
# Define several species to query
spp <- c('Danaus plexippus', 'Accipiter striatus', 'Pinus contorta')
# Query GBIF for up to 50 georeferenced records per species.
# NOTE(review): occ() comes from the spocc package, which is never attached
# here; add library(spocc) or call spocc::occ() explicitly.
dat <- occ(query = spp, from = 'gbif', has_coords = TRUE, limit = 50)
# Visualise the records, one colour per species.
# (fixed: the second hex colour had a stray leading space, " #6B944D",
# which is not a valid R colour specification)
map_leaflet(dat, color = c("#976AAE", "#6B944D", "#BD5945"))
|
/output/物种可视化.R
|
no_license
|
LHH2021/data-analysis
|
R
| false
| false
| 336
|
r
|
# install.packages("mapr")
library(mapr)  # plotting package for interactive occurrence maps
# NOTE(review): `acaule` is not defined anywhere in this script; it is
# presumably an occurrence object fetched earlier (e.g. with spocc::occ).
# Confirm it exists in the workspace before running this line.
map_leaflet(acaule)  # draw the interactive map
# Define several species to query
spp <- c('Danaus plexippus', 'Accipiter striatus', 'Pinus contorta')
# Query GBIF for up to 50 georeferenced records per species.
# NOTE(review): occ() comes from the spocc package, which is never attached
# here; add library(spocc) or call spocc::occ() explicitly.
dat <- occ(query = spp, from = 'gbif', has_coords = TRUE, limit = 50)
# Visualise the records, one colour per species.
# (fixed: the second hex colour had a stray leading space, " #6B944D",
# which is not a valid R colour specification)
map_leaflet(dat, color = c("#976AAE", "#6B944D", "#BD5945"))
|
# Auto-generated fuzzing regression input (libFuzzer / valgrind harness for
# the `grattan` package): a captured argument list for the unexported
# anyOutside() routine. Do not edit the values -- they reproduce a specific
# fuzzer-generated case.
testlist <- list(a = 0L, b = 0L, x = c(-1125711872L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
# Replay the captured arguments against the function under test; a crash or
# invalid memory access here is the failure signal.
result <- do.call(grattan:::anyOutside,testlist)
# Print the structure of the result (also forces evaluation of the output)
str(result)
|
/grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610386058-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 354
|
r
|
# Auto-generated fuzzing regression input (libFuzzer / valgrind harness for
# the `grattan` package): a captured argument list for the unexported
# anyOutside() routine. Do not edit the values -- they reproduce a specific
# fuzzer-generated case.
testlist <- list(a = 0L, b = 0L, x = c(-1125711872L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
# Replay the captured arguments against the function under test; a crash or
# invalid memory access here is the failure signal.
result <- do.call(grattan:::anyOutside,testlist)
# Print the structure of the result (also forces evaluation of the output)
str(result)
|
\name{simpleRhat}
\alias{simpleRhat}
\title{
The Brooks-Gelman-Rubin (BGR) convergence diagnostic
}
\description{
An 'interval' estimator of the 'potential scale reduction factor' (Rhat) for MCMC output. Similar to the function \code{\link{gelman.diag}} in \pkg{coda}, but much faster when thousands of parameters are involved and will not cause R to crash.
}
\usage{
simpleRhat(object, n.chains, burnin=0)
}
\arguments{
\item{object}{
a vector, matrix or data frame with MCMC output in columns
}
\item{n.chains}{
scalar integer, the number of chains concatenated to form the columns of \code{x}; multiple chains are required to calculate Rhat; if \code{n.chains} is missing and \code{object} has an attribute \code{n.chains}, that value will be used.
}
\item{burnin}{
scalar, between 0 and 0.9; the proportion of values at the start of each chain to discard as burn-in.
}
}
\details{
Calculates the Gelman-Rubin convergence statistic, as modified by Brooks and Gelman (1998). Following the WinBUGS User Manual (Spiegelhalter et al, 2003), we use the width of the central 80\% interval as the measure of width. Rhat is the ratio of the width of the pooled chains to the mean width of the individual chains. At convergence, this should be 1; values less than 1.1 or 1.05 are considered satisfactory.
}
\value{
A named vector with the Rhat values.
}
\references{
Brooks, S.P. & Gelman, A. (1998) General methods for monitoring convergence of iterative simulations. \emph{Journal of Computational and Graphical Statistics}, 7, 434-455.
Spiegelhalter, Thomas, Best & Lunn (2003) WinBUGS User Manual Version 1.4, on line \href{https://www.mrc-bsu.cam.ac.uk/wp-content/uploads/manual14.pdf}{here}.
}
\author{
Mike Meredith
}
\examples{
# Get some output to use
data(salamanders)
y <- rowSums(salamanders)
( out <- BoccSS0(y, 5) )
simpleRhat(out)
}
|
/man/simpleRhat.Rd
|
no_license
|
dsfernandez/wiqid
|
R
| false
| false
| 1,912
|
rd
|
\name{simpleRhat}
\alias{simpleRhat}
\title{
The Brooks-Gelman-Rubin (BGR) convergence diagnostic
}
\description{
An 'interval' estimator of the 'potential scale reduction factor' (Rhat) for MCMC output. Similar to the function \code{\link{gelman.diag}} in \pkg{coda}, but much faster when thousands of parameters are involved and will not cause R to crash.
}
\usage{
simpleRhat(object, n.chains, burnin=0)
}
\arguments{
\item{object}{
a vector, matrix or data frame with MCMC output in columns
}
\item{n.chains}{
scalar integer, the number of chains concatenated to form the columns of \code{x}; multiple chains are required to calculate Rhat; if \code{n.chains} is missing and \code{object} has an attribute \code{n.chains}, that value will be used.
}
\item{burnin}{
scalar, between 0 and 0.9; the proportion of values at the start of each chain to discard as burn-in.
}
}
\details{
Calculates the Gelman-Rubin convergence statistic, as modified by Brooks and Gelman (1998). Following the WinBUGS User Manual (Spiegelhalter et al, 2003), we use the width of the central 80\% interval as the measure of width. Rhat is the ratio of the width of the pooled chains to the mean width of the individual chains. At convergence, this should be 1; values less than 1.1 or 1.05 are considered satisfactory.
}
\value{
A named vector with the Rhat values.
}
\references{
Brooks, S.P. & Gelman, A. (1998) General methods for monitoring convergence of iterative simulations. \emph{Journal of Computational and Graphical Statistics}, 7, 434-455.
Spiegelhalter, Thomas, Best & Lunn (2003) WinBUGS User Manual Version 1.4, on line \href{https://www.mrc-bsu.cam.ac.uk/wp-content/uploads/manual14.pdf}{here}.
}
\author{
Mike Meredith
}
\examples{
# Get some output to use
data(salamanders)
y <- rowSums(salamanders)
( out <- BoccSS0(y, 5) )
simpleRhat(out)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CFunctions.R
\name{ForwardR}
\alias{ForwardR}
\title{Forward step}
\usage{
ForwardR(emisVec, initPr, trsVec)
}
\arguments{
\item{emisVec}{a vector of emission probabilities.}
\item{initPr}{a vector specifying initial state probabilities.}
\item{trsVec}{a vector of state transition probabilities.}
}
\description{
Forward step
}
\keyword{internal}
|
/man/ForwardR.Rd
|
no_license
|
julieaubert/CHMM
|
R
| false
| true
| 428
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CFunctions.R
\name{ForwardR}
\alias{ForwardR}
\title{Forward step}
\usage{
ForwardR(emisVec, initPr, trsVec)
}
\arguments{
\item{emisVec}{a vector of emission probabilities.}
\item{initPr}{a vector specifying initial state probabilities.}
\item{trsVec}{a vector of state transition probabilities.}
}
\description{
Forward step
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/OLIVESWater.R
\docType{data}
\name{OLIVESWater}
\alias{OLIVESWater}
\title{Olives water requirement for land evaluation}
\format{
A data frame with 3 rows and 8 columns
}
\description{
A dataset containing the water characteristics of the crop requirements for farming Olives.
}
\details{
The following are the factors for evaluation:
\itemize{
\item WyAv - Annual precipitation (mm)
\item WmSpecial1 - Monthly rainfall during the sclerification of stone (mm) - August (N hem) February (S hem.)
\item WmSpecial2 - Monthly rainfall during the sclerification of stone (mm) - September (N hem) March (S hem.)
}
}
\seealso{
\itemize{
\item Yen, B. T., Pheng, K. S., and Hoanh, C. T. (2006). \emph{LUSET: Land Use Suitability Evaluation Tool User's Guide}. International Rice Research Institute.
}
}
\keyword{dataset}
|
/man/OLIVESWater.Rd
|
permissive
|
alstat/ALUES
|
R
| false
| true
| 897
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/OLIVESWater.R
\docType{data}
\name{OLIVESWater}
\alias{OLIVESWater}
\title{Olives water requirement for land evaluation}
\format{
A data frame with 3 rows and 8 columns
}
\description{
A dataset containing the water characteristics of the crop requirements for farming Olives.
}
\details{
The following are the factors for evaluation:
\itemize{
\item WyAv - Annual precipitation (mm)
\item WmSpecial1 - Monthly rainfall during the sclerification of stone (mm) - August (N hem) February (S hem.)
\item WmSpecial2 - Monthly rainfall during the sclerification of stone (mm) - September (N hem) March (S hem.)
}
}
\seealso{
\itemize{
\item Yen, B. T., Pheng, K. S., and Hoanh, C. T. (2006). \emph{LUSET: Land Use Suitability Evaluation Tool User's Guide}. International Rice Research Institute.
}
}
\keyword{dataset}
|
# Euclidean (L2) norm of a numeric vector.
.l2norm <- function(vec) {
  sqrt(sum(vec * vec))
}

# Operator (spectral) norm of a matrix: its largest singular value, taken
# from a rank-1 truncated SVD (.svd_truncated is defined elsewhere in the
# package).
.opnorm <- function(mat) {
  .svd_truncated(mat, K = 1)$d[1]
}
|
/R/norm.R
|
permissive
|
linnykos/utilities
|
R
| false
| false
| 101
|
r
|
# Euclidean (L2) norm of a numeric vector.
.l2norm <- function(vec) {
  sqrt(sum(vec * vec))
}

# Operator (spectral) norm of a matrix: its largest singular value, taken
# from a rank-1 truncated SVD (.svd_truncated is defined elsewhere in the
# package).
.opnorm <- function(mat) {
  .svd_truncated(mat, K = 1)$d[1]
}
|
\name{plotTraj3d,ClusterLongData3d}
\alias{plotTraj3d}
%\alias{plotTraj3d,ClusterLongData3d-method}
%\alias{plotTraj3d,ClusterLongData3d,missing-method}
\alias{plotTraj3d,ClusterLongData3d,numeric-method}
%\alias{plotTraj3d,ClusterLongData3d,Partition-method}
\title{ ~ Function: plotTraj3d for ClusterLongData3d ~ }
\description{
Plot the trajectories of two variables of a \code{\linkS4class{ClusterLongData3d}} object in 3D
relatively to a \code{\linkS4class{Partition}}.
}
\usage{
%\S4method{plotTraj3d}{ClusterLongData3d,missing}(x,y,varY=1,varZ=2,
% parTraj=parTRAJ(),parMean=parMEAN(),nbSample=200,...)
\S4method{plotTraj3d}{ClusterLongData3d,numeric}(x,y,varY=1,varZ=2,
parTraj=parTRAJ(col="clusters"),parMean=parMEAN(type="n"),...)
}
\arguments{
\item{x}{\code{[ClusterLongData3d]}: Object containing the trajectories to plot.}
\item{y}{\code{[numeric]} or \code{[vector2(numeric)]}: Define the \code{Partition} P that will be use to plot
the object. \code{P} is a \code{Partition} hold in the field \code{c2},
\code{c3}, ... \code{c26}. If \code{y=c(a,b)}, then \code{P} is
the \code{Partition} number \code{b} with \code{a} clusters. If \code{y=a},
then \code{P} is the partition number \code{1} with \code{a}
clusters.}
\item{varY}{\code{[numeric]} or \code{[character]}: either the
number or the name of the first variable to display. 1 by default.}
\item{varZ}{\code{[numeric]} or \code{[character]}: either the
number or the name of the second variable to display. 2 by default.}
\item{parTraj}{\code{[ParLongData]}: Set the graphical parameters
used to plot the trajectories of the \code{ClusterLongData3d}. See \code{\linkS4class{ParLongData}}
for details.}
\item{parMean}{\code{[ParLongData]}: Set the graphical parameters
used to plot the mean trajectories of each clusters \code{ClusterLongData3d}
(only when \code{y} is non missing). See \code{\linkS4class{ParLongData}}
for details.}
% \item{nbSample}{\code{[numeric]}: Graphical display of huge sample can
% be time consumming. This parameters fixe the maximum numbre of
% trajectories (randomly chosen) that will be drawn.}
\item{...}{Arguments to be passed to methods, such as graphical parameters.}
}
\details{
Plot the means trajectories of two variables of a \code{\linkS4class{ClusterLongData3d}} object in 3D. It
use the \code{\link[rgl:rgl]{rgl}} library. The user can make the
graphical representation turn using its mouse.
}
\seealso{\code{\linkS4class{ClusterLongData3d}}}
\examples{
##################
### Real example on array
time=c(1,2,3,4,8,12,16,20)
id2=1:120
f <- function(id,t)((id-1)\%\%3-1) * t
g <- function(id,t)(id\%\%2+1)*t
h <- function(id,t)(id\%\%4-0.5)*(20-t)
myCld <- clusterLongData3d(array(cbind(outer(id2,time,f),outer(id2,time,g),
outer(id2,time,h))+rnorm(120*8*3,0,3),dim=c(120,8,3)))
kml3d(myCld,3:4,2)
### Basic plot
plotTraj3d(myCld,3)
### plotTraj3d, variable 1 and 3
plotTraj3d(myCld,4,varZ=3)
plotTraj3d(myCld,3,parMean=parMEAN(col="red"))
}
\keyword{package}
\keyword{ts}
\keyword{aplot}
|
/man/plotTraj3d.Rd
|
no_license
|
dmurdoch/kml3d
|
R
| false
| false
| 3,181
|
rd
|
\name{plotTraj3d,ClusterLongData3d}
\alias{plotTraj3d}
%\alias{plotTraj3d,ClusterLongData3d-method}
%\alias{plotTraj3d,ClusterLongData3d,missing-method}
\alias{plotTraj3d,ClusterLongData3d,numeric-method}
%\alias{plotTraj3d,ClusterLongData3d,Partition-method}
\title{ ~ Function: plotTraj3d for ClusterLongData3d ~ }
\description{
Plot the trajectories of two variables of a \code{\linkS4class{ClusterLongData3d}} object in 3D
relatively to a \code{\linkS4class{Partition}}.
}
\usage{
%\S4method{plotTraj3d}{ClusterLongData3d,missing}(x,y,varY=1,varZ=2,
% parTraj=parTRAJ(),parMean=parMEAN(),nbSample=200,...)
\S4method{plotTraj3d}{ClusterLongData3d,numeric}(x,y,varY=1,varZ=2,
parTraj=parTRAJ(col="clusters"),parMean=parMEAN(type="n"),...)
}
\arguments{
\item{x}{\code{[ClusterLongData3d]}: Object containing the trajectories to plot.}
\item{y}{\code{[numeric]} or \code{[vector2(numeric)]}: Define the \code{Partition} P that will be use to plot
the object. \code{P} is a \code{Partition} hold in the field \code{c2},
\code{c3}, ... \code{c26}. If \code{y=c(a,b)}, then \code{P} is
the \code{Partition} number \code{b} with \code{a} clusters. If \code{y=a},
then \code{P} is the partition number \code{1} with \code{a}
clusters.}
\item{varY}{\code{[numeric]} or \code{[character]}: either the
number or the name of the first variable to display. 1 by default.}
\item{varZ}{\code{[numeric]} or \code{[character]}: either the
number or the name of the second variable to display. 2 by default.}
\item{parTraj}{\code{[ParLongData]}: Set the graphical parameters
used to plot the trajectories of the \code{ClusterLongData3d}. See \code{\linkS4class{ParLongData}}
for details.}
\item{parMean}{\code{[ParLongData]}: Set the graphical parameters
used to plot the mean trajectories of each clusters \code{ClusterLongData3d}
(only when \code{y} is non missing). See \code{\linkS4class{ParLongData}}
for details.}
% \item{nbSample}{\code{[numeric]}: Graphical display of huge sample can
% be time consumming. This parameters fixe the maximum numbre of
% trajectories (randomly chosen) that will be drawn.}
\item{...}{Arguments to be passed to methods, such as graphical parameters.}
}
\details{
Plot the means trajectories of two variables of a \code{\linkS4class{ClusterLongData3d}} object in 3D. It
use the \code{\link[rgl:rgl]{rgl}} library. The user can make the
graphical representation turn using its mouse.
}
\seealso{\code{\linkS4class{ClusterLongData3d}}}
\examples{
##################
### Real example on array
time=c(1,2,3,4,8,12,16,20)
id2=1:120
f <- function(id,t)((id-1)\%\%3-1) * t
g <- function(id,t)(id\%\%2+1)*t
h <- function(id,t)(id\%\%4-0.5)*(20-t)
myCld <- clusterLongData3d(array(cbind(outer(id2,time,f),outer(id2,time,g),
outer(id2,time,h))+rnorm(120*8*3,0,3),dim=c(120,8,3)))
kml3d(myCld,3:4,2)
### Basic plot
plotTraj3d(myCld,3)
### plotTraj3d, variable 1 and 3
plotTraj3d(myCld,4,varZ=3)
plotTraj3d(myCld,3,parMean=parMEAN(col="red"))
}
\keyword{package}
\keyword{ts}
\keyword{aplot}
|
# Package test driver: attach testthat and the package under test, then run
# every test file under tests/testthat/.
library(testthat)
library(variantBedOverlap)

test_check("variantBedOverlap")
|
/tests/testthat.R
|
permissive
|
letaylor/variantBedOverlap
|
R
| false
| false
| 78
|
r
|
# Package test driver: attach testthat and the package under test, then run
# every test file under tests/testthat/.
library(testthat)
library(variantBedOverlap)

test_check("variantBedOverlap")
|
#' Select nodes in a graph
#' @description Select nodes from a graph object of
#' class \code{dgr_graph}.
#' @param graph a graph object of class
#' \code{dgr_graph} that is created using
#' \code{create_graph}.
#' @param node_attr an optional character vector of
#' node attribute values for filtering the node ID
#' values returned.
#' @param search an option to provide a logical
#' expression with a comparison operator (\code{>},
#' \code{<}, \code{==}, or \code{!=}) followed by a
#' number for numerical filtering, or, a regular
#' expression for filtering the nodes returned through
#' string matching.
#' @param set_op the set operation to perform upon
#' consecutive selections of graph nodes. This can
#' either be as a \code{union} (the default), as an
#' intersection of selections with \code{intersect},
#' or, as a \code{difference} on the previous
#' selection, if it exists.
#' @param nodes an optional vector of node IDs for
#' filtering list of nodes present in the graph.
#' @return a graph object of class \code{dgr_graph}.
#' @examples
#' library(magrittr)
#'
#' # Create a node data frame (ndf)
#' nodes <-
#' create_nodes(
#' nodes = c("a", "b", "c", "d"),
#' type = c("A", "A", "Z", "Z"),
#' label = TRUE,
#' value = c(3.5, 2.6, 9.4, 2.7))
#'
#' # Create an edge data frame (edf)
#' edges <-
#' create_edges(
#' from = c("a", "b", "c"),
#' to = c("d", "c", "a"),
#' rel = c("A", "Z", "A"))
#'
#' # Create a graph with the ndf and edf
#' graph <-
#' create_graph(nodes_df = nodes,
#' edges_df = edges)
#'
#' # Explicitly select nodes `a` and `c`
#' graph <-
#' graph %>%
#' select_nodes(
#' nodes = c("a", "c"))
#'
#' # Verify that the node selection has been made
#' # using the `get_selection()` function
#' get_selection(graph)
#' #> $nodes
#' #> [1] "a" "c"
#'
#' # Select nodes based on the node `type`
#' # being `Z`
#' graph <-
#' graph %>%
#' clear_selection %>%
#' select_nodes(
#' node_attr = "type",
#' search = "Z")
#'
#' # Verify that an node selection has been made, and
#' # recall that the `c` and `d` nodes are of the
#' # `Z` type
#' get_selection(graph)
#' #> $nodes
#' #> [1] "c" "d"
#'
#' # Select edges based on the node value attribute
#' # being greater than 3.0 (first clearing the current
#' # selection of nodes)
#' graph <-
#' graph %>%
#' clear_selection %>%
#' select_nodes(
#' node_attr = "value",
#' search = ">3.0")
#'
#' # Verify that the correct node selection has been
#' # made; in this case, nodes `a` and `c` have values
#' # for `value` greater than 3.0
#' get_selection(graph)
#' #> $nodes
#' #> [1] "a" "c"
#' @export select_nodes
select_nodes <- function(graph,
                         node_attr = NULL,
                         search = NULL,
                         set_op = "union",
                         nodes = NULL) {

  if (is_graph_empty(graph)) {
    stop("The graph is empty so no selections can be made.")
  }

  # A node selection always replaces any active edge selection
  graph$selection$edges <- NULL

  # Drop the selection container entirely if nothing is left in it
  if (length(graph$selection) == 0) {
    graph$selection <- NULL
  }

  # Extract the graph's internal node data frame (ndf)
  nodes_df <- graph$nodes_df

  if (!is.null(node_attr)) {
    if (length(node_attr) > 1) {
      stop("Only one node attribute can be specified.")
    }
    if (!(node_attr %in% colnames(nodes_df)[-1])) {
      stop("The specified attribute is not available.")
    }
  }

  # Case 1: no attribute filter -- select every node, or the explicit
  # `nodes` subset if one was supplied
  if (is.null(node_attr)) {
    nodes_selected <- nodes_df$nodes
    if (!is.null(nodes)) {
      if (any(!(nodes %in% nodes_selected))) {
        stop("One of more of the nodes specified are not available in the graph.")
      }
      nodes_selected <- nodes
    }
  }

  # Case 2: filter on a node attribute, optionally restricted to `nodes`
  if (!is.null(node_attr)) {
    # Filter nodes_df by node ID values in `nodes`
    if (!is.null(nodes)) {
      if (any(!(nodes %in% nodes_df$nodes))) {
        stop("One of more of the nodes specified are not available in the graph.")
      }
      nodes_df <- nodes_df[which(nodes_df$nodes %in% nodes), ]
    }

    # Column holding the values for `node_attr`
    column_number <-
      which(colnames(nodes_df) %in% node_attr)

    # Fix: default the selection to every (possibly `nodes`-restricted)
    # node ID; previously, supplying `node_attr` without `search` left
    # `nodes_selected` undefined and the function failed with
    # "object 'nodes_selected' not found"
    nodes_selected <- nodes_df[, 1]

    # If a search term is provided, filter either with a logical
    # expression (>, <, ==, or != followed by a number) or with a
    # regular expression on the attribute values
    if (!is.null(search)) {
      logical_expression <-
        grepl("^>.*", search) | grepl("^<.*", search) |
        grepl("^==.*", search) | grepl("^!=.*", search)

      if (logical_expression) {
        if (grepl("^>.*", search)) {
          rows_where_true_le <-
            which(nodes_df[, column_number] >
                    as.numeric(gsub(">(.*)", "\\1", search)))
        }
        if (grepl("^<.*", search)) {
          rows_where_true_le <-
            which(nodes_df[, column_number] <
                    as.numeric(gsub("<(.*)", "\\1", search)))
        }
        if (grepl("^==.*", search)) {
          rows_where_true_le <-
            which(nodes_df[, column_number] ==
                    as.numeric(gsub("==(.*)", "\\1", search)))
        }
        if (grepl("^!=.*", search)) {
          rows_where_true_le <-
            which(nodes_df[, column_number] !=
                    as.numeric(gsub("!=(.*)", "\\1", search)))
        }
        nodes_selected <- nodes_df[rows_where_true_le, 1]
      } else {
        # Treat `search` as a regular expression
        rows_where_true_regex <-
          which(grepl(search, as.character(nodes_df[, column_number])))
        nodes_selected <- nodes_df[rows_where_true_regex, 1]
      }
    }
  }

  # Fix: initialize the previous selection up front; previously a
  # selection object lacking a `$nodes` component left
  # `nodes_prev_selection` undefined
  nodes_prev_selection <- vector(mode = "character")
  if (!is.null(graph$selection) && !is.null(graph$selection$nodes)) {
    nodes_prev_selection <- graph$selection$nodes
  }

  # Combine the new selection with any previous one using the requested
  # set operation; an unknown `set_op` now fails with a clear message
  # instead of "object 'nodes_combined' not found"
  if (set_op == "union") {
    nodes_combined <- union(nodes_prev_selection, nodes_selected)
  } else if (set_op == "intersect") {
    nodes_combined <- intersect(nodes_prev_selection, nodes_selected)
  } else if (set_op == "difference") {
    nodes_combined <- setdiff(nodes_prev_selection, nodes_selected)
  } else {
    stop("`set_op` must be one of \"union\", \"intersect\" or \"difference\".")
  }

  graph$selection$nodes <- nodes_combined

  return(graph)
}
|
/R/select_nodes.R
|
no_license
|
Oscar-Deng/DiagrammeR
|
R
| false
| false
| 6,453
|
r
|
#' Select nodes in a graph
#' @description Select nodes from a graph object of
#' class \code{dgr_graph}.
#' @param graph a graph object of class
#' \code{dgr_graph} that is created using
#' \code{create_graph}.
#' @param node_attr an optional character vector of
#' node attribute values for filtering the node ID
#' values returned.
#' @param search an option to provide a logical
#' expression with a comparison operator (\code{>},
#' \code{<}, \code{==}, or \code{!=}) followed by a
#' number for numerical filtering, or, a regular
#' expression for filtering the nodes returned through
#' string matching.
#' @param set_op the set operation to perform upon
#' consecutive selections of graph nodes. This can
#' either be as a \code{union} (the default), as an
#' intersection of selections with \code{intersect},
#' or, as a \code{difference} on the previous
#' selection, if it exists.
#' @param nodes an optional vector of node IDs for
#' filtering list of nodes present in the graph.
#' @return a graph object of class \code{dgr_graph}.
#' @examples
#' library(magrittr)
#'
#' # Create a node data frame (ndf)
#' nodes <-
#' create_nodes(
#' nodes = c("a", "b", "c", "d"),
#' type = c("A", "A", "Z", "Z"),
#' label = TRUE,
#' value = c(3.5, 2.6, 9.4, 2.7))
#'
#' # Create an edge data frame (edf)
#' edges <-
#' create_edges(
#' from = c("a", "b", "c"),
#' to = c("d", "c", "a"),
#' rel = c("A", "Z", "A"))
#'
#' # Create a graph with the ndf and edf
#' graph <-
#' create_graph(nodes_df = nodes,
#' edges_df = edges)
#'
#' # Explicitly select nodes `a` and `c`
#' graph <-
#' graph %>%
#' select_nodes(
#' nodes = c("a", "c"))
#'
#' # Verify that the node selection has been made
#' # using the `get_selection()` function
#' get_selection(graph)
#' #> $nodes
#' #> [1] "a" "c"
#'
#' # Select nodes based on the node `type`
#' # being `Z`
#' graph <-
#' graph %>%
#' clear_selection %>%
#' select_nodes(
#' node_attr = "type",
#' search = "Z")
#'
#' # Verify that an node selection has been made, and
#' # recall that the `c` and `d` nodes are of the
#' # `Z` type
#' get_selection(graph)
#' #> $nodes
#' #> [1] "c" "d"
#'
#' # Select edges based on the node value attribute
#' # being greater than 3.0 (first clearing the current
#' # selection of nodes)
#' graph <-
#' graph %>%
#' clear_selection %>%
#' select_nodes(
#' node_attr = "value",
#' search = ">3.0")
#'
#' # Verify that the correct node selection has been
#' # made; in this case, nodes `a` and `c` have values
#' # for `value` greater than 3.0
#' get_selection(graph)
#' #> $nodes
#' #> [1] "a" "c"
#' @export select_nodes
select_nodes <- function(graph,
                         node_attr = NULL,
                         search = NULL,
                         set_op = "union",
                         nodes = NULL) {

  # No selection is possible on an empty graph
  if (is_graph_empty(graph)) {
    stop("The graph is empty so no selections can be made.")
  }

  # A node selection replaces any active edge selection
  graph$selection$edges <- NULL

  # Remove `graph$selection` entirely if nothing remains in it
  if (length(graph$selection) == 0){
    graph$selection <- NULL
  }

  # Extract the graph's internal node data frame (ndf)
  nodes_df <- graph$nodes_df

  # Validate `node_attr`: exactly one attribute, and it must be an
  # existing non-ID column of the ndf
  if (!is.null(node_attr)) {
    if (length(node_attr) > 1) {
      stop("Only one node attribute can be specified.")
    }
    if (!(node_attr %in% colnames(nodes_df)[-1])) {
      stop("The specified attribute is not available.")
    }
  }

  # Case 1: no attribute filter -- select all nodes, or the explicit
  # subset given in `nodes`
  if (is.null(node_attr)) {
    nodes_selected <- nodes_df$nodes
    if (!is.null(nodes)) {
      if (any(!(nodes %in% nodes_selected))) {
        stop("One or more of the nodes specified are not available in the graph.")
      }
      nodes_selected <- nodes
    }
  }

  # Case 2: filter nodes on the values of `node_attr`
  if (!is.null(node_attr)) {

    # Restrict the ndf to the node IDs in `nodes`, if provided
    if (!is.null(nodes)) {
      if (any(!(nodes %in% nodes_df$nodes))) {
        stop("One or more of the nodes specified are not available in the graph.")
      }
      nodes_df <- nodes_df[which(nodes_df$nodes %in% nodes),]
    }

    # Determine the column number for which the value
    # for `node_attr` is available
    column_number <-
      which(colnames(nodes_df) %in% node_attr)

    # Bug fix: previously `nodes_selected` was left undefined when
    # `node_attr` was supplied without a `search` term, causing an
    # error below; default to all (possibly `nodes`-restricted) IDs
    nodes_selected <- nodes_df$nodes

    # If a search term provided, filter using a logical
    # expression or a regex match
    if (!is.null(search)) {

      # A term starting with >, <, == or != is a logical expression;
      # anything else is treated as a regular expression
      if (grepl("^>.*", search) | grepl("^<.*", search) |
          grepl("^==.*", search) | grepl("^!=.*", search)) {
        logical_expression <- TRUE } else {
          logical_expression <- FALSE
        }

      # Filter using a logical expression on numeric attribute values
      if (logical_expression) {
        if (grepl("^>.*", search)) {
          rows_where_true_le <-
            which(nodes_df[,column_number] >
                    as.numeric(gsub(">(.*)", "\\1", search)))
        }
        if (grepl("^<.*", search)) {
          rows_where_true_le <-
            which(nodes_df[,column_number] <
                    as.numeric(gsub("<(.*)", "\\1", search)))
        }
        if (grepl("^==.*", search)) {
          rows_where_true_le <-
            which(nodes_df[,column_number] ==
                    as.numeric(gsub("==(.*)", "\\1", search)))
        }
        if (grepl("^!=.*", search)) {
          rows_where_true_le <-
            which(nodes_df[,column_number] !=
                    as.numeric(gsub("!=(.*)", "\\1", search)))
        }
        nodes_selected <- nodes_df[rows_where_true_le, 1]
      }

      # Filter using a `search` value as a
      # regular expression
      if (logical_expression == FALSE) {
        rows_where_true_regex <-
          which(grepl(search, as.character(nodes_df[,column_number])))
        nodes_selected <- nodes_df[rows_where_true_regex, 1]
      }
    }
  }

  # Bug fix: initialize before the conditional so the previous
  # selection is always defined, even if `graph$selection` exists
  # without a `$nodes` component
  nodes_prev_selection <- vector(mode = "character")
  if (!is.null(graph$selection)) {
    if (!is.null(graph$selection$nodes)) {
      nodes_prev_selection <- graph$selection$nodes
    }
  }

  # Incorporate the selected nodes into the graph's selection using
  # the requested set operation against the previous selection
  if (set_op == "union") {
    nodes_combined <- union(nodes_prev_selection, nodes_selected)
  } else if (set_op == "intersect") {
    nodes_combined <- intersect(nodes_prev_selection, nodes_selected)
  } else if (set_op == "difference") {
    nodes_combined <- setdiff(nodes_prev_selection, nodes_selected)
  }

  graph$selection$nodes <- nodes_combined
  return(graph)
}
|
library( ANTsR )

# Average a collection of images into a single output image.
# Usage: Rscript averageImages.R <inputGlob> <outputFile>
args <- commandArgs( trailingOnly = TRUE )

if( length( args ) == 2 )
  {
  # Expand the wildcard pattern into the list of input image paths
  inputFileName <- Sys.glob( args[1] )
  modelFile <- args[2]
  } else {
  helpMessage <- paste0( "Usage: Rscript averageImages.R inputFiles outputFile\n" )
  stop( helpMessage )
  }

# Compute the average image and write it to the requested location
avg <- antsAverageImages( inputFileName )
antsImageWrite( avg, modelFile )
|
/src/averageImages.R
|
permissive
|
ANTsXNet/brainSR
|
R
| false
| false
| 350
|
r
|
# Load ANTsR for image reading, averaging, and writing utilities
library( ANTsR )
# Expect exactly two CLI arguments: an input file glob and an output path
args <- commandArgs( trailingOnly = TRUE )
if( length( args ) != 2 )
  {
  helpMessage <- paste0( "Usage: Rscript averageImages.R inputFiles outputFile\n" )
  stop( helpMessage )
  } else {
  # Expand the wildcard pattern into the list of input image paths
  inputFileName <- Sys.glob( args[1] )
  modelFile <- args[2]
  }
# Average all matched images and write the result to the output path
avg = antsAverageImages( inputFileName )
antsImageWrite( avg, modelFile )
|
complete <- function(directory, id = 1:332){
  # Count, for each monitor id, the observations in
  # <getwd()>/<directory>/<id>.csv (id zero-padded to width 3) where
  # both `nitrate` and `sulfate` are non-missing.
  #
  # Args:
  #   directory: folder name (relative to the working directory)
  #              holding the monitor CSV files
  #   id:        integer vector of monitor ids to process
  #
  # Returns:
  #   A data.frame with columns `ids` (the requested ids, in order)
  #   and `nobs` (the complete-observation count for each id).
  #
  # Improvement: the original grew `ids`/`nobs` with c() inside a loop
  # (quadratic copying); vapply preallocates and guarantees an integer
  # result per file.
  nobs <- vapply(id, function(i) {
    fileNcsv <- read.csv(paste0(getwd(), "/", directory, "/",
                                formatC(i, width = 3, flag = "0"), ".csv"))
    # Sum of a logical vector == number of rows with both values present
    sum(!is.na(fileNcsv$nitrate) & !is.na(fileNcsv$sulfate))
  }, integer(1))
  data.frame(ids = id, nobs = nobs)
}
|
/complete.R
|
no_license
|
mauropaganin/datasciencecoursera
|
R
| false
| false
| 425
|
r
|
# Count complete observations (both `nitrate` and `sulfate` present) in
# each monitor CSV file under `directory`; returns a data frame with one
# row per requested monitor id (columns `ids` and `nobs`).
complete <- function(directory, id = 1:332){
  # Accumulators for monitor ids and their complete-case counts
  ids <- c()
  nobs <- c()
  for(i in id){
    # Build path like <cwd>/<directory>/001.csv (id zero-padded to 3)
    fileNcsv <- read.csv(paste0(getwd(), "/", directory,"/",
          formatC(i, width=3, flag="0"), ".csv"))
    ids <- c(ids, i)
    # Keep only rows where both pollutant measurements are present
    completeNNA <- subset(fileNcsv, !is.na(nitrate) & !is.na(sulfate))
    nobs <- c(nobs, nrow(completeNNA))
  }
  dt_frame <- data.frame(ids, nobs)
  dt_frame
}
|
# Group-regularized ("GRridge") ridge regression.
#
# Fits an initial ridge model to high-dimensional data and then
# iteratively re-estimates group-specific penalty multipliers from the
# co-data `partitions` (groupings of the variables/rows), re-weighting
# the design matrix columns until the cross-validated likelihood (CVL)
# no longer improves or `niter` is reached.
#
# Response dispatch (visible in the code below):
#   - factor with 2 levels, or numeric 0/1  -> logistic ridge
#   - other numeric                          -> delegated to .grridgelin()
#   - Surv object                            -> Cox ridge
#
# Key arguments:
#   highdimdata : p x n matrix (features in rows, samples in columns)
#   partitions  : (list of) groupings; each grouping is a list of
#                 row-index vectors into `highdimdata`
#   unpenal     : formula for unpenalized covariates (~0, ~1, or more)
#   method      : multiplier-estimation variant ("exactstable", "exact",
#                 "stable", "simple", "adaptridge")
#   optl        : global ridge penalty; cross-validated when NULL
#
# Returns a list with the response, CVLs, per-group lambda multipliers,
# the global penalty, per-variable multiplier matrix, fitted prediction
# objects, re-scaled betas, optional lasso / elastic-net / unpenalized
# comparison results, the model type, the call arguments, and in-sample
# predictions.
#
# NOTE(review): relies on functions from the `penalized` package
# (penalized, optL2, cvl, survival), `glmnet`, and `pava`; these are
# attached elsewhere in the package -- confirm before running standalone.
grridge <- function(highdimdata, response, partitions, unpenal = ~1,
                    offset=NULL, method="exactstable",
                    niter=10, monotone=NULL, optl=NULL, innfold=NULL,
                    fixedfoldsinn=TRUE, maxsel=c(25,100),selectionEN=FALSE,cvlmarg=1,
                    savepredobj="all", dataunpen=NULL, ord = 1:length(partitions),
                    comparelasso=FALSE,optllasso=NULL,cvllasso=TRUE,
                    compareunpenal=FALSE,trace=FALSE,modus=1,
                    EBlambda=FALSE,standardizeX = TRUE){#
# highdimdata=simdata; response=round(exp(Y)/(1+exp(Y))); partitions=part5; unpenal = ~1; innfold=3;
# offset=NULL; method="exactstable";
# niter=10; monotone=NULL; optl=NULL; innfold=NULL;
# fixedfoldsinn=TRUE; maxsel=c(25,100);selectionEN=TRUE;cvlmarg=1;
# savepredobj="all"; dataunpen=NULL; ord = 1:length(partitions);
# comparelasso=FALSE;optllasso=NULL;cvllasso=TRUE;
# compareunpenal=FALSE;trace=FALSE;modus=1;
# EBlambda=FALSE;standardizeX = TRUE

  # ---- Determine the model type from the response class ----
  if(class(response) =="factor") {
    nlevel <- length(levels(response))
    if(nlevel != 2){
      print("Response is not binary, so not suitable for two-class classification.")
      return(NULL)
    } else {
      model = "logistic"
      if(trace) print("Binary response, executing logistic ridge regression")
      lev <- levels(response)
      if(trace) print(paste("Predicting probability on factor level",lev[2]))
    }} else {
      if(class(response) == "numeric" | class(response)=="integer"){
        valresp <- sort(unique(response))
        if(length(valresp)==2 & valresp[1]==0 & valresp[2]==1) {
          model = "logistic"
          if(trace) print("Binary response, executing logistic ridge regression")
        } else {
          # Continuous numeric response: delegate entirely to the
          # linear-model implementation and return its result
          model = "linear"
          if(trace) print("Numeric continuous response, executing linear ridge regression")
          return(.grridgelin(highdimdata=highdimdata, response=response,partitions= partitions, unpenal=unpenal,
                             offset=offset, method=method, niter=niter, monotone=monotone, optl=optl, innfold=innfold,
                             fixedfoldsinn=fixedfoldsinn, maxsel=maxsel,selectionEN=selectionEN,cvlmarg=cvlmarg,
                             savepredobj=savepredobj, dataunpen=dataunpen, ord = ord,
                             comparelasso=comparelasso,optllasso=optllasso,cvllasso=cvllasso,
                             compareunpenal=compareunpenal,trace=trace,modus=modus,
                             EBlambda=EBlambda,standardizeX = standardizeX))
        }
      } else {
        if(class(response) == "Surv"){
          model="survival"
          if(trace) print("Survival response, executing cox ridge regression")
        } else {
          print("Non-valid response. Should be binary, numeric or survival.")
          return(NULL)
        }
      }
    }

  # ---- Standardize each feature (row) to mean 0, sd 1 ----
  # sd is floored at 1e-5 to avoid division by (near) zero
  if(standardizeX) {
    if(trace) print("Covariates are standardized")
    sds <- apply(highdimdata,1,sd)
    sds2 <- sapply(sds,function(x) max(x,10^{-5}))
    highdimdata <- (highdimdata-apply(highdimdata,1,mean))/sds2
  }
  nsam <- ncol(highdimdata)
  # These methods are single-pass by construction
  if(method=="adaptridge" | method== "exact") niter <- 1
  # Allow a single grouping (bare integer index list) as shorthand
  if(class(partitions[[1]]) =="integer"){
    partitions=list(group=partitions)
    ord=1
  }
  nclass <- length(partitions)
  if(is.null(monotone)) monotone <- rep(FALSE,nclass)
  if(length(monotone) != length(partitions)) {
    print(paste("ERROR: length 'monotone' unequal to length 'partitions' "))
    return(NULL)
  }
  # Process partitions in the user-specified order
  partitions <- partitions[ord]
  monotone <- monotone[ord]
  nr <- nrow(highdimdata)

  # ---- Sanity-check partitions: indices must be valid data rows ----
  for(ncl in 1:nclass){
    indexset <- unlist(partitions[[ncl]])
    if(length(indexset) < nr){
      print(paste("Warning: partition",ncl,"does not contain all row indices of the data"))
    }
    if(max(indexset) > nr | min(indexset)<1){
      print(paste("ERROR: partition",ncl,"contains an invalid index, e.g. larger than number of data rows"))
      return(NULL)
    }
  }

  # ---- Detect overlapping groups per partition ----
  # Wmat column = per-feature weight 1/sqrt(#groups covering the
  # feature) for overlapping groupings, 1 otherwise
  overlap <- c()
  Wmat <- c()
  nfeattot <- c()
  for(ncl in 1:nclass){
    indexset <- unlist(partitions[[ncl]])
    nfeatcl <- length(unique(indexset))
    nfeattot <- c(nfeattot,nfeatcl)
    if(length(indexset) > nfeatcl){
      if(trace) print(paste("Grouping",ncl,"contains overlapping groups"))
      overlap <- c(overlap,TRUE)
      whgroup <- partitions[[ncl]]
      nover <- rep(0,nr)
      for(k in 1:length(whgroup)){
        wh <- whgroup[[k]]
        nover[wh] <- nover[wh] + 1
      }
      Wmat <- cbind(Wmat,sqrt(1/nover))
    } else {
      if(trace) print(paste("Grouping",ncl,"contains mutually exclusive groups"))
      overlap <- c(overlap,FALSE)
      Wmat <- cbind(Wmat,rep(1,nr))
    }
  }

  # Record the call arguments for inclusion in the return value
  arguments <- list(partitions=partitions,unpenal=unpenal, offset=offset, method=method,
                    niter=niter, monotone=monotone, optl=optl, innfold=innfold,
                    fixedfoldsinn=fixedfoldsinn,
                    selectionEN=selectionEN,maxsel=maxsel,
                    cvlmarg=cvlmarg, dataunpen=dataunpen,savepredobj=savepredobj, ord=ord,
                    comparelasso=comparelasso, optllasso=optllasso,
                    compareunpenal=compareunpenal, modus=modus,EBlambda=EBlambda,standardizeX=standardizeX)
  if(nr > 10000 & is.null(innfold)) print("NOTE: consider setting innfold=10 to save computing time")

  # ---- Names for partitions and for the fitted-model slots ----
  nmp0 <- names(partitions)
  if(is.null(nmp0)) nmp0 <- sapply(1:length(partitions),function(i) paste("Grouping",i))
  nmp0 <- sapply(1:length(partitions),function(i) {
    if(nmp0[i]=="") return(paste("Grouping",i)) else return(nmp0[i])})
  nmp <- c("NoGroups","GroupRegul")
  nmpweight <- nmp #to be used later
  if(comparelasso) nmp <- c(nmp,"lasso")
  #new 29/11
  if(selectionEN) nmp <- c(nmp,paste("EN",maxsel,sep=""))
  if(compareunpenal) nmp <- c(nmp,"modelunpen")
  # Unpenalized covariates require the accompanying data
  if((unpenal != ~0) & (unpenal != ~1)) {
    if(is.null(dataunpen)) {print("If unpenal contains variables, data of
     the unpenalized variables should be specified in the data slot!")
      return(NULL)
    }
  }
  nsam <- ncol(highdimdata)

  # ---- Fold a numeric offset into the unpenalized formula ----
  # The offset vector is serialized into a literal offset(c(...)) term
  if(!is.null(offset)){
    noffs <- length(offset)
    offsets <-"c("
    if(noffs==1) {for(i in 1:(nsam-1)) offsets <- paste(offsets,offset,",",sep="")} else {
      for(i in 1:(nsam-1)) offsets <- paste(offsets,offset[i],",",sep="")}
    if(noffs==1) offsets <- paste(offsets,offset,")",sep="") else offsets <- paste(offsets,offset[nsam],")",sep="")
    if((unpenal != ~0) & (unpenal != ~1)){
      unpenal <- formula(paste(deparse(unpenal),"+ offset(",offsets,")",sep=""))
    } else {
      unpenal <- formula(paste("~","offset(",offsets,")",sep=""))
    }
  }
  # `penalized` needs a data argument; use a dummy frame when absent
  if(is.null(dataunpen)) datapred <- data.frame(fake=rep(NA,ncol(highdimdata))) else datapred <- dataunpen
  nopen <- unpenal
  # Leave-one-out CV when no inner fold count is given
  if(is.null(innfold)) foldinit <- nsam else foldinit <- innfold
  pmt0<- proc.time()
  optl0 <- optl

  # ---- Cross-validate the global ridge penalty if not supplied ----
  if(is.null(optl)){
    if(trace) print("Finding lambda for initial ridge regression")
    if(fixedfoldsinn) set.seed(346477)
    opt <- optL2(response, penalized = t(highdimdata),fold=foldinit,unpenalized=nopen,data=datapred,trace=trace)
    time1 <- proc.time()-pmt0
    if(trace) print(opt$cv)
    if(trace) print(paste("Computation time for cross-validating main penalty parameter:",time1[3]))
    optl <- opt$lambda
    if(trace) print(paste("lambda2",optl))
    if(is.infinite(optl)) {
      if(trace) print("Infinite penalty returned. Data contains no signal. Penalty set to 10^10 ")
      optl <- 10^10
    }
    arguments$optl <- optl
  }

  # ---- Initial (unweighted) ridge fit and its baseline CVL ----
  pmt <- proc.time()
  nsam <- ncol(highdimdata)
  nfeat <- nrow(highdimdata)
  XM0 <- t(highdimdata)
  response0 <- response
  cvlnstot <- rep(0,(nclass+1))
  allpreds <- c()
  whsam <- 1:nsam
  responsemin <- response0
  pen0 <- penalized(responsemin, penalized = XM0, lambda2 = optl, unpenalized=nopen,data=cbind(XM0,datapred),
                    trace=trace)
  nmunpen <- names(pen0@unpenalized)
  if(is.element("(Intercept)",nmunpen)) addintercept <- TRUE else addintercept <- FALSE
  # Reuse the CV folds from the lambda search where possible
  if(is.null(innfold)) {nf <- nrow(XM0)} else {
    if(!is.null(optl0)) {
      nf <- innfold
      if(fixedfoldsinn) set.seed(346477)
    } else {nf <- opt$fold}
  }
  opt2 <- cvl(responsemin, penalized = XM0,fold=nf, lambda2 = optl,unpenalized=nopen,data=datapred,trace=trace)
  nf <- opt2$fold
  cvln0 <- opt2$cvl
  cvlnprev <- cvln0
  penprev <- pen0
  pen <- pen0
  if(trace) print(cvln0)
  # XMw0 is the group-reweighted design; starts equal to XM0
  XMw0 <- XM0
  XMw0prev <- XM0
  converged <- FALSE
  conv <- rep(FALSE,nclass)
  # Bounds keeping estimated multipliers in a numerically safe range
  controlbound1 <-1000
  controlbound2 <- controlbound3 <- 10
  almvecall <- rep(1,nfeat)
  # One multiplier per group, initialized to 1, per partition
  lambdas <- lapply(partitions, function(cla) {
    ngroup <- length(cla)
    return(rep(1,ngroup))
  })
  lmvec <- lmvecprev <- array(1,nfeat)
  i <- 1

  # ---- Main loop: update multipliers per partition until converged ----
  while(!converged & i <= niter){
    cl <- 1
    if(method=="adaptridge") cl <- nclass
    while(cl <= nclass){
      convcl <- conv[cl]
      if(!convcl){
        whgr <- partitions[[cl]]
        lenggr <- unlist(lapply(whgr,length))
        ngroup1 <- length(whgr)
        names(lambdas[[cl]]) <- names(whgr)
        coeff <- penprev@penalized
        if(model == "survival"){
          preds <- predict(penprev,XMw0,data=datapred)
        } else {
          preds <- predict(penprev,XMw0,data=datapred)[1:nsam]
        }
        coeffsq <- coeff^2
        # Model-specific observation weights Wi (IRLS-style for
        # logistic; cumulative-hazard based for survival)
        if(model=="logistic") {
          Wi <- sqrt(preds*(1-preds))
          constlam <- 2
        }
        # if(model == "linear"){ #running grridgelin when model =linear
        #     Wi <- rep(1,length(preds))
        #     constlam <- 1
        # }
        if(model == "survival"){
          resptime <- response[,1]
          predsnew <- -log(sapply(1:nsam,function(k) survival(preds,time=resptime[k])[k]))
          Wi <- sqrt(predsnew)
          constlam <- 2
        }
        # Weighted design, with unpenalized columns appended at a
        # large scale (10^5) so they are effectively unpenalized
        if(!is.null(dataunpen)) {
          mm <- model.matrix(nopen,dataunpen)
          XMW <- t(t(cbind(XMw0,10^5*mm)) %*% diag(Wi))
        } else {
          if(addintercept) XMW <- t(t(cbind(XMw0,rep(10^5,nsam))) %*% diag(Wi)) else XMW <- t(t(XMw0) %*% diag(Wi))
        }
        # SVD-based computation of the ridge "hat"-type operator;
        # vars3 are per-coefficient variance-like quantities
        SVD <- svd(XMW)
        leftmat <- SVD$v %*% diag(1/((SVD$d)^2+constlam*optl)) %*% diag(SVD$d) %*% t(SVD$u)
        # if(model=="linear"){
        # Hatm <- XMW %*% leftmat
        # df <- nsam - sum(diag(2*Hatm - Hatm %*% t(Hatm)))
        # VarRes <- sum((response - preds)^2)/df
        # print(paste("Sigma^2 estimate:",VarRes))
        # vars3 <- VarRes*rowSums(leftmat^2)
        # } else {
        vars3 <- rowSums(leftmat^2)
        #}
        which0 <- which(vars3==0)
        vars3[which0] <- 10^{-30}
        # if(model=="linear"){
        # mycoeff2svd <- (leftmat %*% response)^2
        # }
        # Squared-coefficient quantities used in the moment equations;
        # `modus` selects between raw coefficients and working-response
        # projections for the logistic model
        if(model=="logistic"){
          if(is.factor(response)) respnum <- as.numeric(response)-1 else respnum <- response
          z <- matrix(log(preds/(1-preds))+(respnum - preds)/(preds*(1-preds)),ncol=1)
          if(modus == 1) mycoeff2svd <- coeffsq
          if(modus == 2) mycoeff2svd <- (leftmat %*% z)^2
        }
        if(model=="survival"){
          mycoeff2svd <- coeffsq
        }
        cii2 <- (rowSums(leftmat * t(XMW)))^2
        leftmat <- leftmat/sqrt(vars3)
        lowerleft <- 10^(-30)
        # Left-hand side of the estimating equation per group: average
        # excess of squared coefficient over its variance
        lefts2 <- function(group){
          ind <- whgr[[group]]
          ngr <- lenggr[group]
          coefftau2 <- sum(sapply(mycoeff2svd[ind]/vars3[ind],function(x) max(x,1)))-length(ind)
          return(max(lowerleft,coefftau2/ngr))
        }
        leftside <- sapply(1:length(whgr),lefts2)
        ellarg0 <- length(leftside[leftside>lowerleft])/length(leftside)
        # If most groups show no signal, freeze this partition
        if(ellarg0 <=.5){
          if(trace) print(paste("Partition",nmp0[cl],"NOT ITERATED"))
          conv[cl] <- TRUE
          cvln1 <- cvlnprev
          XMw0 <- XMw0prev
          pen <- penprev
        } else {
          # Randomized replicates of the left-hand side to estimate
          # the relative (Monte Carlo) error of the group statistics
          lefts2ran <- function(group,randomind){
            ind <- whgr[[group]]
            ngr <- lenggr[group]
            coefftau2 <- sum(sapply(mycoeff2svd[1:nfeat][randomind[ind]]/vars3[1:nfeat][randomind[ind]],
                                    function(x) max(x,1)))-length(randomind[ind])
            return(max(lowerleft,coefftau2/ngr))
          }
          randomiz <- function(fakex){
            randomind2 <- sample(1:nfeat)
            leftsideran <- sapply(1:length(whgr),lefts2ran,randomind=randomind2)
            return(leftsideran)}
          nlefts <- 100
          leftsran <- sapply(1:nlefts,randomiz)
          means <- apply(leftsran,1,mean)
          leftsrancen <- t(t(leftsran)-means)
          relerror <- sum(abs(leftsrancen))/(nlefts*sum(abs(means)))
          if(cl==1 & i==1) if(trace) print(cvln0)
          if(trace) print(paste("Relative error:",relerror))
          if(relerror>= 0.1) print("WARNING: large relative error (>=0.1). Consider using larger groups of variable.")
          # Build the group-by-group coefficient matrix of the linear
          # system relating multipliers to the left-hand sides
          nadd <- ncol(XMW) - ncol(XMw0)
          rightmat = t(t(XMW) * c(Wmat[,cl],rep(1,nadd)))
          rightmats <- lapply(1:length(whgr),function(j){
            rightj2 <- rightmat[,whgr[[j]]]
            rcp <- rightj2 %*% t(rightj2)
            return(rcp)
          })
          coefmatfast <- t(apply(matrix(1:length(whgr),nrow=length(whgr)),1,
                                 function(i){
                                   lefti2 <- leftmat[whgr[[i]],]
                                   lcp <- t(lefti2) %*% lefti2
                                   ckls <- sapply(1:length(whgr),function(j){
                                     rcp <- rightmats[[j]]
                                     return(sum(lcp*rcp))
                                   })
                                   return(ckls)
                                 }))
          coefmatfast <- coefmatfast/lenggr
          # Condition number of the shrunken coefficient matrix, used
          # to pick a shrinkage factor `lam` that stabilizes the solve
          CNfun <- function(lam,cfmmat=coefmatfast){
            ng <- nrow(cfmmat)
            dmax <- max(diag(cfmmat))
            cfmlam <- (1-lam)*cfmmat + lam*diag(dmax,nrow=ng)
            eigenvals <- eigen(cfmlam,only.values=TRUE)$values
            CN <- eigenvals[1]/eigenvals[ng]
            return(Re(CN))
          }
          lams <- seq(0,1,by=0.005)
          CNsRan <- sapply(lams, CNfun,cfmmat=coefmatfast)
          CNsRanre <- CNsRan*relerror
          if(relerror<=0.1){
            lam <- lams[which(CNsRanre<=0.1)[1]]
          } else lam <- 1
          if(trace) print(paste("Shrink Factor coefficient matrix",lam))
          cfmmat <- coefmatfast;
          ng <- nrow(cfmmat)
          dmax <- max(diag(cfmmat))
          cfmlam <- (1-lam)*cfmmat + lam*diag(dmax,nrow=ng)
          # ---- Solve for the group variances tau^2 by `method` ----
          if(method=="exactstable"){
            soltau = solve(sum(cfmlam),sum(leftside))
            sol = solve(cfmlam,leftside)
            low <- soltau/controlbound1;up = soltau*controlbound1
            parinint <- sapply(sol,function(x) min(max(low,x),up))
            minopt <- optim(par = parinint,fn = function(pars=c(parinint)) sum(leftside - cfmlam %*% pars)^2,
                            method="L-BFGS-B",
                            lower=rep(low,ngroup1),upper=rep(up,ngroup1))
            tausqest0 <- minopt$par
          }
          if(method=="exact"){
            soltau = solve(sum(coefmatfast),sum(leftside))
            sol = solve(coefmatfast,leftside)
            low <- soltau/controlbound2;up = soltau*controlbound2
            parinint <- sapply(sol,function(x) min(max(low,x),up))
            minopt <- optim(par = parinint,fn = function(pars=c(parinint)) sum(leftside- cfmlam %*% pars)^2,
                            method="L-BFGS-B",
                            lower=rep(low,ngroup1),upper=rep(up,ngroup1))
            tausqest0 <- minopt$par
          }
          if(method=="stable"){
            soltau = solve(sum(coefmatfast),sum(leftside))
            solhyb <- sapply(1:ngroup1,function(i){
              leftsidei <- leftside[i]
              rightsidei <- c(coefmatfast[i,i], sum(coefmatfast[i,-i]*soltau))
              soli <- (leftsidei-rightsidei[2])/rightsidei[1]
              return(max(min(soltau*controlbound3,soli),soltau/controlbound3))
            })
          }
          if(method=="simple"){
            solsim = leftside
            tausqest <- solsim
            if(trace) print("simple")
          }
          if(method=="exact") {
            tausqest <- tausqest0
            if(trace) print("exact")
          }
          if(method=="exactstable") {
            tausqest <- tausqest0
            if(trace) print("exactstable")
          }
          if(method=="stable") {
            if(trace) print("stable")
            tausqest <- solhyb
          }
          if(method=="adaptridge") {
            if(trace) print("adaptive ridge")
          }
          # ---- Convert tau^2 estimates into calibrated multipliers
          # and rescale the design columns accordingly ----
          if(method=="stable" | method=="exact" | method=="exactstable" | method=="simple"){
            lambdanoncal <- 1/tausqest
            # Optional monotonicity constraint via isotonic regression
            if(monotone[cl]){
              weigh = unlist(lapply(whgr,length))
              lambdamultnoncal <- pava(lambdanoncal,w=weigh)
            } else lambdamultnoncal <- lambdanoncal
            tausqest<-1/lambdamultnoncal
            nfeatcl <- nfeattot[cl]
            overl <- overlap[cl]
            if(!overl){
              # Calibrate so multipliers average to 1 across features
              con3 <- sum(sapply(1:length(whgr),function(gr){return(length(whgr[[gr]])*tausqest[gr])}))
              tausqestcal<- nfeatcl/con3*tausqest
              lambdamult <- 1/tausqestcal
              if(trace) print(lambdamult)
              for(k in 1:length(whgr)){
                wh <- whgr[[k]]
                XMw0[,wh] <- XMw0[,wh]/sqrt(lambdamult[k])
              }
            } else {
              # Overlapping groups: accumulate tau^2 per feature,
              # weighted by the overlap weights, then calibrate
              tauk <- rep(0,nfeat)
              Wsq <- (Wmat[,cl])^2
              for(k in 1:length(whgr)){
                wh <- whgr[[k]]
                tauk[wh] <- tauk[wh] + tausqest[k]
              }
              tauk <- tauk*Wsq
              whna <- which(is.na(tauk))
              if(length(whna)>0) con3 <- sum(tauk[-whna]) else con3 <- sum(tauk)
              tausqestcal0<- nfeatcl/con3*tausqest
              lambdamult <- 1/tausqestcal0
              if(trace) print(lambdamult)
              tausqestcal<- (nfeatcl/con3)*tauk
              lambdamultperk <- 1/tausqestcal
              lambdamultperk[whna]<- 1
              XMw0 <- t(t(XMw0)/sqrt(lambdamultperk))
            }
          } else {
            # adaptridge: per-feature multipliers from squared betas
            tausqest <- coeffsq
            con3 <- sum(tausqest)
            tausqestcal<- nfeat/con3*tausqest
            lambdamult <- 1/tausqestcal
            XMw0 <- t(t(XMw0)/sqrt(lambdamult))
          }
          # ---- Accept or reject the update by cross-validated
          # likelihood on the reweighted design ----
          opt2w <- cvl(responsemin, penalized = XMw0,fold=nf,lambda2=optl,
                       unpenalized=nopen,data=datapred, trace=trace)
          cvln1 <- opt2w$cvl
          if(trace) print(cvln1)
          if((cvln1 - cvlnprev)/abs(cvlnprev) > 1/100 | ((cvln1 - cvlnprev)/abs(cvlnprev) >= 0 & i==1)){
            pen <- penalized(responsemin, penalized = XMw0, trace=trace,
                             lambda2 = optl,unpenalized=nopen,data=datapred)
            if(niter>1){
              if(!overl){
                for(group in 1:ngroup1){
                  ind <- whgr[[group]]
                  lmvec[ind] <- lmvec[ind]*lambdamult[group]
                }} else {
                  lmvec <- lmvec * lambdamultperk
                }
            }
            lambdas[[cl]] <- lambdas[[cl]]*lambdamult
            cvlnprev <- cvln1
            penprev <- pen
            XMw0prev <- XMw0
            if(trace) print(paste("Partition",nmp0[cl],"improved results"))
          } else {
            # No improvement: revert to previous state and mark the
            # partition converged
            if(niter>1) if(trace) print(paste("Partition",nmp0[cl],"CONVERGED after",i,"iterations"))
            else if(trace) print(paste("Partition",nmp0[cl],"did not improve results"))
            conv[cl] <- TRUE
            cvln1 <- cvlnprev
            XMw0 <- XMw0prev
            pen <- penprev
          }
        }
      }
      cl <- cl+1
    }
    if(sum(conv)==nclass){
      converged <- TRUE
      if(niter>1) if(trace) print(paste("All partitions CONVERGED after",i,"iterations"))
    }
    i <- i+1
  }
  # niter==0: skip adaptation entirely and use the initial fit
  if(niter==0) {pen <- pen0;XMw0<-XM0;cvln1<-cvln0;soltau <- NULL}

  # ---- Final in-sample predictions for baseline and GRridge fits ----
  if(model=="survival"){
    pred0 <- predict(pen0,XM0,unpenalized=nopen,data=datapred)
    predw <- predict(pen,XMw0,unpenalized=nopen,data=datapred)
  } else {
    pred0 <- predict(pen0,XM0,unpenalized=nopen,data=datapred)[1:nsam]
    predw <- predict(pen,XMw0,unpenalized=nopen,data=datapred)[1:nsam]
  }
  predshere <- cbind(pred0,predw)
  cvlnssam <- c(cvln0,cvln1)
  lmvecall <- lmvec
  almvecall <- cbind(almvecall,lmvecall)
  predobj <- c(pen0,pen)
  allpreds <- predshere
  whichsel <- NULL
  betassel <- NULL
  npr <- length(predobj)
  pred2 <-predobj[[npr]]
  lambs <- lmvecall
  # Betas are rescaled back to the original (unweighted) scale
  oldbeta <- pred2@penalized
  newbeta <- oldbeta/sqrt(lambs)
  time2 <- proc.time()-pmt
  if(trace) print(paste("Computation time for adaptive weigthing:",time2[3]))
  cvlnstot <- cvlnssam
  reslasso <- NULL
  mm <- NULL
  # if(model=="survival" & comparelasso){
  # print("Comparison with Cox-lasso is not yet supported")
  # comparelasso <- arguments$comparelasso <-FALSE
  # }+

  # ---- Optional comparison: lasso via glmnet ----
  if(comparelasso){
    if(model == "logistic") fam <- "binomial"
    if(model == "linear") fam <- "gaussian"
    if(model == "survival") fam <- "cox"
    interc <- TRUE
    if(unpenal == ~0 | fam=="cox") interc <- FALSE
    # penalty.factor=0 keeps unpenalized covariates unpenalized
    if((is.null(dataunpen)) | (unpenal == ~0) | (unpenal == ~1)) {
      X0 <- t(highdimdata)
      pf <- rep(1,nr)
      nunpen <- 0
    } else {
      mm <- model.matrix(unpenal,dataunpen)
      if(prod(mm[,1]==rep(1,nsam))==1) {
        interc <- TRUE
        mm <- mm[,-1,drop=FALSE]
      } else {
        interc <- FALSE
      }
      nunpen <- ncol(mm)
      pf <- c(rep(1,nr),rep(0,nunpen))
    }
    X0 <- cbind(t(highdimdata),mm)
    if(is.null(offset)) offset <- rep(0,nsam)
    if(trace) print("Starting lasso by glmnet")
    if(is.null(optllasso)){
      if(trace) print("Finding lambda for lasso regression")
      if(fixedfoldsinn) set.seed(346477)
      #alpha=1 implies lasso
      opt <- cv.glmnet(x=X0,y=response,offset=offset,foldid=nf,penalty.factor=pf,alpha=1,family=fam,
                       intercept=interc)
      optllasso <- opt$lambda.min
      whmin <- which(opt$lambda==optllasso)
      if(trace) print(paste("lambda1 (multiplied by N):",optllasso*nsam))
      arguments$optllasso <- optllasso
      cvliklasso <- opt$cvm[whmin]
    } else {
      cvliklasso0 <- if(cvllasso) try(cv.glmnet(x=X0,y=response,offset=offset,foldid=nf,lambda=c(optllasso,optllasso/2),
                                                penalty.factor=pf,alpha=1,family=fam,intercept=interc))
      if(class(cvliklasso0) == "try-error" | !cvllasso) cvliklasso <- NA else cvliklasso <- cvliklasso0$cvm[1]
    }
    cvlnstot <- c(cvlnstot,cvelasso=cvliklasso)
    penlasso <- glmnet(x=X0,y=response,offset=offset,nlambda=1,lambda=optllasso,penalty.factor=pf,alpha=1,family=fam,
                       intercept=interc,standardize=FALSE)
    betaspenalizedlasso <- penlasso$beta[1:nr]
    whichlasso <- which(betaspenalizedlasso != 0)
    betaslasso <- betaspenalizedlasso[whichlasso]
    predobj <- c(predobj,list(penlasso))
    reslasso <- list(cvllasso=cvliklasso,whichlasso=whichlasso,betaslasso=betaslasso)
    if(trace) print(paste("lasso uses",length(whichlasso),"penalized variables"))
  }
  resEN <- list()
  #one cannot select more than the nr of variables

  # ---- Optional variable selection via elastic net on the weighted
  # design: find lambda1 yielding at most `maxsel` nonzero betas ----
  if(selectionEN){
    if(trace) print("Variable selection by elastic net started...")
    for(maxsel0 in maxsel){
      maxsel2 <- min(maxsel0,nr)
      if(trace) print(paste("Maximum nr of variables",maxsel2))
      # Root-finding target: (#selected variables) - maxsel2
      fsel <- function(lam1,maxselec=maxsel2,lam2){
        if(lam1==0) return(nfeat-maxselec) else {
          penselEN <- penalized(responsemin,XMw0,lambda1=lam1,lambda2=lam2,
                                unpenalized=nopen,data=datapred,trace=FALSE,maxiter=100)
          coef <- penselEN@penalized
          return(length(coef[coef!=0])-maxselec)
        }
      }
      lam1 <- uniroot(fsel,interval=c(0,optl*10),maxiter=50,lam2=optl)$root
      penselEN0 <- penalized(responsemin,XMw0,lambda1=lam1,lambda2=optl, unpenalized=nopen,data=datapred,
                             trace=FALSE,maxiter=100)
      coefEN0 <- penselEN0@penalized
      whichEN <- which(coefEN0 != 0)
      # Refit ridge on the selected variables only
      penselEN <- penalized(responsemin,XMw0[,whichEN,drop=FALSE],lambda2=optl, unpenalized=nopen,data=datapred,
                            trace=FALSE,maxiter=100)
      coefEN <- penselEN@penalized
      predobj <- c(predobj,penselEN)
      resEN <- c(resEN,list(list(whichEN=whichEN,betasEN=coefEN)))
    }
    names(resEN) <- paste("resEN",maxsel,sep="")
  }

  # ---- Optional comparison: model with unpenalized covariates only ----
  if(compareunpenal){
    if(model=="survival"){
      if(trace) print("Starting unpenalized Cox-model")
      # Bogus penalized covariate with a huge lambda2 effectively
      # leaves only the unpenalized part
      bogus <- matrix(rnorm(nsam),ncol=1)
      if(trace) print(dim(datapred))
      penlambdas0 <- penalized(response,penalized = bogus,unpenalized = nopen,lambda1=0,lambda2=10^8, data=datapred,
                               trace=trace)
      predobj <- c(predobj,penlambdas0)
    } else {
      if(model == "logistic") famglm <- "binomial" else famglm <- "gaussian"
      if(trace) print("Starting unpenalized glm")
      form <- formula(paste("response","~",as.character(unpenal)[2]))
      modelglm <- glm(form,family=famglm,data=dataunpen)
      predobj <- c(predobj,list(modelglm))
    }
  }

  # ---- Assemble and return results ----
  printlam <- function(lambs) {if(length(lambs)<=10) return(lambs) else return(summary(lambs))}
  suml <- lapply(lambdas,printlam)
  if(trace) print("Final lambda multipliers (summary):")
  if(trace) print(suml)
  if(trace) print(paste("CVLs",cvlnstot))
  timetot <- proc.time()-pmt0
  if(trace) print(paste("Total computation time:",timetot[3]))
  names(predobj) <- nmp
  colnames(almvecall) <- nmpweight
  if(savepredobj=="last") {
    predobj <- predobj[length(predobj)]
    almvecall <- matrix(lmvecall,ncol=1)
  }
  if(savepredobj=="none") predobj <- NULL
  if(trace) cat("\n")
  return(list(true=response,cvfit = cvlnstot,lambdamults = lambdas, optl=optl, lambdamultvec = almvecall,
              predobj=predobj,betas=newbeta, reslasso=reslasso,
              resEN = resEN, model=model, arguments=arguments,allpreds=allpreds))
}
|
/R/grridge.R
|
no_license
|
magnusmunch/GRridge
|
R
| false
| false
| 25,802
|
r
|
grridge <- function(highdimdata, response, partitions, unpenal = ~1,
offset=NULL, method="exactstable",
niter=10, monotone=NULL, optl=NULL, innfold=NULL,
fixedfoldsinn=TRUE, maxsel=c(25,100),selectionEN=FALSE,cvlmarg=1,
savepredobj="all", dataunpen=NULL, ord = 1:length(partitions),
comparelasso=FALSE,optllasso=NULL,cvllasso=TRUE,
compareunpenal=FALSE,trace=FALSE,modus=1,
EBlambda=FALSE,standardizeX = TRUE){#
# highdimdata=simdata; response=round(exp(Y)/(1+exp(Y))); partitions=part5; unpenal = ~1; innfold=3;
# offset=NULL; method="exactstable";
# niter=10; monotone=NULL; optl=NULL; innfold=NULL;
# fixedfoldsinn=TRUE; maxsel=c(25,100);selectionEN=TRUE;cvlmarg=1;
# savepredobj="all"; dataunpen=NULL; ord = 1:length(partitions);
# comparelasso=FALSE;optllasso=NULL;cvllasso=TRUE;
# compareunpenal=FALSE;trace=FALSE;modus=1;
# EBlambda=FALSE;standardizeX = TRUE
if(class(response) =="factor") {
nlevel <- length(levels(response))
if(nlevel != 2){
print("Response is not binary, so not suitable for two-class classification.")
return(NULL)
} else {
model = "logistic"
if(trace) print("Binary response, executing logistic ridge regression")
lev <- levels(response)
if(trace) print(paste("Predicting probability on factor level",lev[2]))
}} else {
if(class(response) == "numeric" | class(response)=="integer"){
valresp <- sort(unique(response))
if(length(valresp)==2 & valresp[1]==0 & valresp[2]==1) {
model = "logistic"
if(trace) print("Binary response, executing logistic ridge regression")
} else {
model = "linear"
if(trace) print("Numeric continuous response, executing linear ridge regression")
return(.grridgelin(highdimdata=highdimdata, response=response,partitions= partitions, unpenal=unpenal,
offset=offset, method=method, niter=niter, monotone=monotone, optl=optl, innfold=innfold,
fixedfoldsinn=fixedfoldsinn, maxsel=maxsel,selectionEN=selectionEN,cvlmarg=cvlmarg,
savepredobj=savepredobj, dataunpen=dataunpen, ord = ord,
comparelasso=comparelasso,optllasso=optllasso,cvllasso=cvllasso,
compareunpenal=compareunpenal,trace=trace,modus=modus,
EBlambda=EBlambda,standardizeX = standardizeX))
}
} else {
if(class(response) == "Surv"){
model="survival"
if(trace) print("Survival response, executing cox ridge regression")
} else {
print("Non-valid response. Should be binary, numeric or survival.")
return(NULL)
}
}
}
if(standardizeX) {
if(trace) print("Covariates are standardized")
sds <- apply(highdimdata,1,sd)
sds2 <- sapply(sds,function(x) max(x,10^{-5}))
highdimdata <- (highdimdata-apply(highdimdata,1,mean))/sds2
}
nsam <- ncol(highdimdata)
if(method=="adaptridge" | method== "exact") niter <- 1
if(class(partitions[[1]]) =="integer"){
partitions=list(group=partitions)
ord=1
}
nclass <- length(partitions)
if(is.null(monotone)) monotone <- rep(FALSE,nclass)
if(length(monotone) != length(partitions)) {
print(paste("ERROR: length 'monotone' unequal to length 'partitions' "))
return(NULL)
}
partitions <- partitions[ord]
monotone <- monotone[ord]
nr <- nrow(highdimdata)
for(ncl in 1:nclass){
indexset <- unlist(partitions[[ncl]])
if(length(indexset) < nr){
print(paste("Warning: partition",ncl,"does not contain all row indices of the data"))
}
if(max(indexset) > nr | min(indexset)<1){
print(paste("ERROR: partition",ncl,"contains an invalid index, e.g. larger than number of data rows"))
return(NULL)
}
}
overlap <- c()
Wmat <- c()
nfeattot <- c()
for(ncl in 1:nclass){
indexset <- unlist(partitions[[ncl]])
nfeatcl <- length(unique(indexset))
nfeattot <- c(nfeattot,nfeatcl)
if(length(indexset) > nfeatcl){
if(trace) print(paste("Grouping",ncl,"contains overlapping groups"))
overlap <- c(overlap,TRUE)
whgroup <- partitions[[ncl]]
nover <- rep(0,nr)
for(k in 1:length(whgroup)){
wh <- whgroup[[k]]
nover[wh] <- nover[wh] + 1
}
Wmat <- cbind(Wmat,sqrt(1/nover))
} else {
if(trace) print(paste("Grouping",ncl,"contains mutually exclusive groups"))
overlap <- c(overlap,FALSE)
Wmat <- cbind(Wmat,rep(1,nr))
}
}
arguments <- list(partitions=partitions,unpenal=unpenal, offset=offset, method=method,
niter=niter, monotone=monotone, optl=optl, innfold=innfold,
fixedfoldsinn=fixedfoldsinn,
selectionEN=selectionEN,maxsel=maxsel,
cvlmarg=cvlmarg, dataunpen=dataunpen,savepredobj=savepredobj, ord=ord,
comparelasso=comparelasso, optllasso=optllasso,
compareunpenal=compareunpenal, modus=modus,EBlambda=EBlambda,standardizeX=standardizeX)
if(nr > 10000 & is.null(innfold)) print("NOTE: consider setting innfold=10 to save computing time")
nmp0 <- names(partitions)
if(is.null(nmp0)) nmp0 <- sapply(1:length(partitions),function(i) paste("Grouping",i))
nmp0 <- sapply(1:length(partitions),function(i) {
if(nmp0[i]=="") return(paste("Grouping",i)) else return(nmp0[i])})
nmp <- c("NoGroups","GroupRegul")
nmpweight <- nmp #to be used later
if(comparelasso) nmp <- c(nmp,"lasso")
#new 29/11
if(selectionEN) nmp <- c(nmp,paste("EN",maxsel,sep=""))
if(compareunpenal) nmp <- c(nmp,"modelunpen")
if((unpenal != ~0) & (unpenal != ~1)) {
if(is.null(dataunpen)) {print("If unpenal contains variables, data of
the unpenalized variables should be specified in the data slot!")
return(NULL)
}
}
nsam <- ncol(highdimdata)
if(!is.null(offset)){
noffs <- length(offset)
offsets <-"c("
if(noffs==1) {for(i in 1:(nsam-1)) offsets <- paste(offsets,offset,",",sep="")} else {
for(i in 1:(nsam-1)) offsets <- paste(offsets,offset[i],",",sep="")}
if(noffs==1) offsets <- paste(offsets,offset,")",sep="") else offsets <- paste(offsets,offset[nsam],")",sep="")
if((unpenal != ~0) & (unpenal != ~1)){
unpenal <- formula(paste(deparse(unpenal),"+ offset(",offsets,")",sep=""))
} else {
unpenal <- formula(paste("~","offset(",offsets,")",sep=""))
}
}
if(is.null(dataunpen)) datapred <- data.frame(fake=rep(NA,ncol(highdimdata))) else datapred <- dataunpen
nopen <- unpenal
if(is.null(innfold)) foldinit <- nsam else foldinit <- innfold
pmt0<- proc.time()
optl0 <- optl
if(is.null(optl)){
if(trace) print("Finding lambda for initial ridge regression")
if(fixedfoldsinn) set.seed(346477)
opt <- optL2(response, penalized = t(highdimdata),fold=foldinit,unpenalized=nopen,data=datapred,trace=trace)
time1 <- proc.time()-pmt0
if(trace) print(opt$cv)
if(trace) print(paste("Computation time for cross-validating main penalty parameter:",time1[3]))
optl <- opt$lambda
if(trace) print(paste("lambda2",optl))
if(is.infinite(optl)) {
if(trace) print("Infinite penalty returned. Data contains no signal. Penalty set to 10^10 ")
optl <- 10^10
}
arguments$optl <- optl
}
pmt <- proc.time()
nsam <- ncol(highdimdata)
nfeat <- nrow(highdimdata)
XM0 <- t(highdimdata)
response0 <- response
cvlnstot <- rep(0,(nclass+1))
allpreds <- c()
whsam <- 1:nsam
responsemin <- response0
pen0 <- penalized(responsemin, penalized = XM0, lambda2 = optl, unpenalized=nopen,data=cbind(XM0,datapred),
trace=trace)
nmunpen <- names(pen0@unpenalized)
if(is.element("(Intercept)",nmunpen)) addintercept <- TRUE else addintercept <- FALSE
if(is.null(innfold)) {nf <- nrow(XM0)} else {
if(!is.null(optl0)) {
nf <- innfold
if(fixedfoldsinn) set.seed(346477)
} else {nf <- opt$fold}
}
opt2 <- cvl(responsemin, penalized = XM0,fold=nf, lambda2 = optl,unpenalized=nopen,data=datapred,trace=trace)
nf <- opt2$fold
cvln0 <- opt2$cvl
cvlnprev <- cvln0
penprev <- pen0
pen <- pen0
if(trace) print(cvln0)
XMw0 <- XM0
XMw0prev <- XM0
converged <- FALSE
conv <- rep(FALSE,nclass)
controlbound1 <-1000
controlbound2 <- controlbound3 <- 10
almvecall <- rep(1,nfeat)
lambdas <- lapply(partitions, function(cla) {
ngroup <- length(cla)
return(rep(1,ngroup))
})
lmvec <- lmvecprev <- array(1,nfeat)
i <- 1
while(!converged & i <= niter){
cl <- 1
if(method=="adaptridge") cl <- nclass
while(cl <= nclass){
convcl <- conv[cl]
if(!convcl){
whgr <- partitions[[cl]]
lenggr <- unlist(lapply(whgr,length))
ngroup1 <- length(whgr)
names(lambdas[[cl]]) <- names(whgr)
coeff <- penprev@penalized
if(model == "survival"){
preds <- predict(penprev,XMw0,data=datapred)
} else {
preds <- predict(penprev,XMw0,data=datapred)[1:nsam]
}
coeffsq <- coeff^2
if(model=="logistic") {
Wi <- sqrt(preds*(1-preds))
constlam <- 2
}
# if(model == "linear"){ #running grridgelin when model =linear
# Wi <- rep(1,length(preds))
# constlam <- 1
# }
if(model == "survival"){
resptime <- response[,1]
predsnew <- -log(sapply(1:nsam,function(k) survival(preds,time=resptime[k])[k]))
Wi <- sqrt(predsnew)
constlam <- 2
}
if(!is.null(dataunpen)) {
mm <- model.matrix(nopen,dataunpen)
XMW <- t(t(cbind(XMw0,10^5*mm)) %*% diag(Wi))
} else {
if(addintercept) XMW <- t(t(cbind(XMw0,rep(10^5,nsam))) %*% diag(Wi)) else XMW <- t(t(XMw0) %*% diag(Wi))
}
SVD <- svd(XMW)
leftmat <- SVD$v %*% diag(1/((SVD$d)^2+constlam*optl)) %*% diag(SVD$d) %*% t(SVD$u)
# if(model=="linear"){
# Hatm <- XMW %*% leftmat
# df <- nsam - sum(diag(2*Hatm - Hatm %*% t(Hatm)))
# VarRes <- sum((response - preds)^2)/df
# print(paste("Sigma^2 estimate:",VarRes))
# vars3 <- VarRes*rowSums(leftmat^2)
# } else {
vars3 <- rowSums(leftmat^2)
#}
which0 <- which(vars3==0)
vars3[which0] <- 10^{-30}
# if(model=="linear"){
# mycoeff2svd <- (leftmat %*% response)^2
# }
if(model=="logistic"){
if(is.factor(response)) respnum <- as.numeric(response)-1 else respnum <- response
z <- matrix(log(preds/(1-preds))+(respnum - preds)/(preds*(1-preds)),ncol=1)
if(modus == 1) mycoeff2svd <- coeffsq
if(modus == 2) mycoeff2svd <- (leftmat %*% z)^2
}
if(model=="survival"){
mycoeff2svd <- coeffsq
}
cii2 <- (rowSums(leftmat * t(XMW)))^2
leftmat <- leftmat/sqrt(vars3)
lowerleft <- 10^(-30)
lefts2 <- function(group){
ind <- whgr[[group]]
ngr <- lenggr[group]
coefftau2 <- sum(sapply(mycoeff2svd[ind]/vars3[ind],function(x) max(x,1)))-length(ind)
return(max(lowerleft,coefftau2/ngr))
}
leftside <- sapply(1:length(whgr),lefts2)
ellarg0 <- length(leftside[leftside>lowerleft])/length(leftside)
if(ellarg0 <=.5){
if(trace) print(paste("Partition",nmp0[cl],"NOT ITERATED"))
conv[cl] <- TRUE
cvln1 <- cvlnprev
XMw0 <- XMw0prev
pen <- penprev
} else {
lefts2ran <- function(group,randomind){
ind <- whgr[[group]]
ngr <- lenggr[group]
coefftau2 <- sum(sapply(mycoeff2svd[1:nfeat][randomind[ind]]/vars3[1:nfeat][randomind[ind]],
function(x) max(x,1)))-length(randomind[ind])
return(max(lowerleft,coefftau2/ngr))
}
randomiz <- function(fakex){
randomind2 <- sample(1:nfeat)
leftsideran <- sapply(1:length(whgr),lefts2ran,randomind=randomind2)
return(leftsideran)}
nlefts <- 100
leftsran <- sapply(1:nlefts,randomiz)
means <- apply(leftsran,1,mean)
leftsrancen <- t(t(leftsran)-means)
relerror <- sum(abs(leftsrancen))/(nlefts*sum(abs(means)))
if(cl==1 & i==1) if(trace) print(cvln0)
if(trace) print(paste("Relative error:",relerror))
if(relerror>= 0.1) print("WARNING: large relative error (>=0.1). Consider using larger groups of variable.")
nadd <- ncol(XMW) - ncol(XMw0)
rightmat = t(t(XMW) * c(Wmat[,cl],rep(1,nadd)))
rightmats <- lapply(1:length(whgr),function(j){
rightj2 <- rightmat[,whgr[[j]]]
rcp <- rightj2 %*% t(rightj2)
return(rcp)
})
coefmatfast <- t(apply(matrix(1:length(whgr),nrow=length(whgr)),1,
function(i){
lefti2 <- leftmat[whgr[[i]],]
lcp <- t(lefti2) %*% lefti2
ckls <- sapply(1:length(whgr),function(j){
rcp <- rightmats[[j]]
return(sum(lcp*rcp))
})
return(ckls)
}))
coefmatfast <- coefmatfast/lenggr
CNfun <- function(lam,cfmmat=coefmatfast){
ng <- nrow(cfmmat)
dmax <- max(diag(cfmmat))
cfmlam <- (1-lam)*cfmmat + lam*diag(dmax,nrow=ng)
eigenvals <- eigen(cfmlam,only.values=TRUE)$values
CN <- eigenvals[1]/eigenvals[ng]
return(Re(CN))
}
lams <- seq(0,1,by=0.005)
CNsRan <- sapply(lams, CNfun,cfmmat=coefmatfast)
CNsRanre <- CNsRan*relerror
if(relerror<=0.1){
lam <- lams[which(CNsRanre<=0.1)[1]]
} else lam <- 1
if(trace) print(paste("Shrink Factor coefficient matrix",lam))
cfmmat <- coefmatfast;
ng <- nrow(cfmmat)
dmax <- max(diag(cfmmat))
cfmlam <- (1-lam)*cfmmat + lam*diag(dmax,nrow=ng)
if(method=="exactstable"){
soltau = solve(sum(cfmlam),sum(leftside))
sol = solve(cfmlam,leftside)
low <- soltau/controlbound1;up = soltau*controlbound1
parinint <- sapply(sol,function(x) min(max(low,x),up))
minopt <- optim(par = parinint,fn = function(pars=c(parinint)) sum(leftside - cfmlam %*% pars)^2,
method="L-BFGS-B",
lower=rep(low,ngroup1),upper=rep(up,ngroup1))
tausqest0 <- minopt$par
}
if(method=="exact"){
soltau = solve(sum(coefmatfast),sum(leftside))
sol = solve(coefmatfast,leftside)
low <- soltau/controlbound2;up = soltau*controlbound2
parinint <- sapply(sol,function(x) min(max(low,x),up))
minopt <- optim(par = parinint,fn = function(pars=c(parinint)) sum(leftside- cfmlam %*% pars)^2,
method="L-BFGS-B",
lower=rep(low,ngroup1),upper=rep(up,ngroup1))
tausqest0 <- minopt$par
}
if(method=="stable"){
soltau = solve(sum(coefmatfast),sum(leftside))
solhyb <- sapply(1:ngroup1,function(i){
leftsidei <- leftside[i]
rightsidei <- c(coefmatfast[i,i], sum(coefmatfast[i,-i]*soltau))
soli <- (leftsidei-rightsidei[2])/rightsidei[1]
return(max(min(soltau*controlbound3,soli),soltau/controlbound3))
})
}
if(method=="simple"){
solsim = leftside
tausqest <- solsim
if(trace) print("simple")
}
if(method=="exact") {
tausqest <- tausqest0
if(trace) print("exact")
}
if(method=="exactstable") {
tausqest <- tausqest0
if(trace) print("exactstable")
}
if(method=="stable") {
if(trace) print("stable")
tausqest <- solhyb
}
if(method=="adaptridge") {
if(trace) print("adaptive ridge")
}
if(method=="stable" | method=="exact" | method=="exactstable" | method=="simple"){
lambdanoncal <- 1/tausqest
if(monotone[cl]){
weigh = unlist(lapply(whgr,length))
lambdamultnoncal <- pava(lambdanoncal,w=weigh)
} else lambdamultnoncal <- lambdanoncal
tausqest<-1/lambdamultnoncal
nfeatcl <- nfeattot[cl]
overl <- overlap[cl]
if(!overl){
con3 <- sum(sapply(1:length(whgr),function(gr){return(length(whgr[[gr]])*tausqest[gr])}))
tausqestcal<- nfeatcl/con3*tausqest
lambdamult <- 1/tausqestcal
if(trace) print(lambdamult)
for(k in 1:length(whgr)){
wh <- whgr[[k]]
XMw0[,wh] <- XMw0[,wh]/sqrt(lambdamult[k])
}
} else {
tauk <- rep(0,nfeat)
Wsq <- (Wmat[,cl])^2
for(k in 1:length(whgr)){
wh <- whgr[[k]]
tauk[wh] <- tauk[wh] + tausqest[k]
}
tauk <- tauk*Wsq
whna <- which(is.na(tauk))
if(length(whna)>0) con3 <- sum(tauk[-whna]) else con3 <- sum(tauk)
tausqestcal0<- nfeatcl/con3*tausqest
lambdamult <- 1/tausqestcal0
if(trace) print(lambdamult)
tausqestcal<- (nfeatcl/con3)*tauk
lambdamultperk <- 1/tausqestcal
lambdamultperk[whna]<- 1
XMw0 <- t(t(XMw0)/sqrt(lambdamultperk))
}
} else {
tausqest <- coeffsq
con3 <- sum(tausqest)
tausqestcal<- nfeat/con3*tausqest
lambdamult <- 1/tausqestcal
XMw0 <- t(t(XMw0)/sqrt(lambdamult))
}
opt2w <- cvl(responsemin, penalized = XMw0,fold=nf,lambda2=optl,
unpenalized=nopen,data=datapred, trace=trace)
cvln1 <- opt2w$cvl
if(trace) print(cvln1)
if((cvln1 - cvlnprev)/abs(cvlnprev) > 1/100 | ((cvln1 - cvlnprev)/abs(cvlnprev) >= 0 & i==1)){
pen <- penalized(responsemin, penalized = XMw0, trace=trace,
lambda2 = optl,unpenalized=nopen,data=datapred)
if(niter>1){
if(!overl){
for(group in 1:ngroup1){
ind <- whgr[[group]]
lmvec[ind] <- lmvec[ind]*lambdamult[group]
}} else {
lmvec <- lmvec * lambdamultperk
}
}
lambdas[[cl]] <- lambdas[[cl]]*lambdamult
cvlnprev <- cvln1
penprev <- pen
XMw0prev <- XMw0
if(trace) print(paste("Partition",nmp0[cl],"improved results"))
} else {
if(niter>1) if(trace) print(paste("Partition",nmp0[cl],"CONVERGED after",i,"iterations"))
else if(trace) print(paste("Partition",nmp0[cl],"did not improve results"))
conv[cl] <- TRUE
cvln1 <- cvlnprev
XMw0 <- XMw0prev
pen <- penprev
}
}
}
cl <- cl+1
}
if(sum(conv)==nclass){
converged <- TRUE
if(niter>1) if(trace) print(paste("All partitions CONVERGED after",i,"iterations"))
}
i <- i+1
}
if(niter==0) {pen <- pen0;XMw0<-XM0;cvln1<-cvln0;soltau <- NULL}
if(model=="survival"){
pred0 <- predict(pen0,XM0,unpenalized=nopen,data=datapred)
predw <- predict(pen,XMw0,unpenalized=nopen,data=datapred)
} else {
pred0 <- predict(pen0,XM0,unpenalized=nopen,data=datapred)[1:nsam]
predw <- predict(pen,XMw0,unpenalized=nopen,data=datapred)[1:nsam]
}
predshere <- cbind(pred0,predw)
cvlnssam <- c(cvln0,cvln1)
lmvecall <- lmvec
almvecall <- cbind(almvecall,lmvecall)
predobj <- c(pen0,pen)
allpreds <- predshere
whichsel <- NULL
betassel <- NULL
npr <- length(predobj)
pred2 <-predobj[[npr]]
lambs <- lmvecall
oldbeta <- pred2@penalized
newbeta <- oldbeta/sqrt(lambs)
time2 <- proc.time()-pmt
if(trace) print(paste("Computation time for adaptive weigthing:",time2[3]))
cvlnstot <- cvlnssam
reslasso <- NULL
mm <- NULL
# if(model=="survival" & comparelasso){
# print("Comparison with Cox-lasso is not yet supported")
# comparelasso <- arguments$comparelasso <-FALSE
# }+
if(comparelasso){
if(model == "logistic") fam <- "binomial"
if(model == "linear") fam <- "gaussian"
if(model == "survival") fam <- "cox"
interc <- TRUE
if(unpenal == ~0 | fam=="cox") interc <- FALSE
if((is.null(dataunpen)) | (unpenal == ~0) | (unpenal == ~1)) {
X0 <- t(highdimdata)
pf <- rep(1,nr)
nunpen <- 0
} else {
mm <- model.matrix(unpenal,dataunpen)
if(prod(mm[,1]==rep(1,nsam))==1) {
interc <- TRUE
mm <- mm[,-1,drop=FALSE]
} else {
interc <- FALSE
}
nunpen <- ncol(mm)
pf <- c(rep(1,nr),rep(0,nunpen))
}
X0 <- cbind(t(highdimdata),mm)
if(is.null(offset)) offset <- rep(0,nsam)
if(trace) print("Starting lasso by glmnet")
if(is.null(optllasso)){
if(trace) print("Finding lambda for lasso regression")
if(fixedfoldsinn) set.seed(346477)
#alpha=1 implies lasso
opt <- cv.glmnet(x=X0,y=response,offset=offset,foldid=nf,penalty.factor=pf,alpha=1,family=fam,
intercept=interc)
optllasso <- opt$lambda.min
whmin <- which(opt$lambda==optllasso)
if(trace) print(paste("lambda1 (multiplied by N):",optllasso*nsam))
arguments$optllasso <- optllasso
cvliklasso <- opt$cvm[whmin]
} else {
cvliklasso0 <- if(cvllasso) try(cv.glmnet(x=X0,y=response,offset=offset,foldid=nf,lambda=c(optllasso,optllasso/2),
penalty.factor=pf,alpha=1,family=fam,intercept=interc))
if(class(cvliklasso0) == "try-error" | !cvllasso) cvliklasso <- NA else cvliklasso <- cvliklasso0$cvm[1]
}
cvlnstot <- c(cvlnstot,cvelasso=cvliklasso)
penlasso <- glmnet(x=X0,y=response,offset=offset,nlambda=1,lambda=optllasso,penalty.factor=pf,alpha=1,family=fam,
intercept=interc,standardize=FALSE)
betaspenalizedlasso <- penlasso$beta[1:nr]
whichlasso <- which(betaspenalizedlasso != 0)
betaslasso <- betaspenalizedlasso[whichlasso]
predobj <- c(predobj,list(penlasso))
reslasso <- list(cvllasso=cvliklasso,whichlasso=whichlasso,betaslasso=betaslasso)
if(trace) print(paste("lasso uses",length(whichlasso),"penalized variables"))
}
resEN <- list()
#one cannot select more than the nr of variables
if(selectionEN){
if(trace) print("Variable selection by elastic net started...")
for(maxsel0 in maxsel){
maxsel2 <- min(maxsel0,nr)
if(trace) print(paste("Maximum nr of variables",maxsel2))
fsel <- function(lam1,maxselec=maxsel2,lam2){
if(lam1==0) return(nfeat-maxselec) else {
penselEN <- penalized(responsemin,XMw0,lambda1=lam1,lambda2=lam2,
unpenalized=nopen,data=datapred,trace=FALSE,maxiter=100)
coef <- penselEN@penalized
return(length(coef[coef!=0])-maxselec)
}
}
lam1 <- uniroot(fsel,interval=c(0,optl*10),maxiter=50,lam2=optl)$root
penselEN0 <- penalized(responsemin,XMw0,lambda1=lam1,lambda2=optl, unpenalized=nopen,data=datapred,
trace=FALSE,maxiter=100)
coefEN0 <- penselEN0@penalized
whichEN <- which(coefEN0 != 0)
penselEN <- penalized(responsemin,XMw0[,whichEN,drop=FALSE],lambda2=optl, unpenalized=nopen,data=datapred,
trace=FALSE,maxiter=100)
coefEN <- penselEN@penalized
predobj <- c(predobj,penselEN)
resEN <- c(resEN,list(list(whichEN=whichEN,betasEN=coefEN)))
}
names(resEN) <- paste("resEN",maxsel,sep="")
}
if(compareunpenal){
if(model=="survival"){
if(trace) print("Starting unpenalized Cox-model")
bogus <- matrix(rnorm(nsam),ncol=1)
if(trace) print(dim(datapred))
penlambdas0 <- penalized(response,penalized = bogus,unpenalized = nopen,lambda1=0,lambda2=10^8, data=datapred,
trace=trace)
predobj <- c(predobj,penlambdas0)
} else {
if(model == "logistic") famglm <- "binomial" else famglm <- "gaussian"
if(trace) print("Starting unpenalized glm")
form <- formula(paste("response","~",as.character(unpenal)[2]))
modelglm <- glm(form,family=famglm,data=dataunpen)
predobj <- c(predobj,list(modelglm))
}
}
printlam <- function(lambs) {if(length(lambs)<=10) return(lambs) else return(summary(lambs))}
suml <- lapply(lambdas,printlam)
if(trace) print("Final lambda multipliers (summary):")
if(trace) print(suml)
if(trace) print(paste("CVLs",cvlnstot))
timetot <- proc.time()-pmt0
if(trace) print(paste("Total computation time:",timetot[3]))
names(predobj) <- nmp
colnames(almvecall) <- nmpweight
if(savepredobj=="last") {
predobj <- predobj[length(predobj)]
almvecall <- matrix(lmvecall,ncol=1)
}
if(savepredobj=="none") predobj <- NULL
if(trace) cat("\n")
return(list(true=response,cvfit = cvlnstot,lambdamults = lambdas, optl=optl, lambdamultvec = almvecall,
predobj=predobj,betas=newbeta, reslasso=reslasso,
resEN = resEN, model=model, arguments=arguments,allpreds=allpreds))
}
|
# Load package dependencies and the shared helper functions used below
# (fit_density_forest, dist_split_prediction_bands, ...) -- defined elsewhere.
source("../requirements.R")
source("../base_functions.R")
# Output directory for the simulation results; created if absent
# (showWarnings = FALSE silences the warning when it already exists).
folder <- "../rds/gaussian_hom/"
dir.create(folder, showWarnings = FALSE)
# if x is given, only the response is generated again (covariates are reused)
# Generate a homoscedastic Gaussian regression dataset.
#
# Covariates are drawn i.i.d. Uniform(-5, 5); the response depends on the
# first covariate only, with additive standard-normal noise:
#   y = x[, 1] + N(0, 1).
#
# @param n Number of observations (ignored when `x` is supplied).
# @param d Number of covariates (ignored when `x` is supplied).
# @param x Optional covariate matrix; when given, only the response is
#   redrawn. Used to keep the test-set design fixed across repetitions.
# @return A list with elements `x` (covariate matrix) and `y` (numeric vector).
generate_hom_gaussian <- function(n, d, x = NULL)
{
  if (is.null(x)) {
    # `<-` rather than `=` for assignment, per R convention.
    x <- matrix(runif(n * d, -5, 5), n, d)
  }
  # Homoscedastic response: signal in the first covariate, unit-variance noise.
  y <- x[, 1] + rnorm(nrow(x), 0, 1)
  return(list(x = x, y = y))
}
# ---- Simulation settings ----
n_fits <- 20          # total number of I1 (fitting) datasets
n_repetitions <- 250  # total number of I2 (calibration) datasets per fit
n_each_set_grid <- c(200, 500, 1000, 2500, 5000)  # sizes of I1 and I2
n_test <- 500         # test-set size used to check coverage
d <- 20               # number of covariates
k <- 100              # kept from the full comparison script; unused in this median-only run
percent_train <- 0.7  # fraction of I1 used to fit the CDE (the rest validates it)
alpha <- 0.1          # nominal miscoverage level

# Data generator with the dimension fixed; when `x` is supplied only the
# response is redrawn (keeps the test design constant across repetitions).
generate_data <- function(n, x = NULL) generate_hom_gaussian(n = n, d = d, x = x)
data_test_aux <- generate_data(n = n_test)  # fixes x for the test set

# Result containers. The cd/quantile/reg variants are kept for shape
# compatibility with the full comparison script; only dist_split is filled here.
cd_split_global <- list()
cd_split_local <- list()
dist_split <- list()
quantile_split <- list()
reg_split <- list()
reg_split_w <- list()

for (n_each_index in seq_along(n_each_set_grid)) {
  print(n_each_index / length(n_each_set_grid))  # coarse progress indicator
  rep <- 1
  bands_global <- list()
  bands_local <- list()
  bands_dist <- list()
  bands_quantile <- list()
  bands_reg <- list()
  bands_reg_w <- list()
  for (n_fits_index in seq_len(n_fits)) {
    cat(".")
    # Draw a fresh I1 dataset and split it into fitting/validation parts.
    data_I1 <- generate_data(n = n_each_set_grid[n_each_index])
    # floor() makes the (previously implicit) truncation of the fractional
    # training-set size explicit; sample() truncates non-integer sizes anyway.
    which_train <- sample(seq_along(data_I1$y),
                          floor(length(data_I1$y) * percent_train))
    cde_fit <- fit_density_forest(xTrain = data_I1$x[which_train, , drop = FALSE],
                                  yTrain = data_I1$y[which_train, drop = FALSE],
                                  xValidation = data_I1$x[-which_train, , drop = FALSE],
                                  yValidation = data_I1$y[-which_train, drop = FALSE])
    for (ll in seq_len(n_repetitions)) {
      # Fresh calibration data. pred_I2/t_grid look unused here, but sourced
      # helpers could pick them up as free variables -- TODO confirm and drop.
      data_I2 <- generate_data(n = n_each_set_grid[n_each_index])
      pred_I2 <- predict(cde_fit, data_I2$x)
      t_grid <- seq(0, max(pred_I2$CDE), length.out = 500)
      # Dist-split: calibrate conformal bands on I2 at the fixed test design.
      fit_dist_split <- dist_split_prediction_bands(cde_fit,
                                                    xTrain = data_I2$x,
                                                    yTrain = data_I2$y,
                                                    xTest = data_test_aux$x,
                                                    alpha = alpha, median = TRUE)
      # Redraw the test responses on the fixed design, then evaluate the bands.
      data_test <- generate_data(n = n_test, x = data_test_aux$x)
      bands_dist[[rep]] <- dist_split_prediction_bands_evalY(fit_dist_split,
                                                             yTest = data_test$y)
      rep <- rep + 1
      gc()
    }
  }
  dist_split[[n_each_index]] <- eval_prediction_bands(xTest = data_test$x,
                                                      bands_dist,
                                                      alpha = alpha)
  dist_split[[n_each_index]]$n <- n_each_set_grid[n_each_index]
  # Save after each sample size so partial results survive interruption.
  saveRDS(dist_split, file = paste0(folder, "dist_median_split.RDS"))
  rm(bands_dist)
}
|
/simulations/comparisons_gaussian_hom_median_only.R
|
no_license
|
rizbicki/conformal-cde-experiments
|
R
| false
| false
| 2,944
|
r
|
# Load package dependencies and the shared helper functions used below
# (fit_density_forest, dist_split_prediction_bands, ...) -- defined elsewhere.
source("../requirements.R")
source("../base_functions.R")
# Output directory for the simulation results; created if absent.
folder <- "../rds/gaussian_hom/"
dir.create(folder, showWarnings = FALSE)

# Generate a homoscedastic Gaussian regression dataset:
# covariates i.i.d. Uniform(-5, 5), response y = x[, 1] + N(0, 1).
# When `x` is supplied, only the response is redrawn (covariates reused).
generate_hom_gaussian <- function(n, d, x = NULL)
{
  if (is.null(x)) {
    x <- matrix(runif(n * d, -5, 5), n, d)
  }
  y <- x[, 1] + rnorm(nrow(x), 0, 1)
  return(list(x = x, y = y))
}

# ---- Simulation settings ----
n_fits <- 20          # total number of I1 (fitting) datasets
n_repetitions <- 250  # total number of I2 (calibration) datasets per fit
n_each_set_grid <- c(200, 500, 1000, 2500, 5000)  # sizes of I1 and I2
n_test <- 500         # test-set size used to check coverage
d <- 20               # number of covariates
k <- 100              # kept from the full comparison script; unused in this median-only run
percent_train <- 0.7  # fraction of I1 used to fit the CDE (the rest validates it)
alpha <- 0.1          # nominal miscoverage level

generate_data <- function(n, x = NULL) generate_hom_gaussian(n = n, d = d, x = x)
data_test_aux <- generate_data(n = n_test)  # fixes x for the test set

# Result containers. The cd/quantile/reg variants are kept for shape
# compatibility with the full comparison script; only dist_split is filled here.
cd_split_global <- list()
cd_split_local <- list()
dist_split <- list()
quantile_split <- list()
reg_split <- list()
reg_split_w <- list()

for (n_each_index in seq_along(n_each_set_grid)) {
  print(n_each_index / length(n_each_set_grid))  # coarse progress indicator
  rep <- 1
  bands_global <- list()
  bands_local <- list()
  bands_dist <- list()
  bands_quantile <- list()
  bands_reg <- list()
  bands_reg_w <- list()
  for (n_fits_index in seq_len(n_fits)) {
    cat(".")
    data_I1 <- generate_data(n = n_each_set_grid[n_each_index])
    # floor() makes the (previously implicit) truncation of the fractional
    # training-set size explicit.
    which_train <- sample(seq_along(data_I1$y),
                          floor(length(data_I1$y) * percent_train))
    cde_fit <- fit_density_forest(xTrain = data_I1$x[which_train, , drop = FALSE],
                                  yTrain = data_I1$y[which_train, drop = FALSE],
                                  xValidation = data_I1$x[-which_train, , drop = FALSE],
                                  yValidation = data_I1$y[-which_train, drop = FALSE])
    for (ll in seq_len(n_repetitions)) {
      # Fresh calibration data. pred_I2/t_grid look unused here, but sourced
      # helpers could pick them up as free variables -- TODO confirm and drop.
      data_I2 <- generate_data(n = n_each_set_grid[n_each_index])
      pred_I2 <- predict(cde_fit, data_I2$x)
      t_grid <- seq(0, max(pred_I2$CDE), length.out = 500)
      # Dist-split: calibrate conformal bands on I2 at the fixed test design.
      fit_dist_split <- dist_split_prediction_bands(cde_fit,
                                                    xTrain = data_I2$x,
                                                    yTrain = data_I2$y,
                                                    xTest = data_test_aux$x,
                                                    alpha = alpha, median = TRUE)
      # Redraw the test responses on the fixed design, then evaluate the bands.
      data_test <- generate_data(n = n_test, x = data_test_aux$x)
      bands_dist[[rep]] <- dist_split_prediction_bands_evalY(fit_dist_split,
                                                             yTest = data_test$y)
      rep <- rep + 1
      gc()
    }
  }
  dist_split[[n_each_index]] <- eval_prediction_bands(xTest = data_test$x,
                                                      bands_dist,
                                                      alpha = alpha)
  dist_split[[n_each_index]]$n <- n_each_set_grid[n_each_index]
  # Save after each sample size so partial results survive interruption.
  saveRDS(dist_split, file = paste0(folder, "dist_median_split.RDS"))
  rm(bands_dist)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GauPro_S3.R
\name{plot.GauPro}
\alias{plot.GauPro}
\title{Plot for class GauPro}
\usage{
\method{plot}{GauPro}(x, ...)
}
\arguments{
\item{x}{Object of class GauPro}
\item{...}{Additional parameters}
}
\value{
Nothing
}
\description{
Plot for class GauPro
}
\examples{
n <- 12
x <- matrix(seq(0,1,length.out = n), ncol=1)
y <- sin(2*pi*x) + rnorm(n,0,1e-1)
gp <- GauPro(X=x, Z=y, parallel=FALSE)
if (requireNamespace("MASS", quietly = TRUE)) {
plot(gp)
}
}
|
/GauPro/man/plot.GauPro.Rd
|
no_license
|
akhikolla/TestedPackages-NoIssues
|
R
| false
| true
| 568
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GauPro_S3.R
\name{plot.GauPro}
\alias{plot.GauPro}
\title{Plot for class GauPro}
\usage{
\method{plot}{GauPro}(x, ...)
}
\arguments{
\item{x}{Object of class GauPro}
\item{...}{Additional parameters}
}
\value{
Nothing
}
\description{
Plot for class GauPro
}
\examples{
n <- 12
x <- matrix(seq(0,1,length.out = n), ncol=1)
y <- sin(2*pi*x) + rnorm(n,0,1e-1)
gp <- GauPro(X=x, Z=y, parallel=FALSE)
if (requireNamespace("MASS", quietly = TRUE)) {
plot(gp)
}
}
|
# Select the rows for female athletes (sex == 'f'), keeping all columns.
ais[ais$sex == 'f', ]
# Select the rows whose sport is Row or Netball; after the "," pick the
# columns to display (sex and body weight).
# %in% is NA-safe: a missing sport yields FALSE instead of an all-NA row,
# unlike the original `==` / `|` comparison.
ais[ais$sport %in% c("Row", "Netball"), c("sex", "wt")]
|
/oefening3.6.R
|
no_license
|
embobrecht/dataanalyse
|
R
| false
| false
| 236
|
r
|
# Select the rows whose sex is female.
ais[ais$sex=='f',]
# Select the rows for the sports Row and Netball;
# after the "," choose which columns to display (sex and body weight).
ais[ais$sport=='Row' | ais$sport=='Netball',c("sex",'wt')]
|
# Simulate 10,000 Farkle games under each candidate turn strategy.
# simulate_farkle() and the strategy_* functions are defined elsewhere.
n_games <- 10000
sim_broke    <- simulate_farkle(n_games, strategy_go_for_broke, parallel = TRUE)
sim_max_dice <- simulate_farkle(n_games, strategy_prefer_max_dice, parallel = TRUE)
sim_threes   <- simulate_farkle(n_games, strategy_prefer_threes, parallel = TRUE)
sim_fours    <- simulate_farkle(n_games, strategy_prefer_fours, parallel = TRUE)

# Summarize each strategy, then stack the summaries into one data frame
# with a "strategy" column identifying each source.
strategy_summaries <- list(
  "Go For Broke" = summarize_strategy(sim_broke),
  "Prefer Threes" = summarize_strategy(sim_threes),
  "Prefer Fours" = summarize_strategy(sim_fours),
  "Maximize Dice to Roll" = summarize_strategy(sim_max_dice)
)
plot_data <- purrr::map_dfr(strategy_summaries, ~ ., .id = "strategy")

# One line per strategy: stop threshold vs. number of dice rolled.
ggplot(plot_data) +
  aes(n_rolled, stop_if_over, color = strategy) +
  geom_line() +
  coord_cartesian(ylim = c(0, 5250))
|
/ten-thousand-functions/notes/simulate.R
|
no_license
|
liston/slides
|
R
| false
| false
| 685
|
r
|
# Simulate 10,000 Farkle games under each candidate turn strategy.
# simulate_farkle() and the strategy_* functions are defined elsewhere;
# parallel = TRUE presumably runs the games across cores -- TODO confirm.
res <- simulate_farkle(10000, strategy_go_for_broke, parallel = TRUE)
res_max_dice <- simulate_farkle(10000, strategy_prefer_max_dice, parallel = TRUE)
res_three <- simulate_farkle(10000, strategy_prefer_threes, parallel = TRUE)
res_four <- simulate_farkle(10000, strategy_prefer_fours, parallel = TRUE)
# Summarize each strategy, bind the summaries into one data frame keyed by
# a "strategy" column, and plot one line per strategy.
list(
  "Go For Broke" = summarize_strategy(res),
  "Prefer Threes" = summarize_strategy(res_three),
  "Prefer Fours" = summarize_strategy(res_four),
  "Maximize Dice to Roll" = summarize_strategy(res_max_dice)
) %>%
  purrr::map_dfr(~ ., .id = "strategy") %>%
  ggplot() +
  aes(n_rolled, stop_if_over, color = strategy) +
  geom_line() +
  coord_cartesian(ylim = c(0, 5250))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-help.R
\docType{data}
\name{pertussisIgGPTParams2}
\alias{pertussisIgGPTParams2}
\title{Pertussis IgG-PT Response Parameters Data for Model 2}
\format{A list of data frames, one per antibody type; e.g. the \code{IgG} data frame contains 3000 rows with 7 parameters for the IgG antibody.}
\usage{
pertussisIgGPTParams2
}
\description{
List of data frames of all longitudinal parameters. Each data frame contains
Monte Carlo samples for each antibody type.
}
\examples{
# Show first rows of every dataframe contained in pertussisIgGPTParams2
lapply(pertussisIgGPTParams2, head)
}
|
/man/pertussisIgGPTParams2.Rd
|
no_license
|
cran/seroincidence
|
R
| false
| true
| 627
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-help.R
\docType{data}
\name{pertussisIgGPTParams2}
\alias{pertussisIgGPTParams2}
\title{Pertussis IgG-PT Response Parameters Data for Model 2}
\format{A list of data frames, one per antibody type; e.g. the \code{IgG} data frame contains 3000 rows with 7 parameters for the IgG antibody.}
\usage{
pertussisIgGPTParams2
}
\description{
List of data frames of all longitudinal parameters. Each data frame contains
Monte Carlo samples for each antibody type.
}
\examples{
# Show first rows of every dataframe contained in pertussisIgGPTParams2
lapply(pertussisIgGPTParams2, head)
}
|
\name{Bayesian Occupancy Single Season}
\alias{BoccSS}
\alias{BoccSS0}
\title{
Bayesian single-season occupancy modelling
}
\description{
Functions to estimate occupancy from detection/non-detection data for a single season using a Gibbs sampler coded in R or JAGS.
\code{BoccSS0} runs a model in R without covariates, and allows priors to be specified as beta distributions for probability of occupancy and probability of detection.
\code{BoccSS} runs a model in R allowing for covariates, using a probit link and conjugate normal priors, which can be specified as mean and covariance.
}
\usage{
BoccSS0(y, n, psiPrior=c(1,1), pPrior=c(1,1),
chains=3, draws=30000, burnin=100, ...)
BoccSS(DH, model=NULL, data=NULL, priors=list(),
chains=3, draws=30000, burnin=1000, thin=1, parallel,
seed=NULL, doWAIC=FALSE, ...)
}
\arguments{
\item{y}{
a vector with the number of detections at each site; or a 1/0/NA matrix (or data frame) of detection histories, sites x occasions.
}
\item{n}{
a scalar or vector with the number of visits (survey occasions) at each site; ignored if \code{y} is a matrix or data frame.
}
\item{psiPrior, pPrior}{
parameters for beta distributions to be used as priors for psi and p.
}
\item{DH}{
a 1/0/NA matrix (or data frame) of detection histories, sites x occasions.
}
\item{model}{
a list of formulae symbolically defining a linear predictor for each parameter in terms of covariates. If NULL, an intercept-only model is used, ie, psi(.) p(.).
}
\item{data}{
a data frame containing the variables in the model. For \code{occSStime}, a data frame with a row for each survey occasion; otherwise, a row for each site. Each site covariate has one column. Each survey covariate has one column for each occasion, and the column name must end with the occasion number (without leading zeros); eg, \code{Cov1, Cov2, ..., Cov15}. All covariates should be included in \code{data}, otherwise they will be sought in enclosing environments, which may not produce what you want -- and they won't be standardised.
}
\item{priors}{
a list with elements for prior mean and variance for coefficients; see Details. If NULL, improper flat priors are used.
}
\item{chains}{
number of MCMC chains to run.
}
\item{draws}{
minimum number of values to return. The actual number will be a multiple of the number of chains.
}
\item{burnin}{
number of iterations per chain to discard as burn-in.
}
\item{thin}{
the thinning interval between consecutive values in the chain.
}
\item{parallel}{
logical; if TRUE \emph{and} \code{n.chains} > 1 \emph{and} available cores (as returned by \code{parallel::detectCores}) > 2, chains will be run in parallel. If missing, chains will be run in parallel if \code{n.chains} < available cores.
}
\item{doWAIC}{
logical; if TRUE, the Watanabe-Akaike Information Criterion is calculated. NOTE: THIS FEATURE IS STILL EXPERIMENTAL.
}
\item{seed}{
for reproducible results; note that parallel and sequential methods use different random number generators, so will give different results with the same seed.
}
\item{...}{
other arguments to pass to the function.
}
}
\details{
\code{BoccSS0} implements a simple model with one parameter for probability of occupancy and one for probability of detection, ie. a \code{psi(.) p(.)} model, using a Gibbs sampler implemented in R.
Independent beta distributions are used as priors for \code{BoccSS0}, as specified by \code{psiPrior} and \code{pPrior}. The defaults, \code{c(1, 1)}, correspond to uniform priors on the probabilities of occupancy and detection.
\code{BoccSS} uses a probit link to model occupancy and detection as a function of site covariates or survey covariates, as specified by \code{model} (Dorazio and Rodriguez 2012). It includes a built-in \code{.time} covariate which can be used for modelling p with time as a fixed effect, and \code{.Time, .Time2, .Time3} for a linear, quadratic or cubic trend. A built-in \code{.b} covariate corresponds to a behavioural effect, where detection depends on whether the species was detected on the previous occasion or not.
Note that most software uses a logistic (logit) link; see \link{Links}.
Coefficients on the probit scale are about half the size of the equivalent on the logit scale.
Priors for \code{BoccSS} are listed in the \code{priors} argument, which may contain elements:
\code{muPsi} and \code{muP} : the means for occupancy and detection coefficients respectively. This may be a vector with one value for each coefficient, including the intercept, or a scalar, which will be used for all. The default is 0.
\code{sigmaPsi} and \code{sigmaP} : the (co)variance for occupancy and detection coefficients respectively. This may be (1) a vector with one value for each coefficient, including the intercept, which represents the variance, assuming independence, or (2) a scalar, which will be used for all, or (3) a variance-covariance matrix. The default is 1, which for a probit link and standardized covariates is only mildly informative.
When specifying priors, note that numerical covariates are standardized internally before fitting the model. For an intercept-only model, a prior of Normal(0, 1) on the probit scale implies a Uniform(0, 1) or Beta(1, 1) prior on the probability scale.
If you are unsure of the order of predictors, do a short run and check the output, or pass unusable values (eg, \code{muPsi=numeric(100)}) and check the error message.
}
\value{
Returns an object of class \code{mcmcOutput}.
}
\references{
MacKenzie, D I; J D Nichols; A J Royle; K H Pollock; L L Bailey; J E Hines 2006. \emph{Occupancy Estimation and Modeling : Inferring Patterns and Dynamics of Species Occurrence}. Elsevier Publishing.
Dorazio and Rodriguez. 2012. A Gibbs sampler for Bayesian analysis of site-occupancy data. \emph{Methods in Ecology and Evolution}, 3, 1093-1098
}
\author{
Mike Meredith. \code{BoccSS} uses the Gibbs sampler described by Dorazio and Rodriguez (2012).
}
\seealso{
See the examples for the \code{\link{weta}} data set.
}
\examples{
# The blue ridge salamanders data from MacKenzie et al (2006) p99:
data(salamanders)
y <- rowSums(salamanders)
n <- rowSums(!is.na(salamanders))
tmp <- BoccSS0(y, n)
tmp
occSS0(y, n) # for comparison
plot(tmp)
}
|
/man/BoccSS.Rd
|
no_license
|
cran/wiqid
|
R
| false
| false
| 6,450
|
rd
|
\name{Bayesian Occupancy Single Season}
\alias{BoccSS}
\alias{BoccSS0}
\title{
Bayesian single-season occupancy modelling
}
\description{
Functions to estimate occupancy from detection/non-detection data for a single season using a Gibbs sampler coded in R or JAGS.
\code{BoccSS0} runs a model in R without covariates, and allows priors to be specified as beta distributions for probability of occupancy and probability of detection.
\code{BoccSS} runs a model in R allowing for covariates, using a probit link and conjugate normal priors, which can be specified as mean and covariance.
}
\usage{
BoccSS0(y, n, psiPrior=c(1,1), pPrior=c(1,1),
chains=3, draws=30000, burnin=100, ...)
BoccSS(DH, model=NULL, data=NULL, priors=list(),
chains=3, draws=30000, burnin=1000, thin=1, parallel,
seed=NULL, doWAIC=FALSE, ...)
}
\arguments{
\item{y}{
a vector with the number of detections at each site; or a 1/0/NA matrix (or data frame) of detection histories, sites x occasions.
}
\item{n}{
a scalar or vector with the number of visits (survey occasions) at each site; ignored if \code{y} is a matrix or data frame.
}
\item{psiPrior, pPrior}{
parameters for beta distributions to be used as priors for psi and p.
}
\item{DH}{
a 1/0/NA matrix (or data frame) of detection histories, sites x occasions.
}
\item{model}{
a list of formulae symbolically defining a linear predictor for each parameter in terms of covariates. If NULL, an intercept-only model is used, ie, psi(.) p(.).
}
\item{data}{
a data frame containing the variables in the model. For \code{occSStime}, a data frame with a row for each survey occasion; otherwise, a row for each site. Each site covariate has one column. Each survey covariate has one column for each occasion, and the column name must end with the occasion number (without leading zeros); eg, \code{Cov1, Cov2, ..., Cov15}. All covariates should be included in \code{data}, otherwise they will be sought in enclosing environments, which may not produce what you want -- and they won't be standardised.
}
\item{priors}{
a list with elements for prior mean and variance for coefficients; see Details. If NULL, improper flat priors are used.
}
\item{chains}{
number of MCMC chains to run.
}
\item{draws}{
minimum number of values to return. The actual number will be a multiple of the number of chains.
}
\item{burnin}{
number of iterations per chain to discard as burn-in.
}
\item{thin}{
the thinning interval between consecutive values in the chain.
}
\item{parallel}{
logical; if TRUE \emph{and} \code{n.chains} > 1 \emph{and} available cores (as returned by \code{parallel::detectCores}) > 2, chains will be run in parallel. If missing, chains will be run in parallel if \code{n.chains} < available cores.
}
\item{doWAIC}{
logical; if TRUE, the Watanabe-Akaike Information Criterion is calculated. NOTE: THIS FEATURE IS STILL EXPERIMENTAL.
}
\item{seed}{
for reproducible results; note that parallel and sequential methods use different random number generators, so will give different results with the same seed.
}
\item{...}{
other arguments to pass to the function.
}
}
\details{
\code{BoccSS0} implements a simple model with one parameter for probability of occupancy and one for probability of detection, ie. a \code{psi(.) p(.)} model, using a Gibbs sampler implemented in R.
Independent beta distributions are used as priors for \code{BoccSS0}, as specified by \code{psiPrior} and \code{pPrior}. The defaults, \code{c(1, 1)}, correspond to uniform priors on the probabilities of occupancy and detection.
\code{BoccSS} uses a probit link to model occupancy and detection as a function of site covariates or survey covariates, as specified by \code{model} (Dorazio and Rodriguez 2012). It includes a built-in \code{.time} covariate which can be used for modelling p with time as a fixed effect, and \code{.Time, .Time2, .Time3} for a linear, quadratic or cubic trend. A built-in \code{.b} covariate corresponds to a behavioural effect, where detection depends on whether the species was detected on the previous occasion or not.
Note that most software uses a logistic (logit) link; see \link{Links}.
Coefficients on the probit scale are about half the size of the equivalent on the logit scale.
Priors for \code{BoccSS} are listed in the \code{priors} argument, which may contain elements:
\code{muPsi} and \code{muP} : the means for occupancy and detection coefficients respectively. This may be a vector with one value for each coefficient, including the intercept, or a scalar, which will be used for all. The default is 0.
\code{sigmaPsi} and \code{sigmaP} : the (co)variance for occupancy and detection coefficients respectively. This may be (1) a vector with one value for each coefficient, including the intercept, which represents the variance, assuming independence, or (2) a scalar, which will be used for all, or (3) a variance-covariance matrix. The default is 1, which for a probit link and standardized covariates is only mildly informative.
When specifying priors, note that numerical covariates are standardized internally before fitting the model. For an intercept-only model, a prior of Normal(0, 1) on the probit scale implies a Uniform(0, 1) or Beta(1, 1) prior on the probability scale.
If you are unsure of the order of predictors, do a short run and check the output, or pass unusable values (eg, \code{muPsi=numeric(100)}) and check the error message.
}
\value{
Returns an object of class \code{mcmcOutput}.
}
\references{
MacKenzie, D I; J D Nichols; A J Royle; K H Pollock; L L Bailey; J E Hines 2006. \emph{Occupancy Estimation and Modeling : Inferring Patterns and Dynamics of Species Occurrence}. Elsevier Publishing.
Dorazio and Rodriguez. 2012. A Gibbs sampler for Bayesian analysis of site-occupancy data. \emph{Methods in Ecology and Evolution}, 3, 1093-1098
}
\author{
Mike Meredith. \code{BoccSS} uses the Gibbs sampler described by Dorazio and Rodriguez (2012).
}
\seealso{
See the examples for the \code{\link{weta}} data set.
}
\examples{
# The blue ridge salamanders data from MacKenzie et al (2006) p99:
data(salamanders)
y <- rowSums(salamanders)
n <- rowSums(!is.na(salamanders))
tmp <- BoccSS0(y, n)
tmp
occSS0(y, n) # for comparison
plot(tmp)
}
|
set.seed( 61 )
library(mvtnorm)
library(fields)
library(Rcpp)
library(mclust)
library(kernlab)
library(ConsensusClusterPlus)
# Simulate GLCM-style count data for 5 groups of 20 images each.
#
# For each group, a bivariate-normal point cloud is binned onto a 16 x 16
# unit grid, kernel-smoothed, and reduced to the 136 triangle cells; each
# simulated image rescales that probability vector by a random total count
# in [500, 20000] and rounds to integers.
#
# @param s scale factor for the bivariate-normal covariance (cloud spread).
# @return a (20*5) x 136 matrix of simulated counts; rows 1-20 are group 1,
#   ..., rows 81-100 are group 5.
simu=function(s){
  # Cell probabilities for one group: draw `mc` points from a bivariate
  # normal centred at (2+c, 14-c) with correlation -0.7, bin them on the
  # 16 x 16 grid, smooth the binned image, and return the 136 triangle
  # cells normalised to sum to 1.
  prob_glcm<-function(c,s,mc=30000){
    mu<-c(2+c,14-c)
    sigma<-matrix(s*c(1,-0.7,-0.7,1),nrow=2)
    elip<-rmvnorm(mc,mu,sigma)
    # Bin the sampled points into the 256 grid cells.  Cell (m, k) covers
    # the open box (m-1, m) x (k-1, k); its linear index is
    # 16-k+1+16*(m-1), i.e. row 17-k counted from the top, column-major.
    # This vectorised tabulate() replaces the original O(mc * 256) triple
    # loop; as in the original strict-inequality test, points outside
    # (0, 16) or falling exactly on a grid line are dropped.
    col_m <- ceiling(elip[,1])
    row_k <- ceiling(elip[,2])
    inside <- elip[,1] > 0 & elip[,1] < 16 & elip[,2] > 0 & elip[,2] < 16 &
      elip[,1] %% 1 != 0 & elip[,2] %% 1 != 0
    cell_count <- tabulate(16 - row_k[inside] + 1 + 16*(col_m[inside] - 1),
                           nbins = 16*16)
    z<-cell_count/sum(cell_count)
    # Re-index z into a symmetric 16 x 16 layout (mirroring across the
    # diagonal), column by column.
    z_whole<-z[c(1,17,33,49,65,81,97,113,129,145,161,177,193,209,225,241,
                 17,18,34,50,66,82,98,114,130,146,162,178,194,210,226,242,
                 33,34,35,51,67,83,99,115,131,147,163,179,195,211,227,243,
                 49,50,51,52,68,84,100,116,132,148,164,180,196,212,228,244,
                 65,66,67,68,69,85,101,117,133,149,165,181,197,213,229,245,
                 81,82,83,84,85,86,102,118,134,150,166,182,198,214,230,246,
                 97,98,99,100,101,102,103,119,135,151,167,183,199,215,231,247,
                 113,114,115,116,117,118,119,120,136,152,168,184,200,216,232,248,
                 129,130,131,132,133,134,135,136,137,153,169,185,201,217,233,249,
                 145,146,147,148,149,150,151,152,153,154,170,186,202,218,234,250,
                 161,162,163,164,165,166,167,168,169,170,171,187,203,219,235,251,
                 177,178,179,180,181,182,183,184,185,186,187,188,204,220,236,252,
                 193,194,195,196,197,198,199,200,201,202,203,204,205,221,237,253,
                 209,210,211,212,213,214,215,216,217,218,219,220,221,222,238,254,
                 225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,255,
                 241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,256)]
    # Cell centres, top row first (15.5:0.5 is a descending sequence).
    grid_xy <- expand.grid(c(0.5:15.5),c(15.5:0.5))
    I = as.image( Z=z_whole, x=grid_xy, grid=list(x=seq(0.5,15.5,1), y=seq(0.5,15.5,1)))
    image(I)   # side effect kept from the original: plot the binned image
    smooth.I <- image.smooth(I, theta=1)
    # NOTE the traversal order of the smoothed image: row r is read at
    # columns 16, 15, ..., 17-r, yielding the 136 triangle cells.
    # Preallocated instead of growing den with c() in a loop.
    den <- numeric(16*17/2)
    pos <- 0
    for (r in 1:16) {
      for (w in 1:r) {
        pos <- pos + 1
        den[pos] <- smooth.I$z[r,16-(w-1)]
      }
    }
    den/sum(den)
  }
  # One probability vector per group; the cloud centre moves with c.
  # (Same call order as the original, so the RNG stream is unchanged.)
  probs <- list(prob_glcm(c=5,  s=s),
                prob_glcm(c=5.5,s=s),
                prob_glcm(c=6,  s=s),
                prob_glcm(c=6.5,s=s),
                prob_glcm(c=7,  s=s))
  # 20 images per group: random total count times the group's cell
  # probabilities, rounded to integer counts.  This single loop replaces
  # five copy-pasted fill loops.
  glcm <- matrix(0, nrow=20*5, ncol=136)
  for (g in seq_along(probs)) {
    for (j in ((g-1)*20 + 1):(g*20)) {
      t <- round(runif(1,500,20000),0)
      glcm[j,] <- round(t*probs[[g]])
    }
  }
  glcm
}
# ---- Data construction -----------------------------------------------------
# Simulate the GLCM data: 5 groups x 20 images, 136 cells per image.
Z=simu(s=10)
Z_met=Z
T_met=nrow(Z_met)            # number of images (rows of Z)
n=ncol(Z_met)                # number of GLCM cells per image (136)
X=apply(Z_met,1,sum)         # total count per image
X_met=X
sX_met=(X-mean(X))/sd(X)     # standardised totals, used as a covariate
# R[,,t] is the 2 x n design matrix for image t: an intercept row and the
# standardised total-count row, repeated across all n cells.
R=array(data = NA,dim = c(2,n,T_met))
for (t in 1: nrow(Z_met)) R[,,t]=matrix(rep(c(1,sX_met[t]),times=n),byrow = FALSE,nrow=2,ncol=n)
############################################################################
##########################        MCMC setup       #########################
############################################################################
library(HI)
library(invgamma)
# Project-local sampler functions (update_Y, update_theta, update_Beta,
# update_tau2, update_sig2, update_rho, update_v) and C++ kernels.
source('/gstore/scratch/u/lix233/RGSDP/sdp_functions_selfwriting_V12_cpp.R')
sourceCpp('/gstore/scratch/u/lix233/RGSDP/rgsdp.cpp')
# D and W are 16x16-grid matrices read from disk -- presumably a distance
# matrix and a spatial adjacency matrix; confirm against the sampler code.
D=read.csv('/gstore/scratch/u/lix233/RGSDP/D_16.csv',header=TRUE)
W=read.csv('/gstore/scratch/u/lix233/RGSDP/W_16.csv',header=TRUE)
N=20000;Burnin=N/2           # total iterations and burn-in (half the chain)
Y_iter_met=Theta_iter_met=array(data=NA,dim = c(T_met,n,N))
# Initial Theta values.  NOTE: `try` shadows base::try() for the rest of
# this script.
try=matrix(0,nrow =T_met ,ncol = n)
for (i in 1:T_met){
  for (j in 1:n){
    if (Z_met[i,j]==0) {
      # zero counts start at mean -10 -- presumably well below the
      # rounding threshold used by update_Y; confirm against the sampler
      try[i,j]=rnorm(1,mean=-10,sd=1)
    } else {
      try[i,j]=rnorm(1,mean=Z_met[i,j],sd=1)
    }
  }
}
# Sanity check: one latent draw from the initial state should contain no
# infinite values (printed total should be 0).
g=update_Y(Z=Z_met,X=X_met,tau2=100,Theta = try,Beta =c(0.1,0.1),R)
sum(g==Inf)+sum(g==-Inf)
Theta_iter_met[,,1]=try
# Chains for the scalar parameters, with starting values.
tau2_met=v_met=rho_met=sig2_met=rep(NA,N)
tau2_met[1]=50
v_met[1]=0.8
rho_met[1]=0.9
sig2_met[1]=10
# v_met=rep(1,N) # Fix v
# Hyperprior parameters (vague inverse-gamma / flat normal choices).
av=bv=1
atau=0.0001 ;btau=0.0001
asig=0.0001 ;bsig=0.0001
Betam=c(0,0);Sigma_m=matrix(c(10^5,0,0,10^5),nrow=2,ncol=2)
Beta_iter_met=matrix(NA,nrow=N,ncol=nrow(R[,,1]))
Beta_iter_met[1,]=c(40,20)
# ---- Gibbs sampler ---------------------------------------------------------
# Each iteration updates, in turn: the latent Y, Theta, the regression
# coefficients Beta, then tau2, sig2, rho and v, each conditioned on the
# most recent values of the others.
for (iter in 2:N) {
  Y_iter_met[,,iter]=update_Y(Z_met,X_met,tau2_met[iter-1],Theta_iter_met[,,iter-1],Beta_iter_met[iter-1,],R)
  Theta_iter_met[,,iter]=update_theta(as.vector(X_met),Y_iter_met[,,iter],as.matrix(D),as.matrix(W),rho_met[iter-1],Theta_iter_met[,,iter-1],sig2_met[iter-1],tau2_met[iter-1],v_met[iter-1],Beta_iter_met[iter-1,],R)
  Beta_iter_met[iter,]=update_Beta(Betam,Sigma_m,tau2_met[iter-1],X_met,Y_iter_met[,,iter],Theta_iter_met[,,iter],R)
  tau2_met[iter] = update_tau2(X_met,Y_iter_met[,,iter],Theta_iter_met[,,iter],atau,btau,Beta_iter_met[iter,],R)
  sig2_met[iter]= update_sig2(asig,bsig,D,W,rho_met[iter-1],Theta_iter_met[,,iter])
  rho_met[iter] = update_rho(D,W,Theta_iter_met[,,iter],sig2_met[iter])
  v_met[iter]=update_v(Z_met,v_met[iter-1],Tstar=nrow(unique.matrix(Theta_iter_met[,,iter])),av,bv)
}
# ---- Convergence diagnostics and clustering comparison ---------------------
library(coda)

# Two-sided p-value of the Geweke convergence diagnostic for a set of
# post-burn-in draws; small values indicate lack of convergence.
geweke_pval <- function(draws) {
  pnorm(abs(geweke.diag(mcmc(draws))$z), lower.tail = FALSE) * 2
}
geweke_pval(Beta_iter_met[(1 + Burnin):N, ])
geweke_pval(rho_met[(1 + Burnin):N])
geweke_pval(sig2_met[(1 + Burnin):N])
geweke_pval(tau2_met[(1 + Burnin):N])
geweke_pval(v_met[(1 + Burnin):N])

# Posterior mean of Theta over the retained (post-burn-in) draws; replaces
# the original accumulate-in-a-loop sum.
Theta_ave <- apply(Theta_iter_met[, , (Burnin + 1):N], c(1, 2), mean)

library('NbClust')
# Run NbClust once (the original ran it twice, discarding the first result)
# and keep the partition of images implied by the posterior-mean Theta.
nb <- NbClust(Theta_ave, distance = 'euclidean', method = 'ward.D2', index = 'kl')
nb
HRGSDP <- nb$Best.partition

# Expand the 136 triangle cells of each image back to the full symmetric
# 16 x 16 (= 256 column) layout before computing texture features.
glcm_whole=Z[,c(1,2,4,7,11,16,22,29,37,46,56,67,79,92,106,121,
                2,3,5,8,12,17,23,30,38,47,57,68,80,93,107,122,
                4,5,6,9,13,18,24,31,39,48,58,69,81,94,108,123,
                7,8,9,10,14,19,25,32,40,49,59,70,82,95,109,124,
                11,12,13,14,15,20,26,33,41,50,60,71,83,96,110,125,
                16,17,18,19,20,21,27,34,42,51,61,72,84,97,111,126,
                22,23,24,25,26,27,28,35,43,52,62,73,85,98,112,127,
                29,30,31,32,33,34,35,36,44,53,63,74,86,99,113,128,
                37,38,39,40,41,42,43,44,45,54,64,75,87,100,114,129,
                46,47,48,49,50,51,52,53,54,55,65,76,88,101,115,130,
                56,57,58,59,60,61,62,63,64,65,66,77,89,102,116,131,
                67,68,69,70,71,72,73,74,75,76,77,78,90,103,117,132,
                79,80,81,82,83,84,85,86,87,88,89,90,91,104,118,133,
                92,93,94,95,96,97,98,99,100,101,102,103,104,105,119,134,
                106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,135,
                121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136)]
# Defines cal_stat() (and, presumably, error_rate() used below -- confirm).
source('/gstore/scratch/u/lix233/RGSDP/cal_stat.R')
features <- cal_stat(glcm_whole)

# Competing feature-based clustering methods, all with k = 5.
GMM <- Mclust(features, 5)
my.dist <- function(x) dist(x, method = 'euclidean')
my.hclust <- function(d) hclust(d, method = 'ward.D2')
HC <- cutree(my.hclust(my.dist(data.matrix(features))), k = 5)
KM <- kmeans(features, 5)
SC <- specc(features, 5)
CO <- ConsensusClusterPlus(t(features), maxK = 9, reps = 100, pItem = 0.90, pFeature = 1,
                           clusterAlg = 'hc', distance = 'euclidean', plot = FALSE)
CO <- CO[[5]]$consensusClass

# Cross-tabulate each partition against the true group labels (5 x 20).
aa <- table(rep(1:5, each = 20), CO)
bb <- table(rep(1:5, each = 20), GMM$classification)
cc <- table(rep(1:5, each = 20), HC)
dd <- table(rep(1:5, each = 20), KM$cluster)
ee <- table(rep(1:5, each = 20), SC)
ff <- table(rep(1:5, each = 20), HRGSDP)

# One summary row per method: Pearson chi-square statistic, number of
# clusters found, and error rate (error_rate() comes from cal_stat.R).
summarise_partition <- function(tab, method) {
  c(chisq.test(tab, correct = TRUE)$statistic, ncol(tab), error_rate(tab), method)
}
res_FeaCO = summarise_partition(aa, 'FeaCO')
res_FeaGMM = summarise_partition(bb, 'FeaGMM')
res_FeaHC = summarise_partition(cc, 'FeaHC')
res_FeaKM = summarise_partition(dd, 'FeaKM')
res_FeaSC = summarise_partition(ee, 'FeaSC')
res_HRGSDP = summarise_partition(ff, 'HRGSDP')
xx = rbind(res_FeaCO, res_FeaGMM, res_FeaHC, res_FeaKM, res_FeaSC, res_HRGSDP)
# Fixes the original column-name typo 'nunber' -> 'number'.
colnames(xx) = c('pearson.chi.sq', 'number of clusters', 'error.rate', 'method')
xx = as.data.frame(xx)
print(xx)
|
/s=10/simu_61.R
|
no_license
|
mguindanigroup/Radiomics-Hierarchical-Rounded-Gaussian-Spatial-Dirichlet-Process
|
R
| false
| false
| 9,293
|
r
|
set.seed( 61 )
library(mvtnorm)
library(fields)
library(Rcpp)
library(mclust)
library(kernlab)
library(ConsensusClusterPlus)
simu=function(s){
prob_glcm<-function(c,s=s,mc=30000){
mu<-c(2+c,14-c)
sigma<-matrix(s*c(1,-0.7,-0.7,1),nrow=2)
elip<-rmvnorm(mc,mu,sigma)
# par(xaxs='i',yaxs='i')
# plot(elip,xlim =c(0,16) ,ylim=c(0,16))
# abline(16,-1,col='red')
# abline(h=16);abline(h=15);abline(h=14);abline(h=13);abline(h=12);abline(h=11);abline(h=10);abline(h=9);
# abline(h=8);abline(h=7);abline(h=6);abline(h=5);abline(h=4);abline(h=3);abline(h=2);abline(h=1);abline(h=0)
# abline(v=16);abline(v=15);abline(v=14);abline(v=13);abline(v=12);abline(v=11);abline(v=10);abline(v=9);
# abline(v=0);abline(v=1);abline(v=2);abline(v=3);abline(v=4);abline(v=5);abline(v=6);abline(v=7);abline(v=8)
cell_count<-rep(0,16*16)
for (i in 1:mc)
{
for (m in 1:16) {
for (k in 16:1) {
if (( (m-1) <elip[i,1])&(elip[i,1]< m)&( (k-1) <elip[i,2])&(elip[i,2]< k)) {
cell_count[16-k+1+16*(m-1)]=cell_count[16-k+1+16*(m-1)]+1}
}
}
}
## -c(2:16,19:32,36:48,53:64,70:80,87:96,104:112,121:128,138:144,155:160,172:176,189:192,206:208,223:224,240)
z<-cell_count/sum(cell_count)
z_whole<-z[c(1,17,33,49,65,81,97,113,129,145,161,177,193,209,225,241,
17,18,34,50,66,82,98,114,130,146,162,178,194,210,226,242,
33,34,35,51,67,83,99,115,131,147,163,179,195,211,227,243,
49,50,51,52,68,84,100,116,132,148,164,180,196,212,228,244,
65,66,67,68,69,85,101,117,133,149,165,181,197,213,229,245,
81,82,83,84,85,86,102,118,134,150,166,182,198,214,230,246,
97,98,99,100,101,102,103,119,135,151,167,183,199,215,231,247,
113,114,115,116,117,118,119,120,136,152,168,184,200,216,232,248,
129,130,131,132,133,134,135,136,137,153,169,185,201,217,233,249,
145,146,147,148,149,150,151,152,153,154,170,186,202,218,234,250,
161,162,163,164,165,166,167,168,169,170,171,187,203,219,235,251,
177,178,179,180,181,182,183,184,185,186,187,188,204,220,236,252,
193,194,195,196,197,198,199,200,201,202,203,204,205,221,237,253,
209,210,211,212,213,214,215,216,217,218,219,220,221,222,238,254,
225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,255,
241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,256)]
arg. <- expand.grid(c(0.5:15.5),c(15.5:0.5))
I = as.image( Z=z_whole, x=arg., grid=list(x=seq(0.5,15.5,1), y=seq(0.5,15.5,1)))
image(I)
smooth.I <- image.smooth(I, theta=1);
#################################################
### notice the order of this sommthed image ###
#################################################
den=c()
for (r in 1:16) {
for (w in 1:r) {
den=c(den,smooth.I$z[r,16-(w-1)])
}
}
prob<-den/sum(den)
return(prob)
}
prob1=prob_glcm(c=5,s=s)
prob2=prob_glcm(c=5.5,s=s)
prob3=prob_glcm(c=6,s=s)
prob4=prob_glcm(c=6.5,s=s)
prob5=prob_glcm(c=7,s=s)
glcm=matrix(0,nrow=20*5,ncol=136)
for (j in 1:20)
{
t<-round(runif(1,500,20000),0)
glcm[j,]=round(t*prob1)
}
for (j in 21:40)
{
t<-round(runif(1,500,20000),0)
glcm[j,]=round(t*prob2)
}
for (j in 41:60)
{
t<-round(runif(1,500,20000),0)
glcm[j,]=round(t*prob3)
}
for (j in 61:80)
{
t<-round(runif(1,500,20000),0)
glcm[j,]=round(t*prob4)
}
for (j in 81:100)
{
t<-round(runif(1,500,20000),0)
glcm[j,]=round(t*prob5)
}
glcm
}
Z=simu(s=10)
Z_met=Z
T_met=nrow(Z_met)
n=ncol(Z_met)
X=apply(Z_met,1,sum)
X_met=X
sX_met=(X-mean(X))/sd(X)
R=array(data = NA,dim = c(2,n,T_met))
for (t in 1: nrow(Z_met)) R[,,t]=matrix(rep(c(1,sX_met[t]),times=n),byrow = FALSE,nrow=2,ncol=n)
############################################################################
########################## MCMC ########################
############################################################################
library(HI)
library(invgamma)
source('/gstore/scratch/u/lix233/RGSDP/sdp_functions_selfwriting_V12_cpp.R')
sourceCpp('/gstore/scratch/u/lix233/RGSDP/rgsdp.cpp')
D=read.csv('/gstore/scratch/u/lix233/RGSDP/D_16.csv',header=TRUE)
W=read.csv('/gstore/scratch/u/lix233/RGSDP/W_16.csv',header=TRUE)
N=20000;Burnin=N/2
Y_iter_met=Theta_iter_met=array(data=NA,dim = c(T_met,n,N))
try=matrix(0,nrow =T_met ,ncol = n)
for (i in 1:T_met){
for (j in 1:n){
if (Z_met[i,j]==0) {
try[i,j]=rnorm(1,mean=-10,sd=1)
} else {
try[i,j]=rnorm(1,mean=Z_met[i,j],sd=1)
}
}
}
g=update_Y(Z=Z_met,X=X_met,tau2=100,Theta = try,Beta =c(0.1,0.1),R)
sum(g==Inf)+sum(g==-Inf)
Theta_iter_met[,,1]=try
tau2_met=v_met=rho_met=sig2_met=rep(NA,N)
tau2_met[1]=50
v_met[1]=0.8
rho_met[1]=0.9
sig2_met[1]=10
# v_met=rep(1,N) # Fix v
av=bv=1
atau=0.0001 ;btau=0.0001
asig=0.0001 ;bsig=0.0001
Betam=c(0,0);Sigma_m=matrix(c(10^5,0,0,10^5),nrow=2,ncol=2)
Beta_iter_met=matrix(NA,nrow=N,ncol=nrow(R[,,1]))
Beta_iter_met[1,]=c(40,20)
for (iter in 2:N) {
Y_iter_met[,,iter]=update_Y(Z_met,X_met,tau2_met[iter-1],Theta_iter_met[,,iter-1],Beta_iter_met[iter-1,],R)
Theta_iter_met[,,iter]=update_theta(as.vector(X_met),Y_iter_met[,,iter],as.matrix(D),as.matrix(W),rho_met[iter-1],Theta_iter_met[,,iter-1],sig2_met[iter-1],tau2_met[iter-1],v_met[iter-1],Beta_iter_met[iter-1,],R)
Beta_iter_met[iter,]=update_Beta(Betam,Sigma_m,tau2_met[iter-1],X_met,Y_iter_met[,,iter],Theta_iter_met[,,iter],R)
tau2_met[iter] = update_tau2(X_met,Y_iter_met[,,iter],Theta_iter_met[,,iter],atau,btau,Beta_iter_met[iter,],R)
sig2_met[iter]= update_sig2(asig,bsig,D,W,rho_met[iter-1],Theta_iter_met[,,iter])
rho_met[iter] = update_rho(D,W,Theta_iter_met[,,iter],sig2_met[iter])
v_met[iter]=update_v(Z_met,v_met[iter-1],Tstar=nrow(unique.matrix(Theta_iter_met[,,iter])),av,bv)
}
library(coda)
mcmc_beta=mcmc(Beta_iter_met[(1+Burnin):N,])
pnorm(abs(geweke.diag(mcmc_beta)$z),lower.tail=FALSE)*2
mcmc_rho=mcmc(rho_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_rho)$z),lower.tail=FALSE)*2
mcmc_sig2=mcmc(sig2_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_sig2)$z),lower.tail=FALSE)*2
mcmc_tau2=mcmc(tau2_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_tau2)$z),lower.tail=FALSE)*2
mcmc_v=mcmc(v_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_v)$z),lower.tail=FALSE)*2
Theta_ave=Theta_sum=matrix(0,nrow=nrow(Theta_iter_met[,,1]),ncol=ncol(Theta_iter_met[,,1]))
for (i in (Burnin+1):N) {
Theta_sum=Theta_sum+Theta_iter_met[,,i]
}
Theta_ave=Theta_sum/(N-Burnin)
library('NbClust')
NbClust(Theta_ave,distance='euclidean',method='ward.D2',index='kl')
HRGSDP=NbClust(Theta_ave,distance='euclidean',method='ward.D2',index='kl')$Best.partition
glcm_whole=Z[,c(1,2,4,7,11,16,22,29,37,46,56,67,79,92,106,121,
2,3,5,8,12,17,23,30,38,47,57,68,80,93,107,122,
4,5,6,9,13,18,24,31,39,48,58,69,81,94,108,123,
7,8,9,10,14,19,25,32,40,49,59,70,82,95,109,124,
11,12,13,14,15,20,26,33,41,50,60,71,83,96,110,125,
16,17,18,19,20,21,27,34,42,51,61,72,84,97,111,126,
22,23,24,25,26,27,28,35,43,52,62,73,85,98,112,127,
29,30,31,32,33,34,35,36,44,53,63,74,86,99,113,128,
37,38,39,40,41,42,43,44,45,54,64,75,87,100,114,129,
46,47,48,49,50,51,52,53,54,55,65,76,88,101,115,130,
56,57,58,59,60,61,62,63,64,65,66,77,89,102,116,131,
67,68,69,70,71,72,73,74,75,76,77,78,90,103,117,132,
79,80,81,82,83,84,85,86,87,88,89,90,91,104,118,133,
92,93,94,95,96,97,98,99,100,101,102,103,104,105,119,134,
106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,135,
121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136)]
source('/gstore/scratch/u/lix233/RGSDP/cal_stat.R')
features=cal_stat(glcm_whole)
GMM=Mclust(features,5)
my.dist <- function(x) dist(x, method='euclidean')
my.hclust <- function(d) hclust(d, method='ward.D2')
HC<-cutree(my.hclust(my.dist(data.matrix(features))),k=5)
KM=kmeans(features,5)
SC=specc(features,5)
CO <- ConsensusClusterPlus(t(features),maxK=9,reps=100,pItem=0.90, pFeature=1,
clusterAlg='hc',distance='euclidean',plot=FALSE)
CO <- CO[[5]]$consensusClass
aa <- table(rep(1:5,each=20), CO)
bb <- table(rep(1:5,each=20), GMM$classification)
cc <- table(rep(1:5,each=20), HC)
dd <- table(rep(1:5,each=20), KM$cluster)
ee <- table(rep(1:5,each=20), SC)
ff <- table(rep(1:5,each=20), HRGSDP)
res_FeaCO=c(chisq.test(aa,correct = TRUE)$statistic,ncol(aa),error_rate(aa), 'FeaCO')
res_FeaGMM=c(chisq.test(bb,correct = TRUE)$statistic,ncol(bb),error_rate(bb), 'FeaGMM')
res_FeaHC=c(chisq.test(cc,correct = TRUE)$statistic,ncol(cc),error_rate(cc), 'FeaHC')
res_FeaKM=c(chisq.test(dd,correct = TRUE)$statistic,ncol(dd),error_rate(dd), 'FeaKM')
res_FeaSC=c(chisq.test(ee,correct = TRUE)$statistic,ncol(ee),error_rate(ee), 'FeaSC')
res_HRGSDP=c(chisq.test(ff,correct = TRUE)$statistic,ncol(ff),error_rate(ff), 'HRGSDP')
xx = rbind(res_FeaCO, res_FeaGMM, res_FeaHC, res_FeaKM, res_FeaSC, res_HRGSDP)
colnames(xx) = c('pearson.chi.sq', 'nunber of clusters', 'error.rate', 'method')
xx = as.data.frame(xx)
print(xx)
|
# Scrape data from KenPom
#http://kenpom.com/
library('data.table')
library('pbapply')
library('XML')
library('RCurl')
library('stringdist')
library('stringi')
library(xml2)
library(httr)
library(dplyr)
library(rvest)
# NOTE: rm(list = ls()) wipes the global environment; only acceptable
# because this file is run as a stand-alone script.
rm(list=ls(all=TRUE))
gc(reset=TRUE)
set.seed(8865)
#Load Spellings
# Lookup table mapping alternative team-name spellings to TeamID.
spell <- fread('data/MTeamSpellings.csv')
#Manually add some spellings
# Add spellings seen on KenPom that are missing from the lookup, copying
# the TeamID from an existing row for the same team.
new1 <- copy(spell[TeamNameSpelling == 'citadel',])
new1[,TeamNameSpelling := 'the citadel']
new2 <- copy(spell[TeamNameSpelling == 'fort wayne(ipfw)',])
new2[,TeamNameSpelling := 'fort wayne']
new3 <- copy(spell[TeamNameSpelling == 'fort wayne(ipfw)',])
new3[,TeamNameSpelling := 'ft wayne']
spell <- rbindlist(list(
spell,
new1,
new2,
new3
))
#Scrape ratings
# One HTTP request per season (2002-2021); Sys.sleep(1) keeps the scrape
# polite.  The first HTML table on each page is the ratings table; each
# result is tagged with its Season.
dat_list <- pblapply(2002:2021, function(x){
Sys.sleep(1)
page <- GET(paste0('https://kenpom.com/index.php?y=', x))
out <- read_html(page)%>%
html_table(fill=TRUE)%>%
`[[`(1)
data.table(
Season = x,
out
)
})
#Combine and cleanup dataframe
dat <- rbindlist(dat_list)
# Drop columns V7/V9/V11/V13 before renaming -- presumably duplicated
# rank columns from KenPom's table layout; confirm against a scraped page.
remove <- paste0('V', c(7, 9, 11, 13))
set(dat, j=remove, value=NULL)
# Rename the remaining 18 columns; remove1-remove4 are placeholders that
# are dropped again immediately below.
setnames(dat, c(
'Season',
'Rank',
'Team',
'Conf',
'W-L',
'AdjEM',
'AdjO',
'AdjD',
'AdjT',
'Luck',
'schedule_AdjEM',
"remove1",
'schedule_OppO',
"remove2",
'schedule_OppD',
"remove3",
'conf_AdjEM',
"remove4"
))
dat <- dat %>%
dplyr::select(-remove1, -remove2, -remove3, -remove4)
# Drop repeated-header/footer rows that parsed as NA.
dat <- dat[!(is.na(AdjEM) | is.na(Rank) | is.na(schedule_AdjEM)),]
# Coerce the rating columns (scraped as character) to numeric.
for(var in names(dat)[6:ncol(dat)]){
set(dat, j=var, value=as.numeric(dat[[var]]))
}
# Normalise team-name strings for fuzzy matching: strip digits, collapse
# runs of whitespace to a single space, trim both ends, and fold to
# lower case.
cleanup <- function(x) {
  no_digits <- gsub('[[:digit:]]+', ' ', x)
  one_space <- gsub('[[:space:]]+', ' ', no_digits)
  stringi::stri_trans_tolower(stringi::stri_trim_both(one_space))
}
# Normalise both sides before fuzzy matching.
dat[,Team := cleanup(Team)]
spell[,TeamNameSpelling := cleanup(TeamNameSpelling)]
# Approximate match of each KenPom team name to the closest known spelling
# (cosine distance on character q-grams); amatch returns row indices.
matches <- amatch(dat[['Team']], spell[['TeamNameSpelling']], method='cosine')
# NOTE(review): spell[matches, 'TeamID'] is a one-column data.table, not an
# atomic vector; confirm that := stores the intended column
# (spell$TeamID[matches] would be unambiguous).
dat[,TeamID := spell[matches, 'TeamID']]
dat[,alternative_spelling := spell[matches, 'TeamNameSpelling']]
dat[is.na(TeamID),]   # printed for manual inspection: unmatched teams
# Similarity score in [0, 1]; 1 means the two names matched exactly.
dat[,match_rating := 1-stringdist(Team, alternative_spelling, method='cosine')]
# Printed for manual inspection: the inexact matches, worst first.
dat[Team != alternative_spelling,][order(match_rating),unique(data.table(Team, alternative_spelling, match_rating))]
naMask <- is.na(dat$AdjEM)   # (NA rows were already dropped above)
dat <- dat[naMask ==F,]
#Save
# 'Pomery' (sic, Pomeroy) is kept to match downstream file names.
PomeryRatings <- dat
write.csv(PomeryRatings, 'data/PomeryRatings.csv', row.names=FALSE)
|
/1_KenPom_Scrape.R
|
no_license
|
Conor-McGrath/March_Madness
|
R
| false
| false
| 2,405
|
r
|
# Scrape data from KenPom
#http://kenpom.com/
library('data.table')
library('pbapply')
library('XML')
library('RCurl')
library('stringdist')
library('stringi')
library(xml2)
library(httr)
library(dplyr)
library(rvest)
rm(list=ls(all=TRUE))
gc(reset=TRUE)
set.seed(8865)
#Load Spellings
spell <- fread('data/MTeamSpellings.csv')
#Manually add some spellings
new1 <- copy(spell[TeamNameSpelling == 'citadel',])
new1[,TeamNameSpelling := 'the citadel']
new2 <- copy(spell[TeamNameSpelling == 'fort wayne(ipfw)',])
new2[,TeamNameSpelling := 'fort wayne']
new3 <- copy(spell[TeamNameSpelling == 'fort wayne(ipfw)',])
new3[,TeamNameSpelling := 'ft wayne']
spell <- rbindlist(list(
spell,
new1,
new2,
new3
))
#Scrape ratings
dat_list <- pblapply(2002:2021, function(x){
Sys.sleep(1)
page <- GET(paste0('https://kenpom.com/index.php?y=', x))
out <- read_html(page)%>%
html_table(fill=TRUE)%>%
`[[`(1)
data.table(
Season = x,
out
)
})
#Combine and cleanup dataframe
dat <- rbindlist(dat_list)
remove <- paste0('V', c(7, 9, 11, 13))
set(dat, j=remove, value=NULL)
setnames(dat, c(
'Season',
'Rank',
'Team',
'Conf',
'W-L',
'AdjEM',
'AdjO',
'AdjD',
'AdjT',
'Luck',
'schedule_AdjEM',
"remove1",
'schedule_OppO',
"remove2",
'schedule_OppD',
"remove3",
'conf_AdjEM',
"remove4"
))
dat <- dat %>%
dplyr::select(-remove1, -remove2, -remove3, -remove4)
dat <- dat[!(is.na(AdjEM) | is.na(Rank) | is.na(schedule_AdjEM)),]
for(var in names(dat)[6:ncol(dat)]){
set(dat, j=var, value=as.numeric(dat[[var]]))
}
#Match to spellings
# Normalise team-name strings for fuzzy matching: strip digits, collapse
# runs of whitespace to a single space, trim both ends, and fold to
# lower case.
cleanup <- function(x) {
  no_digits <- gsub('[[:digit:]]+', ' ', x)
  one_space <- gsub('[[:space:]]+', ' ', no_digits)
  stringi::stri_trans_tolower(stringi::stri_trim_both(one_space))
}
dat[,Team := cleanup(Team)]
spell[,TeamNameSpelling := cleanup(TeamNameSpelling)]
matches <- amatch(dat[['Team']], spell[['TeamNameSpelling']], method='cosine')
dat[,TeamID := spell[matches, 'TeamID']]
dat[,alternative_spelling := spell[matches, 'TeamNameSpelling']]
dat[is.na(TeamID),]
dat[,match_rating := 1-stringdist(Team, alternative_spelling, method='cosine')]
dat[Team != alternative_spelling,][order(match_rating),unique(data.table(Team, alternative_spelling, match_rating))]
naMask <- is.na(dat$AdjEM)
dat <- dat[naMask ==F,]
#Save
PomeryRatings <- dat
write.csv(PomeryRatings, 'data/PomeryRatings.csv', row.names=FALSE)
|
#' Classical estimates for tables
#'
#' Some standard/classical (non-compositional) statistics
#'
#' @param x a data.frame, matrix or table
#' @param margins margins; either NULL (computed from \code{x} via \code{maggr}),
#' a list of two vectors (row margins, column margins), or a two-column
#' matrix/data.frame
#' @param statistics statistics of interest
#' @param maggr a function for calculating the mean margins of a table, default is the arithmetic mean
#' @details statistics \sQuote{phi} is the values of the table divided by the product of margins. \sQuote{cramer} normalize these values according to the dimension of the table. \sQuote{chisq} are the expected values according to Pearson while \sQuote{yates} according to Yates.
#'
#' For the \code{maggr} function argument, arithmetic means (\code{mean}) should be chosen to obtain the classical results. Any other user-provided function should be taken with care since the classical estimations rely on the arithmetic mean.
#' @author Matthias Templ
#' @return A matrix holding the requested statistic for each cell of the table
#' @references
#' Egozcue, J.J., Pawlowsky-Glahn, V., Templ, M., Hron, K. (2015)
#' Independence in contingency tables using simplicial geometry.
#' \emph{Communications in Statistics - Theory and Methods}, 44 (18), 3978--3996.
#'
#' @export
#' @examples
#' data(precipitation)
#' tab1 <- indTab(precipitation)
#' stats(precipitation)
#' stats(precipitation, statistics = "cramer")
#' stats(precipitation, statistics = "chisq")
#' stats(precipitation, statistics = "yates")
#'
#' ## take with care
#' ## (the provided statistics are not designed for that case):
#' stats(precipitation, statistics = "chisq", maggr = gmean)
stats <- function(x, margins=NULL,
                  statistics = c("phi", "cramer", "chisq", "yates"), maggr = mean){
  ## x ... prop.table (2-way relative frequency table)
  if (!is.matrix(x))
    stop("Function only defined for 2-way tables.")
  if (is.null(margins)) {
    ## default margins: row and column means of the table
    m1 <- apply(x, 1, maggr)
    m2 <- apply(x, 2, maggr)
  } else {
    ## BUG FIX: the original validation chained `||` starting with
    ## !is.null(margins), which is always TRUE inside this branch, so *any*
    ## user-supplied margins stopped with "not supported". An if/else chain
    ## also prevents m1/m2 being used uninitialised for unsupported classes.
    if (is.matrix(margins) || is.data.frame(margins)) {
      m1 <- margins[, 1]
      m2 <- margins[, 2]
    } else if (is.list(margins)) {
      m1 <- margins[[1]]
      m2 <- margins[[2]]
    } else {
      stop(paste("class", class(margins)[1], "of margins is not supported"))
    }
    if (length(m1) != nrow(x) || length(m2) != ncol(x)) stop("wrong length of margins")
  }
  method <- match.arg(statistics)
  stat <- function(x, method, m1, m2) {
    ## evals: expected cell values under independence (outer product of margins)
    evals <- m1 %*% t(m2)
    phi <- x / evals
    switch(method,
           phi = phi,
           cramer = sqrt(phi^2 / min(dim(x) - 1)),
           chisq = sqrt((x - evals)^2/evals),
           yates = sqrt( (abs(x - evals) - 0.5)^2 / evals )
    )
  }
  stat(x, method, m1, m2)
}
|
/R/stats.R
|
no_license
|
matthias-da/robCompositions
|
R
| false
| false
| 2,986
|
r
|
#' Classical estimates for tables
#'
#' Some standard/classical (non-compositional) statistics
#'
#' @param x a data.frame, matrix or table
#' @param margins margins; either NULL (computed from \code{x} via \code{maggr}),
#' a list of two vectors (row margins, column margins), or a two-column
#' matrix/data.frame
#' @param statistics statistics of interest
#' @param maggr a function for calculating the mean margins of a table, default is the arithmetic mean
#' @details statistics \sQuote{phi} is the values of the table divided by the product of margins. \sQuote{cramer} normalize these values according to the dimension of the table. \sQuote{chisq} are the expected values according to Pearson while \sQuote{yates} according to Yates.
#'
#' For the \code{maggr} function argument, arithmetic means (\code{mean}) should be chosen to obtain the classical results. Any other user-provided function should be taken with care since the classical estimations rely on the arithmetic mean.
#' @author Matthias Templ
#' @return A matrix holding the requested statistic for each cell of the table
#' @references
#' Egozcue, J.J., Pawlowsky-Glahn, V., Templ, M., Hron, K. (2015)
#' Independence in contingency tables using simplicial geometry.
#' \emph{Communications in Statistics - Theory and Methods}, 44 (18), 3978--3996.
#'
#' @export
#' @examples
#' data(precipitation)
#' tab1 <- indTab(precipitation)
#' stats(precipitation)
#' stats(precipitation, statistics = "cramer")
#' stats(precipitation, statistics = "chisq")
#' stats(precipitation, statistics = "yates")
#'
#' ## take with care
#' ## (the provided statistics are not designed for that case):
#' stats(precipitation, statistics = "chisq", maggr = gmean)
stats <- function(x, margins=NULL,
                  statistics = c("phi", "cramer", "chisq", "yates"), maggr = mean){
  ## x ... prop.table (2-way relative frequency table)
  if (!is.matrix(x))
    stop("Function only defined for 2-way tables.")
  if (is.null(margins)) {
    ## default margins: row and column means of the table
    m1 <- apply(x, 1, maggr)
    m2 <- apply(x, 2, maggr)
  } else {
    ## BUG FIX: the original validation chained `||` starting with
    ## !is.null(margins), which is always TRUE inside this branch, so *any*
    ## user-supplied margins stopped with "not supported". An if/else chain
    ## also prevents m1/m2 being used uninitialised for unsupported classes.
    if (is.matrix(margins) || is.data.frame(margins)) {
      m1 <- margins[, 1]
      m2 <- margins[, 2]
    } else if (is.list(margins)) {
      m1 <- margins[[1]]
      m2 <- margins[[2]]
    } else {
      stop(paste("class", class(margins)[1], "of margins is not supported"))
    }
    if (length(m1) != nrow(x) || length(m2) != ncol(x)) stop("wrong length of margins")
  }
  method <- match.arg(statistics)
  stat <- function(x, method, m1, m2) {
    ## evals: expected cell values under independence (outer product of margins)
    evals <- m1 %*% t(m2)
    phi <- x / evals
    switch(method,
           phi = phi,
           cramer = sqrt(phi^2 / min(dim(x) - 1)),
           chisq = sqrt((x - evals)^2/evals),
           yates = sqrt( (abs(x - evals) - 0.5)^2 / evals )
    )
  }
  stat(x, method, m1, m2)
}
|
#' Retrieve the lowest common taxon and rank for a given taxon name or ID
#'
#' @export
#' @param x Vector of taxa names (character) or id (character or numeric) to query.
#' @param db character; database to query. either \code{ncbi}, \code{itis}, or
#' \code{gbif}.
#' @param rows (numeric) Any number from 1 to infinity. If the default NA, all rows are
#' considered. Note that this parameter is ignored if you pass in a taxonomic id of any of the
#' acceptable classes: tsn, colid. NCBI has a method for this function but rows doesn't work.
#' @param class_list (list) A list of classifications, as returned from
#' \code{\link[taxize]{classification}}
#' @param low_rank (character) taxonomic rank to return, of length 1
#' @param ... Other arguments passed to \code{\link[taxize]{get_tsn}},
#' \code{\link[taxize]{get_uid}}, \code{\link[taxize]{get_eolid}},
#' \code{\link[taxize]{get_colid}}, \code{\link[taxize]{get_tpsid}},
#' \code{\link[taxize]{get_gbifid}}.
#'
#' @return NA when no match, or a data.frame with columns
#' \itemize{
#' \item name
#' \item rank
#' \item id
#' }
#' @author Jimmy O'Donnell \email{jodonnellbio@@gmail.com}
#' Scott Chamberlain \email{myrmecocystus@@gmail.com}
#' @examples \dontrun{
#' id <- c("9031", "9823", "9606", "9470")
#' id_class <- classification(id, db = 'ncbi')
#' lowest_common(id[2:4], db = "ncbi")
#' lowest_common(id[2:4], db = "ncbi", low_rank = 'class')
#' lowest_common(id[2:4], db = "ncbi", low_rank = 'family')
#' lowest_common(id[2:4], class_list = id_class)
#' lowest_common(id[2:4], class_list = id_class, low_rank = 'class')
#' lowest_common(id[2:4], class_list = id_class, low_rank = 'family')
#'
#' spp <- c("Sus scrofa", "Homo sapiens", "Nycticebus coucang")
#' lowest_common(spp, db = "ncbi")
#' lowest_common(get_uid(spp))
#'
#' lowest_common(spp, db = "itis")
#' lowest_common(get_tsn(spp))
#'
#' gbifid <- c("2704179", "3119195")
#' lowest_common(gbifid, db = "gbif")
#'
#' spp <- c("Poa annua", "Helianthus annuus")
#' lowest_common(spp, db = "gbif")
#' lowest_common(get_gbifid(spp))
#'
#' cool_orchid <- c("Angraecum sesquipedale", "Dracula vampira", "Masdevallia coccinea")
#' orchid_ncbi <- get_uid(cool_orchid)
#' orchid_gbif <- get_gbifid(cool_orchid)
#' orchid_itis <- get_tsn(cool_orchid)
#'
#' orchid_hier_ncbi <- classification(orchid_ncbi, db = 'ncbi')
#' orchid_hier_gbif <- classification(orchid_gbif, db = 'gbif')
#' orchid_hier_itis <- classification(orchid_itis, db = 'itis')
#'
#' lowest_common(orchid_ncbi, low_rank = 'class')
#' lowest_common(orchid_ncbi, class_list = orchid_hier_ncbi, low_rank = 'class')
#' lowest_common(orchid_gbif, low_rank = 'class')
#' lowest_common(orchid_gbif, orchid_hier_gbif, low_rank = 'class')
#' lowest_common(get_uid(cool_orchid), low_rank = 'class')
#' lowest_common(get_uid(cool_orchid), low_rank = 'family')
#'
#' lowest_common(orchid_ncbi, class_list = orchid_hier_ncbi, low_rank = 'subfamily')
#' lowest_common(orchid_gbif, class_list = orchid_hier_gbif, low_rank = 'subfamily')
#'
#' ## Pass in sci. names
#' nms <- c("Angraecum sesquipedale", "Dracula vampira", "Masdevallia coccinea")
#' lowest_common(x = nms, db = "ncbi")
#' lowest_common(x = nms, db = "gbif")
#' # lowest_common(x = nms, db = "itis")
#'
#' ## NAs due to taxon not found, stops with error message
#' # lowest_common(orchid_itis, db = "itis")
#' # lowest_common(get_tsn(cool_orchid))
#' }
lowest_common <- function(...){
  # S3 generic: dispatches on the class of the first argument. Character or
  # numeric input goes to the default method (which resolves names/ids to
  # database id objects); uid/tsn/gbifid objects go to their own methods.
  UseMethod("lowest_common")
}
#' @export
#' @rdname lowest_common
lowest_common.default <- function(x, db = NULL, rows = NA, class_list = NULL, low_rank = NULL, ...) {
  # Resolve names/raw ids to database-specific id objects, then re-dispatch
  # to the matching id method. If db is not given, fall back to the db
  # recorded on a supplied classification list.
  if (is.null(db)) if (!is.null(class_list)) db <- attr(class_list, "db")
  nstop(db)
  switch(db,
         itis = {
           id <- process_lowest_ids(x, db, get_tsn, rows = rows, ...)
           # BUG FIX: low_rank was previously dropped at this dispatch, so
           # e.g. lowest_common(spp, db = "ncbi", low_rank = "class")
           # silently ignored low_rank.
           lowest_common(id, class_list, low_rank = low_rank, ...)
         },
         ncbi = {
           id <- process_lowest_ids(x, db, get_uid, rows = rows, ...)
           lowest_common(id, class_list, low_rank = low_rank, ...)
         },
         gbif = {
           id <- process_lowest_ids(x, db, get_gbifid, rows = rows, ...)
           lowest_common(id, class_list, low_rank = low_rank, ...)
         },
         stop("the provided db value was not recognised", call. = FALSE)
  )
}
#' @export
#' @rdname lowest_common
lowest_common.uid <- function(x, class_list = NULL, low_rank = NULL, ...) {
  # Method for NCBI taxon ids ("uid" objects): requires >= 2 non-NA ids,
  # fetches (or validates) the classifications, then finds the lowest
  # common taxon via the shared helper.
  check_lowest_ids(x)
  class_list <- get_class(x, class_list, db = "uid", ...)
  lc_helper(x, class_list, low_rank, ...)
}
#' @export
#' @rdname lowest_common
lowest_common.tsn <- function(x, class_list = NULL, low_rank = NULL, ...) {
  # Method for ITIS taxon ids ("tsn" objects): requires >= 2 non-NA ids,
  # fetches (or validates) the classifications, then finds the lowest
  # common taxon via the shared helper.
  check_lowest_ids(x)
  class_list <- get_class(x, class_list, db = "itis", ...)
  lc_helper(x, class_list, low_rank, ...)
}
#' @export
#' @rdname lowest_common
lowest_common.gbifid <- function(x, class_list = NULL, low_rank = NULL, ...) {
  # Method for GBIF taxon ids ("gbifid" objects): requires >= 2 non-NA ids,
  # fetches (or validates) the classifications, then finds the lowest
  # common taxon via the shared helper.
  check_lowest_ids(x)
  class_list <- get_class(x, class_list, db = "gbif", ...)
  lc_helper(x, class_list, low_rank, ...)
}
# helpers -------------------------------------------------
lc_helper <- function(x, class_list, low_rank = NULL, ...) {
  # Shared worker for all lowest_common() methods.
  # x: vector of taxon ids used to index class_list; class_list: named list
  # of classification data.frames with columns name/rank/id (root first --
  # TODO confirm ordering holds for non-NCBI databases).
  idc <- class_list[x]
  # next line NCBI specific: drop taxa whose classification starts at
  # NCBI's catch-all "unclassified sequences" root (no-op for other dbs)
  cseq <- vapply(idc, function(x) x[1, 1] != "unclassified sequences", logical(1))
  idc <- idc[cseq]
  if (is.null(low_rank)) {
    # Lowest common taxon: the deepest row present in every classification.
    # The count of shared names gives the row index into the first one.
    x_row <- length(Reduce(intersect, lapply(idc, "[[", 1)))
    x <- idc[[1]][x_row, ]
    if (x[1, "rank"] == "no rank") {
      # Replace NCBI's "no rank" with "below-<last resolved rank>"
      x[1, "rank"] <- next_best_taxon(idc[[1]][1:x_row, ])
    }
    return(x)
  } else {
    valid_ranks <- tolower(getranknames()[,"rankname"])
    if (!(low_rank %in% valid_ranks)) {
      warning('the supplied rank is not valid')
    }
    # low_rank_names <- as.character(unique(unlist(lapply(idc, function(x) x$name[which(x$rank == low_rank)]))))
    # Collect the row at the requested rank from each classification; exactly
    # one unique row means all taxa share that taxon at this rank.
    low_rank_names <- unique(setDF(rbindlist(lapply(idc, function(x) x[which(x$rank == low_rank),]))))
    if (NROW(low_rank_names) == 1) {
      return(low_rank_names)
    } else {
      return(NA)
    }
  }
}
next_best_taxon <- function(x){
  # Label an unresolved rank as "below-<deepest resolved rank>", where the
  # deepest resolved rank is the last distinct value in x's "rank" column.
  ranks <- x[, "rank"]
  deepest <- tail(ranks[!duplicated(ranks)], n = 1)
  paste0("below-", deepest)
}
get_class <- function(x, y, db, ...) {
  # Return classifications for ids x: fetch them when y is NULL, otherwise
  # validate that the user-supplied classification list y came from the
  # expected database and return it unchanged.
  if (is.null(y)) {
    classification(x, db = db, ...)
  } else {
    # The "db" attribute stores "ncbi" while the id class is named "uid";
    # normalise before comparing.
    yattr <- str_replace(attr(y, "db"), "ncbi", "uid")
    if (yattr != db) {
      stop(sprintf("class_list input must be of class '%s'", db), call. = FALSE)
    }
    y
  }
}
check_lowest_ids <- function(x) {
  # A lowest-common query needs at least two resolvable (non-NA) ids;
  # stop with a count of found vs. missing ids otherwise.
  present <- na.omit(x)
  n_missing <- length(x) - length(present)
  if (length(present) < 2) {
    stop(length(present), " found, ", n_missing, " were NA; > 1 needed", call. = FALSE)
  }
}
process_lowest_ids <- function(input, db, fxn, ...){
  # Wrap numeric input (or character vectors of numeric ids) directly in the
  # database's id class without re-checking; otherwise resolve names via fxn
  # (get_tsn / get_uid / get_gbifid).
  g <- tryCatch(as.numeric(as.character(input)), warning = function(e) e)
  # BUG FIX: `is.character(input) && grepl(...)` errors in R >= 4.3 when
  # input has length > 1 (scalar && with a vector RHS); reduce with all().
  if (is(g, "numeric") || (is.character(input) && all(grepl("[[:digit:]]", input)))) {
    as_fxn <- switch(db, itis = as.tsn, gbif = as.gbifid, ncbi = as.uid)
    as_fxn(input, check = FALSE)
  } else {
    # fxn is already a function object; the original's eval(fxn) was a no-op
    fxn(input, ...)
  }
}
|
/taxize/R/lowest_common.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 6,904
|
r
|
#' Retrieve the lowest common taxon and rank for a given taxon name or ID
#'
#' @export
#' @param x Vector of taxa names (character) or id (character or numeric) to query.
#' @param db character; database to query. either \code{ncbi}, \code{itis}, or
#' \code{gbif}.
#' @param rows (numeric) Any number from 1 to infinity. If the default NA, all rows are
#' considered. Note that this parameter is ignored if you pass in a taxonomic id of any of the
#' acceptable classes: tsn, colid. NCBI has a method for this function but rows doesn't work.
#' @param class_list (list) A list of classifications, as returned from
#' \code{\link[taxize]{classification}}
#' @param low_rank (character) taxonomic rank to return, of length 1
#' @param ... Other arguments passed to \code{\link[taxize]{get_tsn}},
#' \code{\link[taxize]{get_uid}}, \code{\link[taxize]{get_eolid}},
#' \code{\link[taxize]{get_colid}}, \code{\link[taxize]{get_tpsid}},
#' \code{\link[taxize]{get_gbifid}}.
#'
#' @return NA when no match, or a data.frame with columns
#' \itemize{
#' \item name
#' \item rank
#' \item id
#' }
#' @author Jimmy O'Donnell \email{jodonnellbio@@gmail.com}
#' Scott Chamberlain \email{myrmecocystus@@gmail.com}
#' @examples \dontrun{
#' id <- c("9031", "9823", "9606", "9470")
#' id_class <- classification(id, db = 'ncbi')
#' lowest_common(id[2:4], db = "ncbi")
#' lowest_common(id[2:4], db = "ncbi", low_rank = 'class')
#' lowest_common(id[2:4], db = "ncbi", low_rank = 'family')
#' lowest_common(id[2:4], class_list = id_class)
#' lowest_common(id[2:4], class_list = id_class, low_rank = 'class')
#' lowest_common(id[2:4], class_list = id_class, low_rank = 'family')
#'
#' spp <- c("Sus scrofa", "Homo sapiens", "Nycticebus coucang")
#' lowest_common(spp, db = "ncbi")
#' lowest_common(get_uid(spp))
#'
#' lowest_common(spp, db = "itis")
#' lowest_common(get_tsn(spp))
#'
#' gbifid <- c("2704179", "3119195")
#' lowest_common(gbifid, db = "gbif")
#'
#' spp <- c("Poa annua", "Helianthus annuus")
#' lowest_common(spp, db = "gbif")
#' lowest_common(get_gbifid(spp))
#'
#' cool_orchid <- c("Angraecum sesquipedale", "Dracula vampira", "Masdevallia coccinea")
#' orchid_ncbi <- get_uid(cool_orchid)
#' orchid_gbif <- get_gbifid(cool_orchid)
#' orchid_itis <- get_tsn(cool_orchid)
#'
#' orchid_hier_ncbi <- classification(orchid_ncbi, db = 'ncbi')
#' orchid_hier_gbif <- classification(orchid_gbif, db = 'gbif')
#' orchid_hier_itis <- classification(orchid_itis, db = 'itis')
#'
#' lowest_common(orchid_ncbi, low_rank = 'class')
#' lowest_common(orchid_ncbi, class_list = orchid_hier_ncbi, low_rank = 'class')
#' lowest_common(orchid_gbif, low_rank = 'class')
#' lowest_common(orchid_gbif, orchid_hier_gbif, low_rank = 'class')
#' lowest_common(get_uid(cool_orchid), low_rank = 'class')
#' lowest_common(get_uid(cool_orchid), low_rank = 'family')
#'
#' lowest_common(orchid_ncbi, class_list = orchid_hier_ncbi, low_rank = 'subfamily')
#' lowest_common(orchid_gbif, class_list = orchid_hier_gbif, low_rank = 'subfamily')
#'
#' ## Pass in sci. names
#' nms <- c("Angraecum sesquipedale", "Dracula vampira", "Masdevallia coccinea")
#' lowest_common(x = nms, db = "ncbi")
#' lowest_common(x = nms, db = "gbif")
#' # lowest_common(x = nms, db = "itis")
#'
#' ## NAs due to taxon not found, stops with error message
#' # lowest_common(orchid_itis, db = "itis")
#' # lowest_common(get_tsn(cool_orchid))
#' }
lowest_common <- function(...){
UseMethod("lowest_common")
}
#' @export
#' @rdname lowest_common
lowest_common.default <- function(x, db = NULL, rows = NA, class_list = NULL, low_rank = NULL, ...) {
  # Resolve names/raw ids to database-specific id objects, then re-dispatch
  # to the matching id method. If db is not given, fall back to the db
  # recorded on a supplied classification list.
  if (is.null(db)) if (!is.null(class_list)) db <- attr(class_list, "db")
  nstop(db)
  switch(db,
         itis = {
           id <- process_lowest_ids(x, db, get_tsn, rows = rows, ...)
           # BUG FIX: low_rank was previously dropped at this dispatch, so
           # e.g. lowest_common(spp, db = "ncbi", low_rank = "class")
           # silently ignored low_rank.
           lowest_common(id, class_list, low_rank = low_rank, ...)
         },
         ncbi = {
           id <- process_lowest_ids(x, db, get_uid, rows = rows, ...)
           lowest_common(id, class_list, low_rank = low_rank, ...)
         },
         gbif = {
           id <- process_lowest_ids(x, db, get_gbifid, rows = rows, ...)
           lowest_common(id, class_list, low_rank = low_rank, ...)
         },
         stop("the provided db value was not recognised", call. = FALSE)
  )
}
#' @export
#' @rdname lowest_common
lowest_common.uid <- function(x, class_list = NULL, low_rank = NULL, ...) {
check_lowest_ids(x)
class_list <- get_class(x, class_list, db = "uid", ...)
lc_helper(x, class_list, low_rank, ...)
}
#' @export
#' @rdname lowest_common
lowest_common.tsn <- function(x, class_list = NULL, low_rank = NULL, ...) {
check_lowest_ids(x)
class_list <- get_class(x, class_list, db = "itis", ...)
lc_helper(x, class_list, low_rank, ...)
}
#' @export
#' @rdname lowest_common
lowest_common.gbifid <- function(x, class_list = NULL, low_rank = NULL, ...) {
check_lowest_ids(x)
class_list <- get_class(x, class_list, db = "gbif", ...)
lc_helper(x, class_list, low_rank, ...)
}
# helpers -------------------------------------------------
lc_helper <- function(x, class_list, low_rank = NULL, ...) {
idc <- class_list[x]
# next line NCBI specific
cseq <- vapply(idc, function(x) x[1, 1] != "unclassified sequences", logical(1))
idc <- idc[cseq]
if (is.null(low_rank)) {
x_row <- length(Reduce(intersect, lapply(idc, "[[", 1)))
x <- idc[[1]][x_row, ]
if (x[1, "rank"] == "no rank") {
x[1, "rank"] <- next_best_taxon(idc[[1]][1:x_row, ])
}
return(x)
} else {
valid_ranks <- tolower(getranknames()[,"rankname"])
if (!(low_rank %in% valid_ranks)) {
warning('the supplied rank is not valid')
}
# low_rank_names <- as.character(unique(unlist(lapply(idc, function(x) x$name[which(x$rank == low_rank)]))))
low_rank_names <- unique(setDF(rbindlist(lapply(idc, function(x) x[which(x$rank == low_rank),]))))
if (NROW(low_rank_names) == 1) {
return(low_rank_names)
} else {
return(NA)
}
}
}
next_best_taxon <- function(x){
  # Label an unresolved rank as "below-<deepest resolved rank>", where the
  # deepest resolved rank is the last distinct value in x's "rank" column.
  ranks <- x[, "rank"]
  deepest <- tail(ranks[!duplicated(ranks)], n = 1)
  paste0("below-", deepest)
}
get_class <- function(x, y, db, ...) {
if (is.null(y)) {
classification(x, db = db, ...)
} else {
yattr <- str_replace(attr(y, "db"), "ncbi", "uid")
if (yattr != db) {
stop(sprintf("class_list input must be of class '%s'", db), call. = FALSE)
}
y
}
}
check_lowest_ids <- function(x) {
  # A lowest-common query needs at least two resolvable (non-NA) ids;
  # stop with a count of found vs. missing ids otherwise.
  present <- na.omit(x)
  n_missing <- length(x) - length(present)
  if (length(present) < 2) {
    stop(length(present), " found, ", n_missing, " were NA; > 1 needed", call. = FALSE)
  }
}
process_lowest_ids <- function(input, db, fxn, ...){
  # Wrap numeric input (or character vectors of numeric ids) directly in the
  # database's id class without re-checking; otherwise resolve names via fxn
  # (get_tsn / get_uid / get_gbifid).
  g <- tryCatch(as.numeric(as.character(input)), warning = function(e) e)
  # BUG FIX: `is.character(input) && grepl(...)` errors in R >= 4.3 when
  # input has length > 1 (scalar && with a vector RHS); reduce with all().
  if (is(g, "numeric") || (is.character(input) && all(grepl("[[:digit:]]", input)))) {
    as_fxn <- switch(db, itis = as.tsn, gbif = as.gbifid, ncbi = as.uid)
    as_fxn(input, check = FALSE)
  } else {
    # fxn is already a function object; the original's eval(fxn) was a no-op
    fxn(input, ...)
  }
}
|
##' Update data_key by processing the tracking document and renaming/adding
##' files to data_key.
##'
##' @param path Path to AFS root directory (getOption('afs.path'))
##' @param tracker tracking file (getOption('afs.tracker'))
##' @param data Data key (getOption('afs.key'))
##' @param save_key Save the returned key (defaults to "pkg/extdata/data_key.rda")
##' @param outfile Where to save key, only used if save_key is TRUE.
##' @export
update_key <- function(path=getOption('afs.path'),
                      tracker=getOption('afs.tracker'),
                      data=get_key(),
                      save_key=TRUE, outfile=NULL) {
    dkey <- data.table::copy(data)
    dat <- process_tracker(tracker=tracker)
    dat$files <- tolower(dat$files)
    rname <- filename <- NULL  # silence R CMD check NOTEs for NSE column names
    ## Renamed files
    ## Just update 'filename', 'rname' stays the same if defined
    if (length(dat$renamed)) {
        old_names <- unlist(lapply(dat$renamed, `[[`, 1), use.names=FALSE)
        new_names <- unlist(lapply(dat$renamed, `[[`, 2), use.names=FALSE)
        dkey[filename %in% old_names, filename := tolower(new_names)]
    }
    ## New files
    new_files <- dat$files[(!(dat$files %in% dkey[['filename']]))]
    ## Get file info -- including new files/renamed
    files <- c(dkey$filename, new_files)
    rnames <- dkey[data.table(filename=files), rname, on='filename']
    finfo <- file_info(path=path, files=files, rnames=rnames)
    ## Save
    if (save_key) {
        data_key <- finfo
        ## BUG FIX: the original called save(data_key, system.file(...)),
        ## passing the directory path as a second *object* to save, and then
        ## unconditionally called save(file=outfile) with outfile still NULL.
        ## Instead, default outfile to pkg/extdata/data_key.rda and save once.
        if (is.null(outfile)) {
            outfile <- file.path(system.file('extdata', package='sync.afs'),
                                 'data_key.rda')
        }
        save(data_key, file=outfile)
    }
    invisible(finfo[])
}
##' Update R file names associated with master files.
##'
##' @param rnames New R data names
##' @param filenames Corresponding master file names.
##' @param key Data key to update (getOption('afs.key'))
##' @export
set_key_names <- function(rnames, filenames, key=getOption('afs.key')) {
    ## NOTE(review): the `key` argument is never used below -- the key is
    ## re-read via get_key() and the on-disk name comes from
    ## getOption('afs.key'). Confirm whether `key` should replace one of those.
    dkey <- get_key()
    ## data.table update-join: set rname on rows whose filename matches
    dkey[list(filename = filenames), rname := rnames, on='filename']
    p <- file.path(system.file('extdata', package='sync.afs'), getOption('afs.key'))
    data_key <- dkey
    save(data_key, file=p)
}
|
/R/update_key.R
|
no_license
|
nverno/sync.afs
|
R
| false
| false
| 2,121
|
r
|
##' Update data_key by processing the tracking document and renaming/adding
##' files to data_key.
##'
##' @param path Path to AFS root directory (getOption('afs.path'))
##' @param tracker tracking file (getOption('afs.tracker'))
##' @param data Data key (getOption('afs.key'))
##' @param save_key Save the returned key (defaults to "pkg/extdata/data_key.rda")
##' @param outfile Where to save key, only used if save_key is TRUE.
##' @export
update_key <- function(path=getOption('afs.path'),
                      tracker=getOption('afs.tracker'),
                      data=get_key(),
                      save_key=TRUE, outfile=NULL) {
    dkey <- data.table::copy(data)
    dat <- process_tracker(tracker=tracker)
    dat$files <- tolower(dat$files)
    rname <- filename <- NULL  # silence R CMD check NOTEs for NSE column names
    ## Renamed files
    ## Just update 'filename', 'rname' stays the same if defined
    if (length(dat$renamed)) {
        old_names <- unlist(lapply(dat$renamed, `[[`, 1), use.names=FALSE)
        new_names <- unlist(lapply(dat$renamed, `[[`, 2), use.names=FALSE)
        dkey[filename %in% old_names, filename := tolower(new_names)]
    }
    ## New files
    new_files <- dat$files[(!(dat$files %in% dkey[['filename']]))]
    ## Get file info -- including new files/renamed
    files <- c(dkey$filename, new_files)
    rnames <- dkey[data.table(filename=files), rname, on='filename']
    finfo <- file_info(path=path, files=files, rnames=rnames)
    ## Save
    if (save_key) {
        data_key <- finfo
        ## BUG FIX: the original called save(data_key, system.file(...)),
        ## passing the directory path as a second *object* to save, and then
        ## unconditionally called save(file=outfile) with outfile still NULL.
        ## Instead, default outfile to pkg/extdata/data_key.rda and save once.
        if (is.null(outfile)) {
            outfile <- file.path(system.file('extdata', package='sync.afs'),
                                 'data_key.rda')
        }
        save(data_key, file=outfile)
    }
    invisible(finfo[])
}
##' Update R file names associated with master files.
##'
##' @param rnames New R data names
##' @param filenames Corresponding master file names.
##' @param key Data key to update (getOption('afs.key))
##' @export
set_key_names <- function(rnames, filenames, key=getOption('afs.key')) {
dkey <- get_key()
dkey[list(filename = filenames), rname := rnames, on='filename']
p <- file.path(system.file('extdata', package='sync.afs'), getOption('afs.key'))
data_key <- dkey
save(data_key, file=p)
}
|
# Auto-generated fuzz/regression input for metacoder's internal C++ routine
# intersect_line_rectangle() (RcppDeepState/valgrind test case).
# b is a zero-filled numeric vector and p1/p2 hold extreme doubles chosen to
# probe numeric edge cases -- the exact argument shapes expected by the C++
# function are not visible here (TODO confirm against metacoder sources).
testlist <- list(b = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), p1 = c(8.5728629954997e-312, -2.59103114190503e-82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161), p2 = -1.72131968218895e+83)
result <- do.call(metacoder:::intersect_line_rectangle,testlist)
str(result)
|
/metacoder/inst/testfiles/intersect_line_rectangle/AFL_intersect_line_rectangle/intersect_line_rectangle_valgrind_files/1615769982-test.R
|
permissive
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 728
|
r
|
testlist <- list(b = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), p1 = c(8.5728629954997e-312, -2.59103114190503e-82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161), p2 = -1.72131968218895e+83)
result <- do.call(metacoder:::intersect_line_rectangle,testlist)
str(result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulate.R
\name{simulate}
\alias{simulate}
\title{Simulate Spread of Gene for Altruism}
\usage{
simulate(initial_pop = list(m0 = 90, m1 = 0, m2 = 10, f0 = 90, f1 = 0, f2 =
10), average_litter_size = 5, birth_rate_natural = 0.05,
death_rate_natural = 0, prob_attack = 0.2, warner_death_prob = 0.4,
nonwarner_death_prob = 0.2, hider_death_prob = 0, sim_gens = 2,
capacity = 2000, mating_behavior = NULL, culling_behavior = NULL,
attack_behavior = NULL, relationship_method = c("matrix", "graph",
"none"), graph = TRUE)
}
\arguments{
\item{initial_pop}{List comprised of six named elements:
\describe{
\item{m0}{initial number of males with 0 altruist alleles;}
\item{m1}{initial number of males with 1 altruist allele;}
\item{m2}{initial number of males with 2 altruist alleles;}
\item{f0}{initial number of females with 0 altruist alleles;}
\item{f1}{initial number of females with 1 altruist allele;}
\item{f2}{initial number of females with 2 altruist alleles.}
}}
\item{average_litter_size}{Mean size of a litter.}
\item{birth_rate_natural}{Birth rate for the population.}
\item{death_rate_natural}{Death rate for a size-zero population. Rises
linearly to \code{birth_rate_natural} as population approaches
\code{capacity}.}
\item{prob_attack}{The probability of a predator attack in any given
generation.}
\item{warner_death_prob}{Probability that an individual who warns
others during an attack will be killed.}
\item{nonwarner_death_prob}{Probability of death for an individual who
does not warn others but who was not forewarned by a warner.}
\item{hider_death_prob}{Probability of death during an attack for an
individual who accepts a warning.}
\item{sim_gens}{Number of generations to simulate.}
\item{capacity}{Carrying capacity of the population.}
\item{mating_behavior}{Custom function to govern how eligible
partners form couples.}
\item{culling_behavior}{Custom function to determine death
probabilities of individuals at the culling stage.}
\item{attack_behavior}{Custom function to govern behavior of
warners when population is under attack.}
\item{relationship_method}{One of \code{"matrix"}, \code{"graph"}
and \code{"none"}. Defaults to \code{"matrix"}. Use \code{"none"}
only if no relationships (other than mother and father id) need to
be tracked.}
\item{graph}{If \code{TRUE}, provides a graph of the total
per-capita warner alleles in population over the generations.}
}
\value{
A data frame with information on the population at
each generation. Each row describes the population at the end of
a single birth-death-attack cycle. Variables are:
\describe{
\item{populationSize}{total population}
\item{males}{total number of males}
\item{males0}{number of males with no alleles for altruism}
\item{males1}{number of males with one allele for altruism}
\item{males2}{number of males with two alleles for altruism}
\item{females}{total number of females}
\item{females0}{number of females with no alleles for altruism}
\item{females1}{number of females with one allele for altruism}
\item{females2}{number of females with two alleles for altruism}
}
}
\description{
Simulate Spread of Gene for Altruism
}
\note{
For details on \code{mating_behavior}, \code{culling_behavior}
and \code{attack_behavior}
consult the
\href{https://homerhanumat.github.io/simaltruist}{package documentation}.
}
\examples{
\dontrun{
# use defaults, get a graph:
pop <- simulate(sim_gens = 400)
# attacks are infrequent, and it's dangerous to warn:
pop <- simulate(sim_gens = 200,
warner_death_prob = 0.8,
                prob_attack = 0.05)
# use an alternative mating function exported by package:
pop <- simulate(sim_gens = 200,
warner_death_prob = 0.8,
mating_behavior = list(
fn = sexualSelection,
args = list(
matrix(
c(1, 5, 10, 1, 1, 1, 10, 5, 1),
nrow = 3,
ncol = 3))))
}
}
|
/man/simulate.Rd
|
permissive
|
homerhanumat/simaltruist
|
R
| false
| true
| 4,090
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulate.R
\name{simulate}
\alias{simulate}
\title{Simulate Spread of Gene for Altruism}
\usage{
simulate(initial_pop = list(m0 = 90, m1 = 0, m2 = 10, f0 = 90, f1 = 0, f2 =
10), average_litter_size = 5, birth_rate_natural = 0.05,
death_rate_natural = 0, prob_attack = 0.2, warner_death_prob = 0.4,
nonwarner_death_prob = 0.2, hider_death_prob = 0, sim_gens = 2,
capacity = 2000, mating_behavior = NULL, culling_behavior = NULL,
attack_behavior = NULL, relationship_method = c("matrix", "graph",
"none"), graph = TRUE)
}
\arguments{
\item{initial_pop}{List comprised of six named elements:
\describe{
\item{m0}{initial number of males with 0 altruist alleles;}
\item{m1}{initial number of males with 1 altruist allele;}
\item{m2}{initial number of males with 2 altruist alleles;}
\item{f0}{initial number of females with 0 altruist alleles;}
\item{f1}{initial number of females with 1 altruist allele;}
\item{f2}{initial number of females with 2 altruist alleles.}
}}
\item{average_litter_size}{Mean size of a litter.}
\item{birth_rate_natural}{Birth rate for the population.}
\item{death_rate_natural}{Death rate for a size-zero population. Rises
linearly to \code{birth_rate_natural} as population approaches
\code{capacity}.}
\item{prob_attack}{The probability of a predator attack in any given
generation.}
\item{warner_death_prob}{Probability that an individual who warns
others during an attack will be killed.}
\item{nonwarner_death_prob}{Probability of death for an individual who
does not warn others but who was not forewarned by a warner.}
\item{hider_death_prob}{Probability of death during an attack for an
individual who accepts a warning.}
\item{sim_gens}{Number of generations to simulate.}
\item{capacity}{Carrying capacity of the population.}
\item{mating_behavior}{Custom function to govern how eligible
partners form couples.}
\item{culling_behavior}{Custom function to determine death
probabilities of individuals at the culling stage.}
\item{attack_behavior}{Custom function to govern behavior of
warners when population is under attack.}
\item{relationship_method}{One of \code{"matrix"}, \code{"graph"}
and \code{"none"}. Defaults to \code{"matrix"}. Use \code{"none"}
only if no relationships (other than mother and father id) need to
be tracked.}
\item{graph}{If \code{TRUE}, provides a graph of the total
per-capita warner alleles in population over the generations.}
}
\value{
A data frame with information on the population at
each generation. Each row describes the population at the end of
a single birth-death-attack cycle. Variables are:
\describe{
\item{populationSize}{total population}
\item{males}{total number of males}
\item{males0}{number of males with no alleles for altruism}
\item{males1}{number of males with one allele for altruism}
\item{males2}{number of males with two alleles for altruism}
\item{females}{total number of females}
\item{females0}{number of females with no alleles for altruism}
\item{females1}{number of females with one allele for altruism}
\item{females2}{number of females with two alleles for altruism}
}
}
\description{
Simulate Spread of Gene for Altruism
}
\note{
For details on \code{mating_behavior}, \code{culling_behavior}
and \code{attack_behavior}
consult the
\href{https://homerhanumat.github.io/simaltruist}{package documentation}.
}
\examples{
\dontrun{
# use defaults, get a graph:
pop <- simulate(sim_gens = 400)
# attacks are infrequent, and it's dangerous to warn:
pop <- simulate(sim_gens = 200,
warner_death_prob = 0.8,
                prob_attack = 0.05)
# use an alternative mating function exported by package:
pop <- simulate(sim_gens = 200,
warner_death_prob = 0.8,
mating_behavior = list(
fn = sexualSelection,
args = list(
matrix(
c(1, 5, 10, 1, 1, 1, 10, 5, 1),
nrow = 3,
ncol = 3))))
}
}
|
#' An animated map for vector data
#' This project is just at its very beginning!
#'
#' @param viewpoint Text. Following the pattern "$projection=$x,$y,$z".
#' @param mode Currently only two values: "air" or "ocean".
#' @param fn filename.
#' @param width Width of the display area of the widget.
#' @param height Height of the display area of the widget.
#' @param elementId Name of the widget.
#' @import htmlwidgets
#' @export
Rearth <- function(viewpoint = "", mode = "air", fn,
                   width = NULL, height = NULL, elementId = NULL) {
  # Locate the installed htmlwidgets assets of the Rearth package.
  widget_path <- system.file("htmlwidgets", package = "Rearth")
  # Destination for the user's data file inside the widget directory.
  # file.path() is portable; the original hard-coded Windows-only "\\data\\"
  # separators, which breaks path construction on Linux/macOS.
  Fname <- file.path(normalizePath(widget_path), "data", "_visualization_.json")
  # Build the fragment URL understood by the bundled earth viewer page.
  URL_path <- "lib/index-1.0.0/earth_index.html#current/"
  if (mode == "air") {
    URL_path <- paste0(URL_path, "wind/surface/level/")
  } else {
    URL_path <- paste0(URL_path, "ocean/surface/currents/")
  }
  URL_path <- paste0(URL_path, viewpoint)
  # Copy the data file into place; abort early if the copy fails.
  fok <- file.copy(from = fn, to = Fname, overwrite = TRUE)
  if (!fok) {
    stop("Failed to get the data file.")
  }
  # forward options using x
  x <- list(
    viewpoint = URL_path,
    fname = Fname
  )
  # create widget
  htmlwidgets::createWidget(
    name = 'Rearth',
    x,
    width = width,
    height = height,
    package = 'Rearth',
    elementId = elementId
  )
}
#' Shiny bindings for Rearth
#'
#' Output and render functions for using Rearth within Shiny
#' applications and interactive Rmd documents.
#'
#' @param outputId output variable to read from
#' @param width,height Must be a valid CSS unit (like \code{'100\%'},
#' \code{'400px'}, \code{'auto'}) or a number, which will be coerced to a
#' string and have \code{'px'} appended.
#' @param expr An expression that generates a Rearth
#' @param env The environment in which to evaluate \code{expr}.
#' @param quoted Is \code{expr} a quoted expression (with \code{quote()})? This
#' is useful if you want to save an expression in a variable.
#'
#' @name Rearth-shiny
#'
#' @export
RearthOutput <- function(outputId, width = '100%', height = '400px'){
  # Shiny output binding: allocates the widget's display slot in the UI.
  htmlwidgets::shinyWidgetOutput(
    outputId,
    'Rearth',
    width,
    height,
    package = 'Rearth'
  )
}
#' @rdname Rearth-shiny
#' @export
renderRearth <- function(expr, env = parent.frame(), quoted = FALSE) {
  # Capture the unevaluated expression unless the caller pre-quoted it.
  if (!quoted) {
    expr <- substitute(expr)  # force quoted
  }
  htmlwidgets::shinyRenderWidget(expr, RearthOutput, env, quoted = TRUE)
}
|
/R/Rearth.R
|
permissive
|
Rmonsoon/Rearth
|
R
| false
| false
| 2,534
|
r
|
#' An animated map for vector data
#' This project is just at its very beginning!
#'
#' @param viewpoint Text. Following the pattern "$projection=$x,$y,$z".
#' @param mode Currently only two values: "air" or "ocean".
#' @param fn filename.
#' @param width Width of the display area of the widget.
#' @param height Height of the display area of the widget.
#' @param elementId Name of the widget.
#' @import htmlwidgets
#' @export
Rearth <- function(viewpoint="", mode="air", fn,
width = NULL, height = NULL, elementId = NULL) {
# Locate the installed htmlwidgets assets of the Rearth package.
widget_path <- system.file("htmlwidgets", package="Rearth")
# Destination: <widget_path>\data\_visualization_.json.
# NOTE(review): the "\\data\\" paste separator is Windows-specific;
# file.path() would be portable -- confirm the intended platforms.
Fname <- paste(normalizePath(widget_path),"_visualization_.json",sep="\\data\\")
# Fragment URL understood by the bundled earth viewer page.
URL_path = "lib/index-1.0.0/earth_index.html#current/"
if (mode == "air") {
URL_path = paste(URL_path, "wind/surface/level/", sep="")
} else {
URL_path = paste(URL_path, "ocean/surface/currents/", sep="")
}
URL_path = paste(URL_path, viewpoint, sep="")
# Copy the user's data file into the widget's data directory.
fok <- file.copy(from=fn, to=Fname, overwrite = TRUE)
if (fok) {
# print(URL_path)
} else {
stop("Failed to get the data file.")
}
# forward options using x
x = list(
viewpoint = URL_path,
fname = Fname
)
# create widget
htmlwidgets::createWidget(
name = 'Rearth',
x,
width = width,
height = height,
package = 'Rearth',
elementId = elementId
)
}
#' Shiny bindings for Rearth
#'
#' Output and render functions for using Rearth within Shiny
#' applications and interactive Rmd documents.
#'
#' @param outputId output variable to read from
#' @param width,height Must be a valid CSS unit (like \code{'100\%'},
#' \code{'400px'}, \code{'auto'}) or a number, which will be coerced to a
#' string and have \code{'px'} appended.
#' @param expr An expression that generates a Rearth
#' @param env The environment in which to evaluate \code{expr}.
#' @param quoted Is \code{expr} a quoted expression (with \code{quote()})? This
#' is useful if you want to save an expression in a variable.
#'
#' @name Rearth-shiny
#'
#' @export
RearthOutput <- function(outputId, width = '100%', height = '400px'){
# Shiny output binding: allocates the widget's display slot in the UI.
htmlwidgets::shinyWidgetOutput(outputId, 'Rearth', width, height, package = 'Rearth')
}
#' @rdname Rearth-shiny
#' @export
renderRearth <- function(expr, env = parent.frame(), quoted = FALSE) {
# Capture the unevaluated expression unless the caller pre-quoted it.
if (!quoted) { expr <- substitute(expr) } # force quoted
htmlwidgets::shinyRenderWidget(expr, RearthOutput, env, quoted = TRUE)
}
|
## Create the fourth plot (plot4.png): a 2x2 panel of four time-series plots
## from the UCI household power-consumption data.
# Load the dplyr package for later use
# NOTE(review): dplyr does not appear to be used anywhere below -- confirm
# whether this dependency is actually needed.
library(dplyr)
# Read the data from a file into a dataframe ("?" marks missing values)
dat <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
# Split out just the dates that we're looking at (Feb. 1st and 2nd, 2007)
# The dates in the file appear to be in day/month/year format
df <- dat[ dat$Date %in% c("1/2/2007", "2/2/2007"), ]
# You may find it useful to convert the Date and Time variables to Date/Time classes in R using the strptime() and as.Date() functions
# Convert the Date column, which is currently a string to an R Date type
df$Date <- as.Date(df$Date, "%d/%m/%Y")
# Paste the date and time together and convert to POSIX.lt class
x <- paste(df$Date, df$Time)
DateTime <- strptime(x, "%Y-%m-%d %H:%M:%S")
# Add the DateTime column back to the original dataset. I've left the Date and Time variable
# to check myself
df <- cbind(DateTime, df)
## Construct the plots
# open a png device
png(file = "plot4.png", width = 480, height = 480)
# Create a 2 by 2 "Canvas"; mfcol fills column-wise, so plots 1-2 occupy the
# left column (top, bottom) and plots 3-4 the right column (top, bottom).
par(mfcol = c(2,2))
# Build plot 1: global active power over time
plot(df$DateTime, df$Global_active_power , ylab = "Global Active Power", xlab = "", type = "l")
# Build plot 2: the three sub-metering series overlaid, with a legend
plot(df$DateTime, df$Sub_metering_1 , ylab = "Energy sub metering", xlab = "", type = "l")
lines(df$DateTime, df$Sub_metering_2, type="l", col="red")
lines(df$DateTime, df$Sub_metering_3, type="l", col="blue")
legend("topright", legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3")
,lty = c(1,1,1)
,bty = "n"
,col = c("black","red", "blue"))
# Build plot 3: voltage over time
plot(df$DateTime, df$Voltage , ylab = "Voltage", xlab = "datetime", type = "l")
# Build plot 4: global reactive power over time
plot(df$DateTime, df$Global_reactive_power , ylab = "Global_reactive_power", xlab = "datetime", type = "l")
# Close the png device, flushing the file to disk
dev.off()
|
/plot4.R
|
no_license
|
DavidSilbermann/ExData_Plotting1
|
R
| false
| false
| 1,828
|
r
|
## Create the fourth plot (plot4.png): a 2x2 panel of four time-series plots
## from the UCI household power-consumption data.
# Load the dplyr package for later use
# NOTE(review): dplyr does not appear to be used anywhere below -- confirm
# whether this dependency is actually needed.
library(dplyr)
# Read the data from a file into a dataframe ("?" marks missing values)
dat <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
# Split out just the dates that we're looking at (Feb. 1st and 2nd, 2007)
# The dates in the file appear to be in day/month/year format
df <- dat[ dat$Date %in% c("1/2/2007", "2/2/2007"), ]
# You may find it useful to convert the Date and Time variables to Date/Time classes in R using the strptime() and as.Date() functions
# Convert the Date column, which is currently a string to an R Date type
df$Date <- as.Date(df$Date, "%d/%m/%Y")
# Paste the date and time together and convert to POSIX.lt class
x <- paste(df$Date, df$Time)
DateTime <- strptime(x, "%Y-%m-%d %H:%M:%S")
# Add the DateTime column back to the original dataset. I've left the Date and Time variable
# to check myself
df <- cbind(DateTime, df)
## Construct the plots
# open a png device
png(file = "plot4.png", width = 480, height = 480)
# Create a 2 by 2 "Canvas"; mfcol fills column-wise, so plots 1-2 occupy the
# left column (top, bottom) and plots 3-4 the right column (top, bottom).
par(mfcol = c(2,2))
# Build plot 1: global active power over time
plot(df$DateTime, df$Global_active_power , ylab = "Global Active Power", xlab = "", type = "l")
# Build plot 2: the three sub-metering series overlaid, with a legend
plot(df$DateTime, df$Sub_metering_1 , ylab = "Energy sub metering", xlab = "", type = "l")
lines(df$DateTime, df$Sub_metering_2, type="l", col="red")
lines(df$DateTime, df$Sub_metering_3, type="l", col="blue")
legend("topright", legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3")
,lty = c(1,1,1)
,bty = "n"
,col = c("black","red", "blue"))
# Build plot 3: voltage over time
plot(df$DateTime, df$Voltage , ylab = "Voltage", xlab = "datetime", type = "l")
# Build plot 4: global reactive power over time
plot(df$DateTime, df$Global_reactive_power , ylab = "Global_reactive_power", xlab = "datetime", type = "l")
# Close the png device, flushing the file to disk
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dprime.R
\name{dprime}
\alias{dprime}
\title{Calculates Signal Detection Theory indices.}
\usage{
dprime(n_hit, n_miss, n_fa, n_cr)
}
\arguments{
\item{n_hit}{Number of hits.}
\item{n_miss}{Number of misses.}
\item{n_fa}{Number of false alarms.}
\item{n_cr}{Number of correct rejections.}
}
\description{
Calculates the d', the beta, the A' and the B''D based on the
signal detection theory (SRT). See Pallier (2002) for the algorithms.
}
\value{
A list containing 5 objects.
\item{dprime}{The d'. d' reflects the distance between the two distributions: signal, and signal+noise and corresponds to the Z value of the hit-rate minus that of the false-alarm rate.}
\item{beta}{The beta. The value for beta is the ratio of the normal density functions at the criterion of the Z values used in the computation of d'. This reflects an observer's bias to say 'yes' or 'no' with the unbiased observer having a value around 1.0. As the bias to say 'yes' increases, resulting in a higher hit-rate and false-alarm-rate, beta approaches 0.0. As the bias to say 'no' increases, resulting in a lower hit-rate and false-alarm rate, beta increases over 1.0 on an open-ended scale.}
\item{aprime}{The A'. Non-parametric estimate of discriminability. An A' near 1.0 indicates good discriminability, while a value near 0.5 means chance performance.}
\item{bppd}{The B''D. Non-parametric estimate of bias. A B''D equal to 0.0 indicates no bias, positive numbers represent conservative bias (i.e. a tendency to answer 'no'), negative numbers represent liberal bias (i.e. a tendency to answer 'yes'). The maximum absolute value is 1.0.}
\item{c}{The Criterion. the number of standard deviations from the midpoint between these two distributions, i.e. a measure on a continuum from "conservative" to "liberal".}
}
\details{
For d' and beta, adjustments for extreme values are made following the recommendations of Hautus (1995).
}
%\section{A Custom Section}{
%
%
%Text accompanying the custom section.
%}
\examples{
n_hit <- 9
n_miss <- 1
n_fa <- 2
n_cr <- 7
indices <- dprime(n_hit, n_miss, n_fa, n_cr)
}
|
/man/dprime.Rd
|
no_license
|
bgautijonsson/neuropsychology.R
|
R
| false
| true
| 2,182
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dprime.R
\name{dprime}
\alias{dprime}
\title{Calculates Signal Detection Theory indices.}
\usage{
dprime(n_hit, n_miss, n_fa, n_cr)
}
\arguments{
\item{n_hit}{Number of hits.}
\item{n_miss}{Number of misses.}
\item{n_fa}{Number of false alarms.}
\item{n_cr}{Number of correct rejections.}
}
\description{
Calculates the d', the beta, the A' and the B''D based on the
signal detection theory (SRT). See Pallier (2002) for the algorithms.
}
\value{
A list containing 5 objects.
\item{dprime}{The d'. d' reflects the distance between the two distributions: signal, and signal+noise and corresponds to the Z value of the hit-rate minus that of the false-alarm rate.}
\item{beta}{The beta. The value for beta is the ratio of the normal density functions at the criterion of the Z values used in the computation of d'. This reflects an observer's bias to say 'yes' or 'no' with the unbiased observer having a value around 1.0. As the bias to say 'yes' increases, resulting in a higher hit-rate and false-alarm-rate, beta approaches 0.0. As the bias to say 'no' increases, resulting in a lower hit-rate and false-alarm rate, beta increases over 1.0 on an open-ended scale.}
\item{aprime}{The A'. Non-parametric estimate of discriminability. An A' near 1.0 indicates good discriminability, while a value near 0.5 means chance performance.}
\item{bppd}{The B''D. Non-parametric estimate of bias. A B''D equal to 0.0 indicates no bias, positive numbers represent conservative bias (i.e. a tendency to answer 'no'), negative numbers represent liberal bias (i.e. a tendency to answer 'yes'). The maximum absolute value is 1.0.}
\item{c}{The Criterion. the number of standard deviations from the midpoint between these two distributions, i.e. a measure on a continuum from "conservative" to "liberal".}
}
\details{
For d' and beta, adjustments for extreme values are made following the recommendations of Hautus (1995).
}
%\section{A Custom Section}{
%
%
%Text accompanying the custom section.
%}
\examples{
n_hit <- 9
n_miss <- 1
n_fa <- 2
n_cr <- 7
indices <- dprime(n_hit, n_miss, n_fa, n_cr)
}
|
library(tree)
### Name: prune.tree
### Title: Cost-complexity Pruning of Tree Object
### Aliases: prune.tree prune.misclass
### Keywords: tree
### ** Examples
# Fit a classification tree to the forensic-glass data from MASS and plot it
# (print() returns the tree invisibly, so plot() still receives the object).
data(fgl, package="MASS")
fgl.tr <- tree(type ~ ., fgl)
plot(print(fgl.tr))
# Average the cross-validated deviance over 5 runs of cv.tree() to smooth
# out the randomness of the CV folds, then plot deviance vs. tree size.
fgl.cv <- cv.tree(fgl.tr,, prune.tree)
for(i in 2:5) fgl.cv$dev <- fgl.cv$dev +
cv.tree(fgl.tr,, prune.tree)$dev
fgl.cv$dev <- fgl.cv$dev/5
plot(fgl.cv)
|
/data/genthat_extracted_code/tree/examples/prune.tree.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 399
|
r
|
library(tree)
### Name: prune.tree
### Title: Cost-complexity Pruning of Tree Object
### Aliases: prune.tree prune.misclass
### Keywords: tree
### ** Examples
# Fit a classification tree to the forensic-glass data from MASS and plot it
# (print() returns the tree invisibly, so plot() still receives the object).
data(fgl, package="MASS")
fgl.tr <- tree(type ~ ., fgl)
plot(print(fgl.tr))
# Average the cross-validated deviance over 5 runs of cv.tree() to smooth
# out the randomness of the CV folds, then plot deviance vs. tree size.
fgl.cv <- cv.tree(fgl.tr,, prune.tree)
for(i in 2:5) fgl.cv$dev <- fgl.cv$dev +
cv.tree(fgl.tr,, prune.tree)$dev
fgl.cv$dev <- fgl.cv$dev/5
plot(fgl.cv)
|
################################################################################
## Allen Roberts
## April 23, 2020
## Stat 534
## Homework 4: Helper functions
################################################################################
require(MASS)
## Log-determinant of a matrix, computed as the sum of the logs of its
## eigenvalues.  Intended for symmetric positive-definite input (negated
## Hessians, covariance matrices); a non-positive eigenvalue yields NaN.
## only.values = TRUE skips the (unused) eigenvector computation.
logdet <- function(mat)
{
  return(sum(log(eigen(mat, only.values = TRUE)$values)))
}
# Fit a univariate logistic regression with glm() and return the maximum
# likelihood estimates of the intercept (beta0) and slope (beta1).
# `response` and `explanatory` are column names/indices into `data`.
getcoefglm <- function(response,explanatory,data)
{
  fit <- glm(data[,response] ~ data[,explanatory],
             family=binomial(link=logit))
  return(coef(fit))
}
# Inverse of the logit function (the logistic sigmoid): maps the real
# line into (0, 1).  Vectorized over x.
inverseLogit <- function(x)
{
  ex <- exp(x)
  return(ex/(1+ex))
}
# Derivative of the logistic sigmoid, exp(x)/(1+exp(x))^2; used as the
# per-observation weight when building the Hessian.  Vectorized over x.
inverseLogit2 <- function(x)
{
  ex <- exp(x)
  return(ex/(1+ex)^2)
}
# Computes pi_i = P(y_i = 1 | x_i) for each observation: prepend an
# intercept column to x and push the linear predictor through the sigmoid.
getPi <- function(x,beta)
{
  x0 <- cbind(rep(1,length(x)), x)
  eta <- x0 %*% beta
  return(inverseLogit(eta))
}
# Per-observation Hessian weights: the sigmoid derivative evaluated at
# each linear predictor (intercept column prepended to x).
getPi2 <- function(x,beta)
{
  x0 <- cbind(rep(1,length(x)), x)
  eta <- x0 %*% beta
  return(inverseLogit2(eta))
}
# Logistic log-likelihood (formula (3) in the handout):
#   sum_i [ y_i log(pi_i) + (1 - y_i) log(1 - pi_i) ]
logisticLoglik <- function(y,x,beta)
{
  p <- getPi(x, beta)
  return(sum(y*log(p)) + sum((1-y)*log(1-p)))
}
## Log of the unnormalized posterior l* (Bayesian logistic regression,
## eq 2.5): the log-likelihood plus an independent N(0, 1) log-prior on
## each of the two coefficients.
logisticLoglikStar <- function(y, x, beta) {
  logprior <- -log(2*pi) - 1/2*(sum(beta^2))
  return(logprior + logisticLoglik(y, x, beta))
}
# Gradient of the log-posterior l* with respect to (beta0, beta1).
# The "- beta" terms come from the N(0, 1) prior on each coefficient.
getGradient <- function(y,x,beta)
{
  Pi <- getPi(x,beta)
  resid <- y - Pi
  g <- matrix(0,2,1)
  g[1,1] <- sum(resid) - beta[1]
  g[2,1] <- sum(resid*x) - beta[2]
  return(g)
}
# Hessian of the log-posterior l*.  The "+ 1" diagonal entries come from
# the N(0, 1) prior; the final negation makes the returned matrix the
# (negative-definite) second-derivative matrix used by Newton-Raphson.
getHessian <- function(y,x,beta)
{
  w <- getPi2(x,beta)
  h <- matrix(0,2,2)
  h[1,1] <- sum(w) + 1
  h[2,1] <- h[1,2] <- sum(w*x)
  h[2,2] <- sum(w*x^2) + 1
  return(-h)
}
# Newton-Raphson maximization of the Bayesian logistic log-posterior l*.
# Returns the 2x1 posterior mode (beta0, beta1) for the regression of
# `response` on `explanatory` in `data`.  Iterates until the improvement
# in l* drops below 1e-6 or maxIter iterations have been performed.
getcoefNR <- function(response,explanatory,data, maxIter = 1000)
{
  # Start from beta = (0, 0).
  beta = matrix(0,2,1);
  y = data[,response];
  x = data[,explanatory];
  # Current value of the log-posterior l*.
  currentLoglik = logisticLoglikStar(y,x,beta);
  iter <- 0
  # Capped loop; the two break statements below give normal termination.
  while(iter < maxIter)
  {
    iter <- iter + 1
    # Newton step: beta_new = beta - H(beta)^{-1} g(beta).
    newBeta = beta - solve(getHessian(y,x,beta))%*%getGradient(y,x,beta);
    newLoglik = logisticLoglikStar(y,x,newBeta);
    # Near the mode each Newton step must increase l*; a decrease signals
    # a coding error (or a step taken outside the quadratic region).
    if(newLoglik<currentLoglik)
    {
      cat("CODING ERROR!!\n");
      break;
    }
    beta = newBeta;
    # Converged: the log-posterior improved by less than 1e-6.
    if(newLoglik-currentLoglik<1e-6)
    {
      break;
    }
    currentLoglik = newLoglik;
  }
  return(beta);
}
## Laplace approximation to the LOG marginal likelihood of the univariate
## Bayesian logistic regression:
##   log p(D) ~= log(2*pi) + l*(betaMode) - (1/2) log det(-H(betaMode)).
## `betaMode` must be the posterior mode (e.g. the output of getcoefNR).
getLaplaceApprox <- function(response, explanatory, data, betaMode) {
  y <- data[,response]
  x <- data[,explanatory]
  # Log-posterior l* evaluated at the mode.
  lstar <- logisticLoglikStar(y, x, beta = betaMode)
  # Gaussian-integral correction around the mode.
  log_p_d <- log(2*pi) + lstar - (1/2)*logdet(-getHessian(y,x,betaMode))
  return(log_p_d)
}
## Metropolis-Hastings sampler for the posterior P(beta0, beta1 | D) of the
## Bayesian univariate logistic regression.  Proposals are multivariate
## normal, centred at the current state, with covariance equal to the
## inverse negated Hessian at the posterior mode `betaMode`.  Returns a
## niter x 2 matrix of draws (one row per iteration).  Requires MASS for
## mvrnorm; output depends on the RNG state.
sampleMH <- function(response, explanatory, data, betaMode, niter) {
  y <- data[, response]
  x <- data[, explanatory]
  samples <- matrix(0, nrow = niter, ncol = 2)
  ## Proposal distribution covariance matrix
  covMat <- -solve(getHessian(y, x, beta = betaMode))
  ## Initial state
  currentBeta <- betaMode
  ## Start Markov chain
  for(k in 1:niter) {
    ## Sample candidate beta from the (symmetric) proposal distribution
    candidateBeta <- mvrnorm(n = 1, mu = currentBeta, Sigma = covMat)
    ## score = log acceptance ratio (proposal terms cancel by symmetry)
    score <- logisticLoglikStar(y, x, candidateBeta) - logisticLoglikStar(y, x, currentBeta)
    ## NOTE(review): the `score >= 1` threshold looks odd but is harmless:
    ## for 0 <= score < 1 the log(u) <= score test below always accepts
    ## (log(u) <= 0), so overall the chain follows the standard
    ## "accept when score >= 0, else with probability exp(score)" rule.
    if(score >= 1) {
      currentBeta <- candidateBeta
    } else {
      u <- runif(n = 1, min = 0, max = 1)
      if(log(u) <= score) {
        currentBeta <- candidateBeta
      }
    }
    ## Update chain (a rejected candidate repeats the current state)
    samples[k, ] <- currentBeta
  }
  # hist(samples[, 1])
  # plot(density(samples[,1]))
  # abline(v = mean(samples[,1]))
  # hist(samples[, 2])
  # plot(density(samples[,2]))
  # abline(v = mean(samples[,2]))
  return(samples)
}
## Posterior means of (beta0, beta1), estimated by averaging `niter`
## Metropolis-Hastings draws from the joint posterior given the data.
## Returns a length-2 numeric vector.
getPosteriorMeans <- function(response, explanatory, data, betaMode, niter) {
  ## Draw from the posterior via Metropolis-Hastings
  draws <- sampleMH(response, explanatory, data, betaMode, niter)
  ## Column-wise sample means -> c(mean beta0, mean beta1)
  return(apply(draws, 2, mean))
}
|
/hw4/helperFunc.R
|
no_license
|
dallenroberts/stat-534
|
R
| false
| false
| 5,839
|
r
|
################################################################################
## Allen Roberts
## April 23, 2020
## Stat 534
## Homework 4: Helper functions
################################################################################
require(MASS)
## Calculate the log determinant of a matrix
logdet <- function(mat)
{
return(sum(log(eigen(mat)$values)))
}
#this function uses 'glm' to fit a logistic regression
#and obtain the MLEs of the two coefficients beta0 and beta1
getcoefglm <- function(response,explanatory,data)
{
return(coef(glm(data[,response] ~ data[,explanatory],
family=binomial(link=logit))));
}
#the inverse of the logit function
inverseLogit <- function(x)
{
return(exp(x)/(1+exp(x)));
}
#function for the computation of the Hessian
inverseLogit2 <- function(x)
{
return(exp(x)/(1+exp(x))^2);
}
#computes pi_i = P(y_i = 1 | x_i)
getPi <- function(x,beta)
{
x0 = cbind(rep(1,length(x)),x);
return(inverseLogit(x0%*%beta));
}
#another function for the computation of the Hessian
getPi2 <- function(x,beta)
{
x0 = cbind(rep(1,length(x)),x);
return(inverseLogit2(x0%*%beta));
}
#logistic log-likelihood (formula (3) in your handout)
logisticLoglik <- function(y,x,beta)
{
Pi = getPi(x,beta);
return(sum(y*log(Pi))+sum((1-y)*log(1-Pi)));
}
## Logistic log-likelihood star (from Bayesian logistic regression eq 2.5)
logisticLoglikStar <- function(y, x, beta) {
loglik <- logisticLoglik(y,x, beta)
return(-log(2*pi) - 1/2*(sum(beta^2)) + loglik)
}
#obtain the gradient for Newton-Raphson
getGradient <- function(y,x,beta)
{
gradient = matrix(0,2,1);
Pi = getPi(x,beta);
# gradient[1,1] = sum(y-Pi);
# gradient[2,1] = sum((y-Pi)*x);
## Updated to work with Bayesian model
gradient[1,1] = sum(y-Pi) - beta[1];
gradient[2,1] = sum((y-Pi)*x) - beta[2];
return(gradient);
}
#obtain the Hessian for Newton-Raphson
getHessian <- function(y,x,beta)
{
hessian = matrix(0,2,2);
Pi2 = getPi2(x,beta);
# hessian[1,1] = sum(Pi2);
# hessian[1,2] = sum(Pi2*x);
# hessian[2,1] = hessian[1,2];
# hessian[2,2] = sum(Pi2*x^2);
## Updated to work with Bayesian model
hessian[1,1] = sum(Pi2) + 1;
hessian[1,2] = sum(Pi2*x);
hessian[2,1] = hessian[1,2];
hessian[2,2] = sum(Pi2*x^2) + 1;
return(-hessian);
}
#this function implements our own Newton-Raphson procedure
getcoefNR <- function(response,explanatory,data, maxIter = 1000)
{
#2x1 matrix of coefficients`
beta = matrix(0,2,1);
y = data[,response];
x = data[,explanatory];
#current value of log-likelihood
currentLoglik = logisticLoglikStar(y,x,beta);
iter <- 0
#infinite loop unless we stop it someplace inside
while(iter < maxIter)
{
iter <- iter + 1
newBeta = beta - solve(getHessian(y,x,beta))%*%getGradient(y,x,beta);
newLoglik = logisticLoglikStar(y,x,newBeta);
#at each iteration the log-likelihood must increase
if(newLoglik<currentLoglik)
{
cat("CODING ERROR!!\n");
break;
}
beta = newBeta;
#stop if the log-likelihood does not improve by too much
if(newLoglik-currentLoglik<1e-6)
{
break;
}
currentLoglik = newLoglik;
}
return(beta);
}
## getLaplaceApprox uses the Laplace Approximation to calcuate an approximate
## marginal likelihood for univariate Bayesian logistic regression. Note that
## this function calculates and returns the log-likehood, as specified by the
## TA on the discussion board
getLaplaceApprox <- function(response, explanatory, data, betaMode) {
y <- data[,response]
x <- data[,explanatory]
## Calculate l*(beta_0, beta_1)
loglik_star <- logisticLoglikStar(y,x,
beta = betaMode)
## Calculate the log marginal likelihood
log_p_d <- log(2*pi) + loglik_star - (1/2)*logdet(-getHessian(y,x,betaMode))
return(log_p_d)
}
## sampleMH performs Metropolis-Hastings sampling from the posterior distribution
## P(beta0, beta1 | D) of Bayesian univariate logistic regression. It returns
## a matrix with two columns (beta0 and beta1) and niter rows (one for each)
## sample.
sampleMH <- function(response, explanatory, data, betaMode, niter) {
y <- data[, response]
x <- data[, explanatory]
samples <- matrix(0, nrow = niter, ncol = 2)
## Proposal distribution covariance matrix
covMat <- -solve(getHessian(y, x, beta = betaMode))
## Initial state
currentBeta <- betaMode
## Start Markov chain
for(k in 1:niter) {
## Sample candidate beta from proposal distribution
candidateBeta <- mvrnorm(n = 1, mu = currentBeta, Sigma = covMat)
## Accept or reject candidate beta
score <- logisticLoglikStar(y, x, candidateBeta) - logisticLoglikStar(y, x, currentBeta)
if(score >= 1) {
currentBeta <- candidateBeta
} else {
u <- runif(n = 1, min = 0, max = 1)
if(log(u) <= score) {
currentBeta <- candidateBeta
}
}
## Update chain
samples[k, ] <- currentBeta
}
# hist(samples[, 1])
# plot(density(samples[,1]))
# abline(v = mean(samples[,1]))
# hist(samples[, 2])
# plot(density(samples[,2]))
# abline(v = mean(samples[,2]))
return(samples)
}
## Calculates the posterior means of niter samples from the joint distribution
## of the betas given the observed data. Sampling is done via Metropolis-
## Hastings. Returns a matrix of two beta values.
getPosteriorMeans <- function(response, explanatory, data, betaMode, niter) {
## Simulate 1000 samples using Metropolis Hastings
samples <- sampleMH(response, explanatory, data, betaMode, niter)
## Calculate sample means
sampleMeans <- apply(samples, 2, mean)
## Return vector of beta values
return(sampleMeans)
}
|
# Course project cleanup script: download the UCI HAR dataset, merge the
# train/test splits, keep mean()/std() features, and write a tidy summary.
# NOTE(review): clearing the workspace and hard-coding setwd() make this
# fragile outside the author's machine -- confirm it only runs interactively.
rm(list=ls())
setwd("c:/data/clean")
#clear workspace and set WD
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
zipfile="UCI_HAR_data.zip"
download.file(fileURL, destfile=zipfile)
unzip(zipfile, exdir="data")
#download and unzip data
time<-Sys.time()
write.csv(time, file = "mostrecentdatadownload.csv")
# record time of download
# Read raw data: feature matrices (X), activity labels (y) and subject ids
# for both the training and the test split.
message("reading X_train.txt")
training.x <- read.table("data/UCI HAR Dataset/train/X_train.txt")
message("reading y_train.txt")
training.y <- read.table("data/UCI HAR Dataset/train/y_train.txt")
message("reading subject_train.txt")
training.subject <- read.table("data/UCI HAR Dataset/train/subject_train.txt")
message("reading X_test.txt")
test.x <- read.table("data/UCI HAR Dataset/test/X_test.txt")
message("reading y_test.txt")
test.y <- read.table("data/UCI HAR Dataset/test/y_test.txt")
message("reading subject_test.txt")
test.subject <- read.table("data/UCI HAR Dataset/test/subject_test.txt")
# Merge data sets: stack training rows on top of test rows, componentwise
merged.x <- rbind(training.x, test.x)
merged.y <- rbind(training.y, test.y)
merged.subject <- rbind(training.subject, test.subject)
# Attach the feature names from features.txt as column names
features <- read.table("data/UCI HAR Dataset/features.txt")
featuresa<-unlist(features[,2])
colnames(merged.x) <- featuresa
colnames(merged.y)<-"activity"
colnames(merged.subject)<- "subject"
# Keep only the mean() and std() feature columns (fixed-string match, so
# meanFreq() columns are excluded by the literal "mean()" pattern)
mean.col <- sapply(featuresa, function(x) grepl("mean()", x, fixed=T))
std.col <- sapply(featuresa, function(x) grepl("std()", x, fixed=T))
keepsmeans <- mean.col
means.keeps<-merged.x[,keepsmeans,drop=FALSE]
keepsstd <- std.col
std.keeps<-merged.x[,keepsstd,drop=FALSE]
combined<-cbind(merged.subject,merged.y, means.keeps,std.keeps)
# Average every kept feature per (subject, activity) pair.
# NOTE(review): ddply() comes from the plyr package, which is never loaded
# here (no library() call for it) -- confirm plyr is attached elsewhere,
# otherwise this line errors.
tidy <- ddply(combined, .(subject, activity), function(x) colMeans(x[,1:62]))
rm(training.x, test.x, training.y, test.y,training.subject, test.subject,merged.subject,merged.y, merged.x, features)
#rename row one to descriptive term instead of numercial code
tidy$activity[tidy$activity == 1] = "WALKING"
tidy$activity[tidy$activity == 2] = "WALKING_UPSTAIRS"
tidy$activity[tidy$activity == 3] = "WALKING_DOWNSTAIRS"
tidy$activity[tidy$activity == 4] = "SITTING"
tidy$activity[tidy$activity == 5] = "STANDING"
tidy$activity[tidy$activity == 6] = "LAYING"
write.table(tidy, file = "cleaneddata_UCI_final.txt", row.names=FALSE)
#remove junk
rm(list=ls())
|
/run_analysis.R
|
no_license
|
bdhope/coursera_uci_data_cleaning
|
R
| false
| false
| 2,409
|
r
|
rm(list=ls())
setwd("c:/data/clean")
#clear worksapce and set WD
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
zipfile="UCI_HAR_data.zip"
download.file(fileURL, destfile=zipfile)
unzip(zipfile, exdir="data")
#download and unzip data
time<-Sys.time()
write.csv(time, file = "mostrecentdatadownload.csv")
# Read the raw UCI HAR files: X_* holds the 561 feature vectors,
# y_* the activity codes (1-6), subject_* the subject identifiers.
message("reading X_train.txt")
training.x <- read.table("data/UCI HAR Dataset/train/X_train.txt")
message("reading y_train.txt")
training.y <- read.table("data/UCI HAR Dataset/train/y_train.txt")
message("reading subject_train.txt")
training.subject <- read.table("data/UCI HAR Dataset/train/subject_train.txt")
message("reading X_test.txt")
test.x <- read.table("data/UCI HAR Dataset/test/X_test.txt")
message("reading y_test.txt")
test.y <- read.table("data/UCI HAR Dataset/test/y_test.txt")
message("reading subject_test.txt")
test.subject <- read.table("data/UCI HAR Dataset/test/subject_test.txt")

# Merge the train and test partitions row-wise.
merged.x <- rbind(training.x, test.x)
merged.y <- rbind(training.y, test.y)
merged.subject <- rbind(training.subject, test.subject)

# Label the feature columns with their descriptive names from features.txt.
features <- read.table("data/UCI HAR Dataset/features.txt")
featuresa <- unlist(features[, 2])
colnames(merged.x) <- featuresa
colnames(merged.y) <- "activity"
colnames(merged.subject) <- "subject"

# Keep only the mean() and std() measurements; fixed = TRUE matches the
# literal string "mean()", so e.g. "meanFreq()" columns are excluded.
mean.col <- sapply(featuresa, function(x) grepl("mean()", x, fixed = TRUE))
std.col <- sapply(featuresa, function(x) grepl("std()", x, fixed = TRUE))
means.keeps <- merged.x[, mean.col, drop = FALSE]
std.keeps <- merged.x[, std.col, drop = FALSE]
combined <- cbind(merged.subject, merged.y, means.keeps, std.keeps)

# Average every retained feature per (subject, activity) pair.
# BUG FIX: the original hard-coded colMeans(x[, 1:62]), which both
# included the subject/activity grouping columns in the averages and
# silently dropped any feature columns beyond the 62nd; average over
# all non-grouping columns instead.
tidy <- ddply(combined, .(subject, activity),
              function(x) colMeans(x[, -(1:2), drop = FALSE]))
rm(training.x, test.x, training.y, test.y, training.subject, test.subject,
   merged.subject, merged.y, merged.x, features)

# Replace the numerical activity codes with descriptive labels.
tidy$activity[tidy$activity == 1] = "WALKING"
tidy$activity[tidy$activity == 2] = "WALKING_UPSTAIRS"
tidy$activity[tidy$activity == 3] = "WALKING_DOWNSTAIRS"
tidy$activity[tidy$activity == 4] = "SITTING"
tidy$activity[tidy$activity == 5] = "STANDING"
tidy$activity[tidy$activity == 6] = "LAYING"

write.table(tidy, file = "cleaneddata_UCI_final.txt", row.names = FALSE)

# Remove all workspace objects created by this script.
rm(list = ls())
|
source('init.R')
pckgs <- loadPackages()
lvl2num <- function(x) as.numeric(levels(x)[x])
# Display the four standard lm diagnostic plots (residuals vs fitted,
# Q-Q, scale-location, residuals vs leverage) in a 2x2 grid.
#
# FIX: the original permanently changed par(mfrow) for the whole
# session; restore the caller's graphical parameters on exit so the
# global plotting state is not clobbered.
showdiag <- function(lm.obj){
  old <- par(mfrow = c(2, 2))
  on.exit(par(old), add = TRUE)
  plot(lm.obj)
}
MAKE_PLOT <- FALSE  # set TRUE to regenerate the data/diagnostic plots below
# Regressor columns for the baseline linear model (egr_diff is the response).
cols <- c('egr_diff', # change in public employment rate (dependent variable)
'egr_lagged', # lagged change in public employment rate
'TIME', # year
'gdpv_annpct', # gdp growth
'unr', # 'unemployment rate'
'ypgtq', # Total disbursements, general government, percent of GDP
## 'pop1574', # population proxy (aged from 15 to 74) # absolute number
'lpop', # log population -> take approximation
#'country'
'ydrh_to_gdpv' # Ratio of net money income per capita to gdp (volume)
)
# For dynamic model and plm
## cols.dyn <- c('egr', 'TIME', 'gdpv_annpct', 'unr', 'ypgtq', 'lpop', 'country',
## 'ydrh_to_gdpv')
# Load the Economic Outlook panel data and the variable-name lookup table.
eos <- readRDS('../data/eo-data.rds')
eo.desc <- readRDS('../data/eo-colnames-dt.rds')
setkey(eo.desc, VARIABLE) # enable eo.desc['bsii'] => Balance of income, value, BOP basis
## change time
setkey(eos[[1]], 'country')
# Countries with at least one non-missing government-employment (eg) value.
eos[[1]][ , list(country, eg)] %>% na.omit %>% {unique(.$country)} -> country.a
missing.country <- eos[[1]][, setdiff(unique(country), country.a)]
x <- eos[[1]][country.a]
x[, egr := 100*eg/et, by='country'] # eg: General Government employment, et: Total employment
x[, egr_diff:= c(NA, diff(egr)), by='country'] # first difference of the public employment rate
## x <- x[TIME < 2014] # make sure there are no projections
x <- x[TIME < 2012 & TIME >= 1990] # make sure there are no projections
# NOTE(review): 1:length(egr_diff)-1 parses as (1:n)-1 = 0:(n-1); the 0 index
# is dropped by R, so this equals egr_diff[1:(n-1)] -- correct but fragile.
x[, egr_lagged:= c(NA, egr_diff[1:length(egr_diff)-1]), by='country'] # lag of egr_diff within each country
x[, TIME:=as.factor(TIME)]
x[, country:=as.factor(country)]
x[, lpop := log(pop1574/1e6)] # log pop of millions
x[, ydrh_to_gdpv:=100*ydrh/gdpv]
x <- x[!is.na(egr)] # keep only rows with a defined public employment rate
if (MAKE_PLOT){
par(mfrow=c(5,5))
x[, { pacf(egr, main=.BY[[1]]); 0 }, by='country']
}
## Expand the formula
x.lm <- lm(egr_diff~ ., data=x[, cols, with=FALSE])
x.lm.s <- summary(x.lm)
x.lm.s
x.model.lm <- na.omit(x[, c(cols, 'country'), with=FALSE])
x.model.lm$TIME <- x.model.lm[, lvl2num(TIME)]
y.fit.simple.lm <- fitted(x.lm)
x.simple.model <- as.data.table(x.lm$model)
################################################################################
## Additional Data
## execrlc
## 1 is right 2 center 3 left
x[, TIME:=lvl2num(TIME)] # Revert back because of joining data
new.data.names <- new.data <-
c('gini', 'population', 'gdp_capita', 'imf_gfs_scores', 'gini_toth', 'dpi_excecrlc', 'year_until_elections')
new.data %<>% {paste0('../data/', ., '_cleaned.csv')} %>% lapply(fread) %>%
lapply(function(dt) {
dt[, V1:=NULL]
setnames(dt, colnames(dt), tolower(colnames(dt)))
setnames(dt, 'time', 'TIME')
dt[, TIME:=as.numeric(TIME)]
setkeyv(dt, c('location', 'TIME'))}) %>% joinDataTable
setnames(new.data, 'location', 'country')
setkeyv(x, c('country', 'TIME'))
imf.gfs <- fread('../data/imf_gfs_scores.csv', header=TRUE) %>%
{.[, list(ISO, `Index Score`)]}
setnames(imf.gfs, c('ISO', 'Index Score'), c('country', 'imf_gfs'))
x.new <- new.data[J(x)]
x.new[, lpoptot:=log(pop)]
x.new[, pop:=NULL]
x.new[execrlc %in% c(0, NA, 2), execrlc:=NA]
x.new[, execrlc:=factor(execrlc, labels=c('right', 'left'))]
x.new <- merge(x.new, imf.gfs, by='country') # add img_gfs fiscal_scores
new.var.names <- list(incomeineq='gini', pop='population',
gdp_per_capita='gdp_per_capita',
fiscal_transparency='fiscal_transparency')
################################################################################
### Robustness Analysis
new.cols <- c('imf_gfs', 'incomeineq', 'lpoptot', 'gdp_per_capita', 'execrlc',
'yrcurnt_corrected')
cols.extended <- c(cols, new.cols)
x.new[, imf_gfs:=scale(imf_gfs, scale=TRUE)]
# Fit `formula` on `data` restricted to `cols` minus the columns named in
# `to.drop`, print the model summary, and return the fitted lm object.
robustnessAnalysis <- function(data, cols, to.drop, formula=egr ~ .){
  kept.cols <- unselectVector(cols, to.drop)
  fit <- lm(formula, data[, kept.cols, with=FALSE])
  print(summary(fit))
  fit
}
# All variables
ff <- egr_diff~ . #+ imf_gfs*gdpv_annpct
x.new[, TIME:=as.factor(TIME)]
## W/o countries
print(cols)
country.wo.lm <- robustnessAnalysis(x.new, cols, 'country', ff)
## Not so much difference, but loss of 150 data points, so keep lpop.
lpoptot.lm <- robustnessAnalysis(x.new, c(cols, 'lpoptot'), 'lpop', ff)
lpop.lm <- robustnessAnalysis(x.new[!is.na(lpoptot)], c(unselectVector(cols, 'lpop'), 'lpop'), '', ff)
## Income inquality is not significant and also limit the size of the dataset
## gdp_per_capita and lpop are represented by ydrh and lpoptot, hence we can drop them
incomeineq.lm <- robustnessAnalysis(x.new, c(cols, 'incomeineq'), '', ff)
incomeineq.wo.lm <- as.data.table(incomeineq.lm$model) %>%
robustnessAnalysis(cols, '', ff)
gini.lm <- robustnessAnalysis(x.new, c(cols, 'gini_toth'), '', ff)
gini.wo.lm <- robustnessAnalysis(as.data.table(gini.lm$model), cols, '', ff)
## gdp_per_capita seems to be significant # Loss of 500 observations though
gdp.per.capita.lm <- robustnessAnalysis(x.new, c(cols, 'gdp_per_capita'), '', ff)
gdp.per.capita.wo.lm <- as.data.table(gdp.per.capita.lm$model) %>%
robustnessAnalysis(cols, '', ff)
## Fiscal Transparency
fiscal.lm <- robustnessAnalysis(x.new, c(cols, 'fiscal_transparency'), '', ff)
fiscal.wo.lm <- as.data.table(fiscal.lm$model) %>%
robustnessAnalysis(c(cols), '', ff)
## Using the trick fiscal_transparencyscore*gdp_growth
lassen <- fread('../data/lassen_fiscal_scores.csv')
x.lassen <- merge(x.new, lassen, by.x='country', by.y='ISO')
x.imf.gfs <- merge(x.new, imf.gfs, by='country')
setnames(x.lassen, 'Index Score', 'fiscal_transparency_score')
setnames(x.imf.gfs, 'imf_gfs.y', 'fiscal_transparency_score')
ff.fiscal <-
egr_diff ~ . - fiscal_transparency_score + fiscal_transparency_score*gdpv_annpct
lassen.lm <- robustnessAnalysis(x.lassen, c(cols, 'fiscal_transparency_score'),
'', ff.fiscal)
lassen.wo.lm <- robustnessAnalysis(as.data.table(lassen.lm$model), cols, '', ff)
imf.gfs.lm <-
robustnessAnalysis(x.imf.gfs, c(cols, 'fiscal_transparency_score'),
'', ff.fiscal)
imf.gfs.wo.lm <- robustnessAnalysis(as.data.table(imf.gfs.lm$model), cols, '', ff)
## Left or right goverment
govrlc.lm <- robustnessAnalysis(x.new, c(cols, 'execrlc'), '', ff)
govrlc.wo.lm <- robustnessAnalysis(as.data.table(govrlc.lm$model), cols, '', ff)
## Years until election
yrcurnt.lm <- robustnessAnalysis(x.new, c(cols, 'yrcurnt_corrected'), '', ff)
yrcurnt.wo.lm <- robustnessAnalysis(as.data.table(yrcurnt.lm$model), cols, '', ff)
################################################################################
## Plots
descriptions <- list(`gdpv\\_annpct`='GDP growth',
unr='Unemployment rate',
ypgtq='Total disbursements, general government, in percent of GDP',
egr='Public employment rate',
lpop='Log of adult population in million',
`ydrh\\_to\\_gdpv`='Household net income, in percent of GDP',
`gdp\\_per\\_capita`='GDP per capita in USD Millions',
`fiscal\\_transparency`='IMF GFS Index',
incomeineq='Gini coefficient',
lpoptot='Log of total population in million',
'TIME'='Time',
egr_diff='Change in Public Employment Rate (CPER)',
egr_lagged='Lagged change in Public Employment Rate',
execrlc='Left or right government',
yrcurnt_corrected='Years until election')
## Data plots
if (MAKE_PLOT){
data.plot <- x.new[, c(cols.extended, 'fiscal_transparency'), with=FALSE] %>%
melt(id.vars=c('country', 'TIME'))
data.plot[, variable:=gsub('_', '\\\\_', variable)] # LaTeX Escaping
## List of variable names and their corresponding title
## Plot
data.plot[, {
options(tikzDefaultEngine = 'pdftex')
s <- paste0('plot/simple_model_', .BY[[1]], '.tex')
s <- gsub('\\', '', s, fixed=TRUE)
gg2 <- ggplot(.SD, aes(TIME, value)) + geom_line() + facet_wrap(~ country) +
ggtitle(paste0(descriptions[[.BY[[1]]]], ' by country'))
tikz(s, height=6, width=9)
print(gg2)
dev.off()
}, by='variable']
## Diagnostic plot
tikz('plot/model_diagnostic.tex', width=6, height=6)
par(mfrow=c(2,2))
plot(x.lm)
dev.off()
## Plots the pacf for the diff of egr
tikz('plot/model_pacf.tex', width=6, height=6)
par(mfrow=c(4, 4))
x[, {pacf(egr, main=.BY[[1]]);0}, by='country']
dev.off()
colnames(x.model.lm) <- gsub('\\_', '\\\\_', colnames(x.model.lm))
y.fit <- y.fit.simple.lm
tikz('plot/model_fit_quality.tex', width=8.5, height=6)
colnames(x.model.lm) <- gsub('egr\\\\_diff', 'Change in public\nemployment rate', colnames(x.model.lm))
gg <- compareValue(as.data.table(cbind(x.model.lm, y.fit)), y.fit='y.fit', egr='Change in public\nemployment rate')
print(gg)
dev.off()
}
## If you want to compare visually the variables
if (FALSE){
## compareValue(x.new, total_disburrsements='ypgtq')
## compareValue(x.new, ydrh='ydrh', gdp_cap='gdp_per_capita')
## compareValue(x.new, gdp_cap='gdp_per_capita')
plot.data <- melt(x.new[, list(TIME, country, popwork=exp(lpop), poptot=exp(lpoptot)), ],
id.vars=c('TIME', 'country'))
gg <- ggplot(plot.data, aes(TIME, value)) + geom_line(aes(color=variable)) +
facet_wrap(~country)
print(ggplotly(gg))
}
################################################################################
### Generate lm output for latex
# Look up each key of `kx` in list `l`; keys absent from the list
# become NA. Returns the unlisted (flattened) vector of values.
queryList <- function(l, kx){
  lookup <- function(key) {
    val <- l[[key]]
    if (is.null(val)) NA else val
  }
  unlist(lapply(kx, lookup))
}
description <-
c(list(gdpv_annpct='GDP growth',
ydrh_to_gdpv='Household net income, in \\% of GDP',
gdp_per_capita='GDP per capita, in USD Millions',
fiscal_transparency='IMF GFS Index',
ypgtq='Government expenditure in \\% of GDP',
country='Country',
`gdpv_annpct:fiscal_transparency_score`='Effect of Fiscal Transparency on GDP Growth',
fiscal_transparency_score='Fiscal Transparency',
'gini_toth'='Gini coefficient (Toth 2015)',
egr_diff='Change in Public Employment Rate (CPER)',
egr_lagged='Lagged change in Public Employment Rate',
yrcurnt_corrected='Years left until election',
execrlcleft='Left government effect'),
descriptions)
x.lm$model$TIME <- lvl2num(x.lm$model$TIME)
queryList(description, colnames(x.lm$model)) %>% {
stargazer(x.lm$model, out='model_output/simple_statistic.tex',
covariate.labels=.,
font.size='footnotesize', title='Data statistics')
}
description[['egr_lagged']] <- NA
# Render a list of fitted lm objects as a LaTeX regression table via
# stargazer. Covariate labels come from the global `description` lookup
# (intercept dropped, unlabelled terms omitted); TIME dummies and the
# lagged dependent variable are collapsed into indicator rows.
toTexModel <- function(li.lm, title, out, dep.name='Difference in public employment rate'){
  labels <- na.omit(queryList(description, names(coef(li.lm[[1]]))[-1]))
  opts <- list(title = title, out = out, covariate.labels = labels,
               dep.var.labels = dep.name, omit = c('TIME', 'egr_lagged'),
               omit.labels = c('Year fixed-effect',
                               'Auto-correlation effect'))
  do.call(stargazer, c(li.lm, opts))
}
dep.name <- 'Difference in public employment rate'
toTexModel(list(x.lm),
'Main variable result',
'model_output/simple_lm.tex')
toTexModel(list(lpoptot.lm, lpop.lm),
'Robustness of log of adult population',
'model_output/simple_lm_lpop.tex')
toTexModel(list(incomeineq.lm, incomeineq.wo.lm),
'Effect of income inequality',
'model_output/simple_lm_incomeineq.tex')
toTexModel(list(gini.lm, gini.wo.lm),
'Effect of the gini coefficient (Toth 2015)',
'model_output/simple_lm_gini.tex')
toTexModel(list(gdp.per.capita.lm, gdp.per.capita.wo.lm),
'Effect of the GDP per capita',
'model_output/simple_lm_gdp_per_capita.tex')
toTexModel(list(fiscal.lm, fiscal.wo.lm),
'Effect of IMF fiscal transparency index',
'model_output/simple_lm_fiscal_transparency.tex')
toTexModel(list(lassen.lm, lassen.wo.lm),
'Effect of Lassen Fiscal Transparency index',
'model_output/simple_lm_lassen_transparency.tex')
toTexModel(list(imf.gfs.lm, imf.gfs.wo.lm),
'Effect of Lassen Fiscal Transparency index',
'model_output/simple_lm_imf_transparency.tex')
toTexModel(list(govrlc.lm, govrlc.wo.lm),
'Effect of Government Political Side',
'model_output/simple_lm_govrlc.tex')
toTexModel(list(yrcurnt.lm, yrcurnt.wo.lm),
'Effect of years left until election',
'model_output/simple_lm_yrcurnt.tex')
################################################################################
|
/PublicEmploymentAnalysis/R/eda_annual.R
|
no_license
|
davidpham87/public_employment_analysis
|
R
| false
| false
| 12,902
|
r
|
source('init.R')
pckgs <- loadPackages()
lvl2num <- function(x) as.numeric(levels(x)[x])
showdiag <- function(lm.obj){
par(mfrow = c(2, 2))
plot(lm.obj)
}
MAKE_PLOT <- FALSE
cols <- c('egr_diff', # public employement rate
'egr_lagged',
'TIME', # year
'gdpv_annpct', # gdp growth
'unr', # 'unemployment rate'
'ypgtq', # Total disburrsements, general government percent of GDP
## 'pop1574', # population proxy (aged from 15 to 74) # absolute number
'lpop', # log population -> take approximation
#'country'
'ydrh_to_gdpv' # Ratio of net money income per capita to gdp (volume)
)
# For dynamic model and plm
## cols.dyn <- c('egr', 'TIME', 'gdpv_annpct', 'unr', 'ypgtq', 'lpop', 'country',
## 'ydrh_to_gdpv')
eos <- readRDS('../data/eo-data.rds')
eo.desc <- readRDS('../data/eo-colnames-dt.rds')
setkey(eo.desc, VARIABLE) # enable eo.desc['bsii'] => Balance of income, value, BOP basis
## change time
setkey(eos[[1]], 'country')
eos[[1]][ , list(country, eg)] %>% na.omit %>% {unique(.$country)} -> country.a
missing.country <- eos[[1]][, setdiff(unique(country), country.a)]
x <- eos[[1]][country.a]
x[, egr := 100*eg/et, by='country'] # et: General Government employment, et: Total employment
x[, egr_diff:= c(NA, diff(egr)), by='country'] # et: General Government employment, et: Total employment
## x <- x[TIME < 2014] # make sure there are no projections
x <- x[TIME < 2012 & TIME >= 1990] # make sure there are no projections
x[, egr_lagged:= c(NA, egr_diff[1:length(egr_diff)-1]), by='country'] # et: General Government employment, et: Total employment
x[, TIME:=as.factor(TIME)]
x[, country:=as.factor(country)]
x[, lpop := log(pop1574/1e6)] # log pop of millions
x[, ydrh_to_gdpv:=100*ydrh/gdpv]
x <- x[!is.na(egr)] # Non na observation
if (MAKE_PLOT){
par(mfrow=c(5,5))
x[, { pacf(egr, main=.BY[[1]]); 0 }, by='country']
}
## Expand the formula
x.lm <- lm(egr_diff~ ., data=x[, cols, with=FALSE])
x.lm.s <- summary(x.lm)
x.lm.s
x.model.lm <- na.omit(x[, c(cols, 'country'), with=FALSE])
x.model.lm$TIME <- x.model.lm[, lvl2num(TIME)]
y.fit.simple.lm <- fitted(x.lm)
x.simple.model <- as.data.table(x.lm$model)
################################################################################
## Additional Data
## execrlc
## 1 is right 2 center 3 left
x[, TIME:=lvl2num(TIME)] # Revert back because of joining data
new.data.names <- new.data <-
c('gini', 'population', 'gdp_capita', 'imf_gfs_scores', 'gini_toth', 'dpi_excecrlc', 'year_until_elections')
new.data %<>% {paste0('../data/', ., '_cleaned.csv')} %>% lapply(fread) %>%
lapply(function(dt) {
dt[, V1:=NULL]
setnames(dt, colnames(dt), tolower(colnames(dt)))
setnames(dt, 'time', 'TIME')
dt[, TIME:=as.numeric(TIME)]
setkeyv(dt, c('location', 'TIME'))}) %>% joinDataTable
setnames(new.data, 'location', 'country')
setkeyv(x, c('country', 'TIME'))
imf.gfs <- fread('../data/imf_gfs_scores.csv', header=TRUE) %>%
{.[, list(ISO, `Index Score`)]}
setnames(imf.gfs, c('ISO', 'Index Score'), c('country', 'imf_gfs'))
x.new <- new.data[J(x)]
x.new[, lpoptot:=log(pop)]
x.new[, pop:=NULL]
x.new[execrlc %in% c(0, NA, 2), execrlc:=NA]
x.new[, execrlc:=factor(execrlc, labels=c('right', 'left'))]
x.new <- merge(x.new, imf.gfs, by='country') # add img_gfs fiscal_scores
new.var.names <- list(incomeineq='gini', pop='population',
gdp_per_capita='gdp_per_capita',
fiscal_transparency='fiscal_transparency')
################################################################################
### Robustness Analysis
new.cols <- c('imf_gfs', 'incomeineq', 'lpoptot', 'gdp_per_capita', 'execrlc',
'yrcurnt_corrected')
cols.extended <- c(cols, new.cols)
x.new[, imf_gfs:=scale(imf_gfs, scale=TRUE)]
robustnessAnalysis <- function(data, cols, to.drop, formula=egr ~ .){
cols.extended <- unselectVector(cols, to.drop)
x.lm <- lm(formula, data[, cols.extended, with=FALSE])
print(summary(x.lm))
x.lm
}
# All variables
ff <- egr_diff~ . #+ imf_gfs*gdpv_annpct
x.new[, TIME:=as.factor(TIME)]
## W/o countries
print(cols)
country.wo.lm <- robustnessAnalysis(x.new, cols, 'country', ff)
## Not so much difference, but loss of 150 data points, so keep lpop.
lpoptot.lm <- robustnessAnalysis(x.new, c(cols, 'lpoptot'), 'lpop', ff)
lpop.lm <- robustnessAnalysis(x.new[!is.na(lpoptot)], c(unselectVector(cols, 'lpop'), 'lpop'), '', ff)
## Income inquality is not significant and also limit the size of the dataset
## gdp_per_capita and lpop are represented by ydrh and lpoptot, hence we can drop them
incomeineq.lm <- robustnessAnalysis(x.new, c(cols, 'incomeineq'), '', ff)
incomeineq.wo.lm <- as.data.table(incomeineq.lm$model) %>%
robustnessAnalysis(cols, '', ff)
gini.lm <- robustnessAnalysis(x.new, c(cols, 'gini_toth'), '', ff)
gini.wo.lm <- robustnessAnalysis(as.data.table(gini.lm$model), cols, '', ff)
## gdp_per_capita seems to be significant # Loss of 500 observations though
gdp.per.capita.lm <- robustnessAnalysis(x.new, c(cols, 'gdp_per_capita'), '', ff)
gdp.per.capita.wo.lm <- as.data.table(gdp.per.capita.lm$model) %>%
robustnessAnalysis(cols, '', ff)
## Fiscal Transparency
fiscal.lm <- robustnessAnalysis(x.new, c(cols, 'fiscal_transparency'), '', ff)
fiscal.wo.lm <- as.data.table(fiscal.lm$model) %>%
robustnessAnalysis(c(cols), '', ff)
## Using the trick fiscal_transparencyscore*gdp_growth
lassen <- fread('../data/lassen_fiscal_scores.csv')
x.lassen <- merge(x.new, lassen, by.x='country', by.y='ISO')
x.imf.gfs <- merge(x.new, imf.gfs, by='country')
setnames(x.lassen, 'Index Score', 'fiscal_transparency_score')
setnames(x.imf.gfs, 'imf_gfs.y', 'fiscal_transparency_score')
ff.fiscal <-
egr_diff ~ . - fiscal_transparency_score + fiscal_transparency_score*gdpv_annpct
lassen.lm <- robustnessAnalysis(x.lassen, c(cols, 'fiscal_transparency_score'),
'', ff.fiscal)
lassen.wo.lm <- robustnessAnalysis(as.data.table(lassen.lm$model), cols, '', ff)
imf.gfs.lm <-
robustnessAnalysis(x.imf.gfs, c(cols, 'fiscal_transparency_score'),
'', ff.fiscal)
imf.gfs.wo.lm <- robustnessAnalysis(as.data.table(imf.gfs.lm$model), cols, '', ff)
## Left or right goverment
govrlc.lm <- robustnessAnalysis(x.new, c(cols, 'execrlc'), '', ff)
govrlc.wo.lm <- robustnessAnalysis(as.data.table(govrlc.lm$model), cols, '', ff)
## Years until election
yrcurnt.lm <- robustnessAnalysis(x.new, c(cols, 'yrcurnt_corrected'), '', ff)
yrcurnt.wo.lm <- robustnessAnalysis(as.data.table(yrcurnt.lm$model), cols, '', ff)
################################################################################
## Plots
descriptions <- list(`gdpv\\_annpct`='GDP growth',
unr='Unemployment rate',
ypgtq='Total disbursements, general government, in percent of GDP',
egr='Public employment rate',
lpop='Log of adult population in million',
`ydrh\\_to\\_gdpv`='Household net income, in percent of GDP',
`gdp\\_per\\_capita`='GDP per capita in USD Millions',
`fiscal\\_transparency`='IMF GFS Index',
incomeineq='Gini coefficient',
lpoptot='Log of total population in million',
'TIME'='Time',
egr_diff='Change in Public Employment Rate (CPER)',
egr_lagged='Lagged change in Public Employment Rate',
execrlc='Left or right government',
yrcurnt_corrected='Years until election')
## Data plots
if (MAKE_PLOT){
data.plot <- x.new[, c(cols.extended, 'fiscal_transparency'), with=FALSE] %>%
melt(id.vars=c('country', 'TIME'))
data.plot[, variable:=gsub('_', '\\\\_', variable)] # LaTeX Escaping
## List of variable names and their corresponding title
## Plot
data.plot[, {
options(tikzDefaultEngine = 'pdftex')
s <- paste0('plot/simple_model_', .BY[[1]], '.tex')
s <- gsub('\\', '', s, fixed=TRUE)
gg2 <- ggplot(.SD, aes(TIME, value)) + geom_line() + facet_wrap(~ country) +
ggtitle(paste0(descriptions[[.BY[[1]]]], ' by country'))
tikz(s, height=6, width=9)
print(gg2)
dev.off()
}, by='variable']
## Diagnostic plot
tikz('plot/model_diagnostic.tex', width=6, height=6)
par(mfrow=c(2,2))
plot(x.lm)
dev.off()
## Plots the pacf for the diff of egr
tikz('plot/model_pacf.tex', width=6, height=6)
par(mfrow=c(4, 4))
x[, {pacf(egr, main=.BY[[1]]);0}, by='country']
dev.off()
colnames(x.model.lm) <- gsub('\\_', '\\\\_', colnames(x.model.lm))
y.fit <- y.fit.simple.lm
tikz('plot/model_fit_quality.tex', width=8.5, height=6)
colnames(x.model.lm) <- gsub('egr\\\\_diff', 'Change in public\nemployment rate', colnames(x.model.lm))
gg <- compareValue(as.data.table(cbind(x.model.lm, y.fit)), y.fit='y.fit', egr='Change in public\nemployment rate')
print(gg)
dev.off()
}
## If you want to compare visually the variables
if (FALSE){
## compareValue(x.new, total_disburrsements='ypgtq')
## compareValue(x.new, ydrh='ydrh', gdp_cap='gdp_per_capita')
## compareValue(x.new, gdp_cap='gdp_per_capita')
plot.data <- melt(x.new[, list(TIME, country, popwork=exp(lpop), poptot=exp(lpoptot)), ],
id.vars=c('TIME', 'country'))
gg <- ggplot(plot.data, aes(TIME, value)) + geom_line(aes(color=variable)) +
facet_wrap(~country)
print(ggplotly(gg))
}
################################################################################
### Generate lm output for latex
queryList <- function(l, kx){
kx %>%
lapply(function(s) if(is.null(d <- l[[s]])) NA else d) %>%
unlist
}
description <-
c(list(gdpv_annpct='GDP growth',
ydrh_to_gdpv='Household net income, in \\% of GDP',
gdp_per_capita='GDP per capita, in USD Millions',
fiscal_transparency='IMF GFS Index',
ypgtq='Government expenditure in \\% of GDP',
country='Country',
`gdpv_annpct:fiscal_transparency_score`='Effect of Fiscal Transparency on GDP Growth',
fiscal_transparency_score='Fiscal Transparency',
'gini_toth'='Gini coefficient (Toth 2015)',
egr_diff='Change in Public Employment Rate (CPER)',
egr_lagged='Lagged change in Public Employment Rate',
yrcurnt_corrected='Years left until election',
execrlcleft='Left government effect'),
descriptions)
x.lm$model$TIME <- lvl2num(x.lm$model$TIME)
queryList(description, colnames(x.lm$model)) %>% {
stargazer(x.lm$model, out='model_output/simple_statistic.tex',
covariate.labels=.,
font.size='footnotesize', title='Data statistics')
}
description[['egr_lagged']] <- NA
toTexModel <- function(li.lm, title, out, dep.name='Difference in public employment rate'){
cov.labs <- na.omit(queryList(description, names(coef(li.lm[[1]]))[-1]))
argx <- c(li.lm, list(title=title, out=out, covariate.labels=cov.labs,
dep.var.labels=dep.name, omit=c('TIME', 'egr_lagged'),
omit.labels = c('Year fixed-effect',
'Auto-correlation effect')))
do.call(stargazer, argx)
}
dep.name <- 'Difference in public employment rate'
toTexModel(list(x.lm),
'Main variable result',
'model_output/simple_lm.tex')
toTexModel(list(lpoptot.lm, lpop.lm),
'Robustness of log of adult population',
'model_output/simple_lm_lpop.tex')
toTexModel(list(incomeineq.lm, incomeineq.wo.lm),
'Effect of income inequality',
'model_output/simple_lm_incomeineq.tex')
toTexModel(list(gini.lm, gini.wo.lm),
'Effect of the gini coefficient (Toth 2015)',
'model_output/simple_lm_gini.tex')
toTexModel(list(gdp.per.capita.lm, gdp.per.capita.wo.lm),
'Effect of the GDP per capita',
'model_output/simple_lm_gdp_per_capita.tex')
toTexModel(list(fiscal.lm, fiscal.wo.lm),
'Effect of IMF fiscal transparency index',
'model_output/simple_lm_fiscal_transparency.tex')
toTexModel(list(lassen.lm, lassen.wo.lm),
'Effect of Lassen Fiscal Transparency index',
'model_output/simple_lm_lassen_transparency.tex')
toTexModel(list(imf.gfs.lm, imf.gfs.wo.lm),
'Effect of Lassen Fiscal Transparency index',
'model_output/simple_lm_imf_transparency.tex')
toTexModel(list(govrlc.lm, govrlc.wo.lm),
'Effect of Government Political Side',
'model_output/simple_lm_govrlc.tex')
toTexModel(list(yrcurnt.lm, yrcurnt.wo.lm),
'Effect of years left until election',
'model_output/simple_lm_yrcurnt.tex')
################################################################################
|
\name{prLogisticBootMarg}
\alias{prLogisticBootMarg}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Estimation of Prevalence Ratios using Logistic Models and Bootstrap Confidence
Intervals for Marginal Standardization}
\description{
This function estimates prevalence ratios (PRs)
and bootstrap confidence intervals using logistic models for marginal standardization.
The estimation of standard errors for PRs is obtained through use of bootstrapping.
Confidence intervals of (1-alpha)\% for PRs are available for standard logistic regression
and for random-effects logistic models (Santos et al, 2008). The function
\code{prLogisticBootMarg} allows estimation of PRs using marginal standardization procedure
(Wilcosky and Chambless, 1985).
%\code{glm}, \code{lmer}, \code{prLogisticDelta}, \code{prLogisticBootCond}
}
\usage{
prLogisticBootMarg(object, data, conf = 0.95, R = 99, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{any fitted model object from which fixed effects estimates can be
extracted. The details of model specification are given below.}
\item{data}{a required data frame containing the variables named in \code{object}.}
\item{conf}{scalar or vector specifying confidence level(s) for estimation. The default is
\code{conf}= 0.95.}
\item{R}{the number of bootstrap replicates. The default is \code{R}=99.}
\item{\dots}{optional additional arguments. Currently none are used in any methods.}
}
\details{
The fitted model object can be obtained using \code{glm()} function for binary responses
when unit samples are independent. The \code{glmer()} function should be used
for correlated binary responses. Only binary predictors are allowed. If categorization for predictors
is other than (0,1), \code{factor()} should be considered.
}
\value{
Returns prevalence ratio and its 95\% bootstrap confidence intervals for marginal
standardization. Both normal and percentile bootstrap confidence intervals are presented.}
\author{Raydonal Ospina, Department of Statistics, Federal University of Pernambuco, Brazil \cr (raydonal@de.ufpe.br) \cr
Leila D. Amorim, Department of Statistics, Federal University of Bahia, Brazil \cr (leiladen@ufba.br).
}
\references{
Localio AR, Margolis DJ, Berlin JA (2007). Relative risks and confidence intervals were easily
computed indirectly from
multivariate logistic regression. \emph{Journal of Clinical Epidemiology},
\bold{60}, 874-882.
Oliveira NF, Santana VS, Lopes AA (1997). Ratio of proportions and the use of the delta method
for confidence interval estimation
in logistic regression. \emph{Journal of Public Health}, \bold{31}(1), 90-99.
Santos CAST et al (2008).
Estimating adjusted prevalence ratio in clustered cross-sectional epidemiological data.
\emph{BMC Medical Research Methodology}, \bold{8} (80). Available from \cr
http://www.biomedcentral.com/1471-2288/8/80.
Wilcosky TC, Chambless LE (1985). A comparison of direct adjustment and regression adjustment
of
epidemiologic measures. \emph{Journal of Chronic Diseases}, \bold{34}, 849-856.
}
%\note{
%This work is part of .... }
\seealso{ \code{\link{glm}}, \code{\link{glmer}},
\code{\link{prLogisticDelta}},\code{\link{prLogisticBootCond}}}
\examples{
### For independent observations:
## Estimates from logistic regression with bootstrap confidence intervals -
## marginal standardization
# Not run:
# data("titanic", package = "prLogistic")
# attach(titanic)
# fit.logistic=glm(survived~ sex + pclass + embarked, family=binomial,
# data = titanic)
# prLogisticBootMarg(fit.logistic, data = titanic)
# End (Not run:)
# Another way for fitting the same model:
# Not run:
# prLogisticBootMarg(glm(survived~ sex + pclass + embarked,
# family=binomial, data = titanic), data=titanic)
# End (Not run:)
### For clustered data
# Estimates from random-effects logistic regression
## with bootstrap confidence intervals - marginal standardization
# Not run:
# library(lme4)
# data("Thailand", package = "prLogistic")
# attach(Thailand)
# ML = glmer(rgi ~ sex + pped + (1|schoolid),
# family = binomial, data = Thailand)
# prLogisticBootMarg(ML, data = Thailand)
# End (Not run:)
}
\keyword{distribution}
\keyword{regression}
|
/man/prLogisticBootMarg.Rd
|
no_license
|
Raydonal/prLogistic
|
R
| false
| false
| 4,267
|
rd
|
\name{prLogisticBootMarg}
\alias{prLogisticBootMarg}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Estimation of Prevalence Ratios using Logistic Models and Bootstrap Confidence
Intervals for Marginal Standardization}
\description{
This function estimates prevalence ratios (PRs)
and bootstrap confidence intervals using logistic models for marginal standardization.
The estimation of standard errors for PRs is obtained through use of bootstrapping.
Confidence intervals of (1-alpha)\% for PRs are available for standard logistic regression
and for random-effects logistic models (Santos et al, 2008). The function
\code{prLogisticBootMarg} allows estimation of PRs using marginal standardization procedure
(Wilcosky and Chambless, 1985).
%\code{glm}, \code{lmer}, \code{prLogisticDelta}, \code{prLogisticBootCond}
}
\usage{
prLogisticBootMarg(object, data, conf = 0.95, R = 99, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{any fitted model object from which fixed effects estimates can be
extracted. The details of model specification are given below.}
\item{data}{a required data frame containing the variables named in \code{object}.}
\item{conf}{scalar or vector specifying confidence level(s) for estimation. The default is
\code{conf}= 0.95.}
\item{R}{the number of bootstrap replicates. The default is \code{R}=99.}
\item{\dots}{optional additional arguments. Currently none are used in any methods.}
}
\details{
The fitted model object can be obtained using \code{glm()} function for binary responses
when unit samples are independent. The \code{glmer()} function should be used
for correlated binary responses. Only binary predictors are allowed. If categorization for predictors
is other than (0,1), \code{factor()} should be considered.
}
\value{
Returns prevalence ratio and its 95\% bootstrap confidence intervals for marginal
standardization. Both normal and percentile bootstrap confidence intervals are presented.}
\author{Raydonal Ospina, Department of Statistics, Federal University of Pernambuco, Brazil \cr (raydonal@de.ufpe.br) \cr
Leila D. Amorim, Department of Statistics, Federal University of Bahia, Brazil \cr (leiladen@ufba.br).
}
\references{
Localio AR, Margolis DJ, Berlin JA (2007). Relative risks and confidence intervals were easily
computed indirectly from
multivariate logistic regression. \emph{Journal of Clinical Epidemiology},
\bold{60}, 874-882.
Oliveira NF, Santana VS, Lopes AA (1997). Ratio of proportions and the use of the delta method
for confidence interval estimation
in logistic regression. \emph{Journal of Public Health}, \bold{31}(1), 90-99.
Santos CAST et al (2008).
Estimating adjusted prevalence ratio in clustered cross-sectional epidemiological data.
\emph{BMC Medical Research Methodology}, \bold{8} (80). Available from \cr
http://www.biomedcentral.com/1471-2280/8/80.
Wilcosky TC, Chambless LE (1985). A comparison of direct adjustment and regression adjustment
of
epidemiologic measures. \emph{Journal of Chronic Diseases}, \bold{34}, 849-856.
}
%\note{
%This work is part of .... }
\seealso{ \code{\link{glm}}, \code{\link{glmer}},
\code{\link{prLogisticDelta}},\code{\link{prLogisticBootCond}}}
\examples{
### For independent observations:
## Estimates from logistic regression with bootstrap confidence intervals -
## marginal standardization
# Not run:
# data("titanic", package = "prLogistic")
# attach(titanic)
# fit.logistic=glm(survived~ sex + pclass + embarked, family=binomial,
# data = titanic)
# prLogisticBootMarg(fit.logistic, data = titanic)
# End (Not run:)
# Another way for fitting the same model:
# Not run:
# prLogisticBootMarg(glm(survived~ sex + pclass + embarked,
# family=binomial, data = titanic), data=titanic)
# End (Not run:)
### For clustered data
# Estimates from random-effects logistic regression
## with bootstrap confidence intervals - marginal standardization
# Not run:
# library(lme4)
# data("Thailand", package = "prLogistic")
# attach(Thailand)
# ML = glmer(rgi ~ sex + pped + (1|schoolid),
# family = binomial, data = Thailand)
# prLogisticBootMarg(ML, data = Thailand)
# End (Not run:)
}
\keyword{distribution}
\keyword{regression}
|
# Exercise 8: Pulitzer Prizes

# Read in the data
pulitzer <- read.csv("data/pulitzer-circulation-data.csv", stringsAsFactors = FALSE)

# Install and load the needed libraries.
# install.packages() is commented out so the package is not reinstalled
# on every run -- remember a package only needs to be installed once.
# install.packages("dplyr")
# library(dplyr)

# View the data set to start understanding what the columns contain.
# Kept commented out (as the instructions above require) so a viewer
# window is not opened every time the script runs.
# View(pulitzer)

# Print the names of the columns.
colnames(pulitzer)

# Print what types of values are contained in each column.
str(pulitzer)

# Add a column `Pulitzer.Prize.Change` containing the difference between
# Pulitzer Prize winners/finalists in 2004-2014 and those in 1990-2003.
# BUG FIX: the original subtracted the 2004-2014 column from itself
# (always zero) and stored the result in a free-standing variable
# instead of a data-frame column.
# NOTE(review): 1990-2003 column name assumed by analogy with the
# 2004-2014 column -- confirm against colnames(pulitzer).
pulitzer$Pulitzer.Prize.Change <-
  pulitzer$Pulitzer.Prize.Winners.and.Finalists..2004.2014 -
  pulitzer$Pulitzer.Prize.Winners.and.Finalists..1990.2003

# What publication gained the most Pulitzer Prizes from 2004-2014?
# Be sure to use the pipe operator!

# Which publication with at least 5 Pulitzers won from 2004-2014 had the
# biggest decrease (most negative change) in daily circulation numbers?

# Your turn! An important part of being a data scientist is asking
# questions. Create a question and use dplyr to figure out the answer.
|
/exercise-8/exercise.R
|
permissive
|
kgoodman3/m11-dplyr
|
R
| false
| false
| 1,614
|
r
|
# Exercise 8: Pulitzer Prizes

# Read in the data
pulitzer <- read.csv("data/pulitzer-circulation-data.csv", stringsAsFactors = FALSE)

# Install and load the needed libraries
# Be sure to comment out the install.packages function so it won't install it every time it runs
# Remember you only need to install a package once
# install.packages("dplyr")
# library(dplyr)

# View the data set. Start to understand what the data columns contain.
# Be sure to comment out the function so it won't open the viewer every time you run the code.
View(pulitzer)

# Use 'colnames' to print out the names of the columns
colnames(pulitzer)

# Use 'str' to print what types of values are contained in each column
# Did any value type surprise you? Why do you think they are that type?
str(pulitzer)

# Add a column to the data frame called 'Pulitzer.Prize.Change' that contains the difference
# between Pulitzer Prize Winners from 2004 to 2014 and Pulitzer Prize Winners from 1990 to 2003.
# NOTE(review): the original subtracted the 2004-2014 column from itself (always zero)
# and stored the result in a free-standing variable instead of a data-frame column.
pulitzer$Pulitzer.Prize.Change <- pulitzer$Pulitzer.Prize.Winners.and.Finalists..2004.2014 -
  pulitzer$Pulitzer.Prize.Winners.and.Finalists..1990.2003

# What publication gained the most pulitzer prizes from 2004-2014?
# Be sure to use the pipe operator!

# Which publication with at least 5 Pulitzers won from 2004-2014 had the biggest decrease (negative) in Daily circulation numbers?
# This publication should have won a minimum of 5 Pulitzers, as well as the biggest decrease in circulation

# Your turn! An important part about being a data scientist is asking questions.
# Create a question and use dplyr to figure out the answer.
|
# S3 'summary' method for objects of class 'TwoStageSurvSurv'.
# Prints, in order: the original function call, descriptives of the
# trial-level data (number of trials; mean/SD/min/max trial size), and
# the R^2_{ht} and R_{ht} surrogacy estimates, all rounded to 4 decimals.
#
# Arguments:
#   object: a fitted 'TwoStageSurvSurv' object.
#   ...:    further arguments, ignored (kept for S3 method consistency).
#   Object: optional alias for 'object'; defaults to 'object' when missing.
summary.TwoStageSurvSurv <- function(object, ..., Object){
  # Allow the fitted object to be supplied as either 'object' or 'Object'.
  if (missing(Object)){Object <- object}
  cat("\nFunction call:\n\n")
  print(Object$Call)
  cat("\n\n# Data summary and descriptives")
  cat("\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
  cat("\n\nTotal number of trials: ", length(Object$Results.Stage.1$Trial.Name))
  cat("\nM(SD) trial size: ", format(round(mean((Object$Results.Stage.1$Trial.Size)), 4), nsmall = 4),
  " (", format(round(sd((Object$Results.Stage.1$Trial.Size)), 4), nsmall = 4), ")",
  " [min: ", min((Object$Results.Stage.1$Trial.Size)), "; max: ", max((Object$Results.Stage.1$Trial.Size)), "]", sep="")
  cat("\n\n\n# R^2_{ht} results")
  cat("\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
  cat("\n\n")
  print(format(round(Object$R2.ht, 4), nsmall = 4))
  cat("\n")
  print(format(round(Object$R.ht, 4), nsmall = 4))
  cat("\n")
}
|
/R/summary.TwoStageSurvSurv.R
|
no_license
|
cran/Surrogate
|
R
| false
| false
| 980
|
r
|
# S3 'summary' method for objects of class 'TwoStageSurvSurv'.
# Prints, in order: the original function call, descriptives of the
# trial-level data (number of trials; mean/SD/min/max trial size), and
# the R^2_{ht} and R_{ht} surrogacy estimates, all rounded to 4 decimals.
#
# Arguments:
#   object: a fitted 'TwoStageSurvSurv' object.
#   ...:    further arguments, ignored (kept for S3 method consistency).
#   Object: optional alias for 'object'; defaults to 'object' when missing.
summary.TwoStageSurvSurv <- function(object, ..., Object){
  # Allow the fitted object to be supplied as either 'object' or 'Object'.
  if (missing(Object)){Object <- object}
  cat("\nFunction call:\n\n")
  print(Object$Call)
  cat("\n\n# Data summary and descriptives")
  cat("\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
  cat("\n\nTotal number of trials: ", length(Object$Results.Stage.1$Trial.Name))
  cat("\nM(SD) trial size: ", format(round(mean((Object$Results.Stage.1$Trial.Size)), 4), nsmall = 4),
  " (", format(round(sd((Object$Results.Stage.1$Trial.Size)), 4), nsmall = 4), ")",
  " [min: ", min((Object$Results.Stage.1$Trial.Size)), "; max: ", max((Object$Results.Stage.1$Trial.Size)), "]", sep="")
  cat("\n\n\n# R^2_{ht} results")
  cat("\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
  cat("\n\n")
  print(format(round(Object$R2.ht, 4), nsmall = 4))
  cat("\n")
  print(format(round(Object$R.ht, 4), nsmall = 4))
  cat("\n")
}
|
# Information Loss: IL3 ---------------------------------------------------
# Domingo-Ferrer & Torra (2001), p.8.
# Mean of five relative-discrepancy terms between the original data `dat`
# and its aggregated counterpart `dat.agreg`:
#   (1) element-wise values, (2) column means, (3) covariances (upper
#   triangle incl. diagonal), (4) variances, (5) correlations.
# Both inputs use data.table syntax (`.SD`), so they are expected to be
# data.tables with identical dimensions.
IL3 <- function(dat, dat.agreg) {
  n.obs <- nrow(dat)
  n.var <- ncol(dat)
  loss <- numeric(5L)

  # (1) mean relative discrepancy of the individual entries
  loss[1] <- sum(abs(dat - dat.agreg) / abs(dat)) / (n.obs * n.var)

  # (2) mean relative discrepancy of the column means
  # TODO (from original): verify whether this term is needed.
  mean.orig <- dat[, lapply(.SD, mean)]
  mean.agreg <- dat.agreg[, lapply(.SD, mean)]
  loss[2] <- sum(abs(mean.orig - mean.agreg) / abs(mean.orig)) / n.var

  # (3) mean relative discrepancy of the covariances (upper triangle)
  cov.orig <- cov(dat)
  cov.agreg <- cov(dat.agreg)
  upper <- upper.tri(cov.orig, diag = T)
  n.pairs <- (n.var * (n.var + 1)) / 2
  loss[3] <- sum(abs(cov.orig[upper] - cov.agreg[upper]) / abs(cov.orig[upper])) / n.pairs

  # (4) mean relative discrepancy of the variances (covariance diagonal)
  loss[4] <- sum(abs(diag(cov.orig) - diag(cov.agreg)) / diag(cov.orig)) / n.var

  # (5) mean absolute discrepancy of the correlations (upper triangle)
  cor.orig <- cor(dat)
  cor.agreg <- cor(dat.agreg)
  loss[5] <- sum(abs(cor.orig[upper] - cor.agreg[upper])) / n.pairs

  mean(loss)
}
|
/IL3.R
|
permissive
|
augustofadel/sdc
|
R
| false
| false
| 1,188
|
r
|
# Information Loss: IL3 ---------------------------------------------------
# Domingo-Ferrer & Torra (2001), p.8.
# Mean of five relative-discrepancy terms between the original data `dat`
# and its aggregated counterpart `dat.agreg`:
#   (1) element-wise values, (2) column means, (3) covariances (upper
#   triangle incl. diagonal), (4) variances, (5) correlations.
# Both inputs use data.table syntax (`.SD`), so they are expected to be
# data.tables with identical dimensions.
IL3 <- function(dat, dat.agreg) {
  n.obs <- nrow(dat)
  n.var <- ncol(dat)
  loss <- numeric(5L)

  # (1) mean relative discrepancy of the individual entries
  loss[1] <- sum(abs(dat - dat.agreg) / abs(dat)) / (n.obs * n.var)

  # (2) mean relative discrepancy of the column means
  # TODO (from original): verify whether this term is needed.
  mean.orig <- dat[, lapply(.SD, mean)]
  mean.agreg <- dat.agreg[, lapply(.SD, mean)]
  loss[2] <- sum(abs(mean.orig - mean.agreg) / abs(mean.orig)) / n.var

  # (3) mean relative discrepancy of the covariances (upper triangle)
  cov.orig <- cov(dat)
  cov.agreg <- cov(dat.agreg)
  upper <- upper.tri(cov.orig, diag = T)
  n.pairs <- (n.var * (n.var + 1)) / 2
  loss[3] <- sum(abs(cov.orig[upper] - cov.agreg[upper]) / abs(cov.orig[upper])) / n.pairs

  # (4) mean relative discrepancy of the variances (covariance diagonal)
  loss[4] <- sum(abs(diag(cov.orig) - diag(cov.agreg)) / diag(cov.orig)) / n.var

  # (5) mean absolute discrepancy of the correlations (upper triangle)
  cor.orig <- cor(dat)
  cor.agreg <- cor(dat.agreg)
  loss[5] <- sum(abs(cor.orig[upper] - cor.agreg[upper])) / n.pairs

  mean(loss)
}
|
#' Central Limit Theorem Function
#'
#' @description Draws `iter` samples of size `n` from a Uniform(a, b)
#' distribution, computes the sum of each sample, plots a histogram of the
#' `iter` sums with the limiting normal density overlaid, and returns the
#' sums. Under the CLT, the histogram approaches a normal distribution as
#' `n` becomes large.
#'
#' @param n number of uniform draws per sample (column)
#' @param iter number of samples, i.e. the number of columns
#' @param a lower bound of the uniform distribution (default 0)
#' @param b upper bound of the uniform distribution (default 5)
#'
#' @return numeric vector of length `iter` holding the column sums
#' @export
#'
#' @examples
#' w <- myclt(n = 50, iter = 10000, a = 5, b = 10)
myclt <- function(n, iter, a = 0, b = 5) {
  y <- runif(n * iter, a, b)
  # Fill row-wise; original used partial matching (nr=/nc=) — spell out
  # nrow/ncol to avoid deprecated partial argument matching.
  data <- matrix(y, nrow = n, ncol = iter, byrow = TRUE)
  sm <- apply(data, 2, sum)
  # First pass (plot = FALSE) only to get the bin midpoints for colouring.
  h <- hist(sm, plot = FALSE)
  hist(sm, col = rainbow(length(h$mids)), freq = FALSE,
       main = "Distribution of the sum of uniforms")
  # Overlay the CLT limiting normal: mean n(a+b)/2, variance n(b-a)^2/12.
  curve(dnorm(x, mean = n * (a + b) / 2, sd = sqrt(n * (b - a)^2 / 12)),
        add = TRUE, lwd = 2, col = "Blue")
  sm
}
|
/R/myclt.R
|
no_license
|
w142236/math4753
|
R
| false
| false
| 1,178
|
r
|
#' Central Limit Theorem Function
#'
#' @description Draws `iter` samples of size `n` from a Uniform(a, b)
#' distribution, computes the sum of each sample, plots a histogram of the
#' `iter` sums with the limiting normal density overlaid, and returns the
#' sums. Under the CLT, the histogram approaches a normal distribution as
#' `n` becomes large.
#'
#' @param n number of uniform draws per sample (column)
#' @param iter number of samples, i.e. the number of columns
#' @param a lower bound of the uniform distribution (default 0)
#' @param b upper bound of the uniform distribution (default 5)
#'
#' @return numeric vector of length `iter` holding the column sums
#' @export
#'
#' @examples
#' w <- myclt(n = 50, iter = 10000, a = 5, b = 10)
myclt <- function(n, iter, a = 0, b = 5) {
  y <- runif(n * iter, a, b)
  # Fill row-wise; original used partial matching (nr=/nc=) — spell out
  # nrow/ncol to avoid deprecated partial argument matching.
  data <- matrix(y, nrow = n, ncol = iter, byrow = TRUE)
  sm <- apply(data, 2, sum)
  # First pass (plot = FALSE) only to get the bin midpoints for colouring.
  h <- hist(sm, plot = FALSE)
  hist(sm, col = rainbow(length(h$mids)), freq = FALSE,
       main = "Distribution of the sum of uniforms")
  # Overlay the CLT limiting normal: mean n(a+b)/2, variance n(b-a)^2/12.
  curve(dnorm(x, mean = n * (a + b) / 2, sd = sqrt(n * (b - a)^2 / 12)),
        add = TRUE, lwd = 2, col = "Blue")
  sm
}
|
## Plot 2: line chart of Global Active Power (kW) on 1-2 Feb 2007,
## saved as plot2.png (480x480).
## Read the data, noting it is separated by ; and that the first
## row contains the variable names, so header = TRUE
power <- read.table("household_power_consumption.txt", sep = ";", header = TRUE, stringsAsFactors = FALSE)
## stringsAsFactors = FALSE keeps what would have been factor variables as character
## Convert the Date variable from character to Date
power$Date <- as.Date(power$Date, "%d/%m/%Y") ##needed to capitalize Y (4-digit year)
## Subset for only February 1st and 2nd, 2007
DATE1 <- as.Date("1/2/2007", "%d/%m/%Y")
DATE2 <- as.Date("2/2/2007", "%d/%m/%Y")
newset <- subset(power, Date >= DATE1 & Date <= DATE2)
datetime <- paste(newset$Date, newset$Time) ##Combine dates and times into one string
newset$datetime <- as.POSIXct(datetime) ##Parse to POSIXct and insert
##into the dataset as a new column
## type = "l" draws a line; xlab = "" suppresses the x-axis title
plot(newset$datetime, newset$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")
## Copy the on-screen plot to a PNG device, then close that device
dev.copy(png, file = "plot2.png", height = 480, width = 480)
dev.off()
|
/plot2.R
|
no_license
|
Fitzmar88/ExData_Plotting1
|
R
| false
| false
| 935
|
r
|
## Plot 2: line chart of Global Active Power (kW) on 1-2 Feb 2007,
## saved as plot2.png (480x480).
## Read the data, noting it is separated by ; and that the first
## row contains the variable names, so header = TRUE
power <- read.table("household_power_consumption.txt", sep = ";", header = TRUE, stringsAsFactors = FALSE)
## stringsAsFactors = FALSE keeps what would have been factor variables as character
## Convert the Date variable from character to Date
power$Date <- as.Date(power$Date, "%d/%m/%Y") ##needed to capitalize Y (4-digit year)
## Subset for only February 1st and 2nd, 2007
DATE1 <- as.Date("1/2/2007", "%d/%m/%Y")
DATE2 <- as.Date("2/2/2007", "%d/%m/%Y")
newset <- subset(power, Date >= DATE1 & Date <= DATE2)
datetime <- paste(newset$Date, newset$Time) ##Combine dates and times into one string
newset$datetime <- as.POSIXct(datetime) ##Parse to POSIXct and insert
##into the dataset as a new column
## type = "l" draws a line; xlab = "" suppresses the x-axis title
plot(newset$datetime, newset$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")
## Copy the on-screen plot to a PNG device, then close that device
dev.copy(png, file = "plot2.png", height = 480, width = 480)
dev.off()
|
library(shiny)
# Notes on Shiny input functions (chapter 3, basic UI).
# Input functions get values from the user to pass to the backend,
# e.g. selectInput(), sliderInput().
# Input function arguments:
# 1) all input functions have the same first argument:
#      inputId
#      - must be unique!
#      - connects the front end (ui) to the back end (server)
# 2) most have a second parameter which is the label;
#    this should be a human-readable label for the GUI
# 3) third is usually a default value for the input
# Inputs you'll likely use for the media tool:
# dateRangeInput - lets you input a range of dates
# selectInput    - lets you drop down to select different options
# fileInput      - lets a user upload a file
/wickham_shiny/chapter03/basic_ui/ui.R
|
no_license
|
ilellosmith/r_practice
|
R
| false
| false
| 641
|
r
|
library(shiny)
# Notes on Shiny input functions (chapter 3, basic UI).
# Input functions get values from the user to pass to the backend,
# e.g. selectInput(), sliderInput().
# Input function arguments:
# 1) all input functions have the same first argument:
#      inputId
#      - must be unique!
#      - connects the front end (ui) to the back end (server)
# 2) most have a second parameter which is the label;
#    this should be a human-readable label for the GUI
# 3) third is usually a default value for the input
# Inputs you'll likely use for the media tool:
# dateRangeInput - lets you input a range of dates
# selectInput    - lets you drop down to select different options
# fileInput      - lets a user upload a file
|
# Base directory holding the HDPA experiment outputs.
hdpaDir <- '/Users/jonathan/Desktop/data'
# One row per corpus (NAN, NYT): base dir, run subdirectory,
# coherence file name, evaluation-log file name.
hdpaFiles <- matrix(c(
	hdpaDir, 'nan-random-t5000-model-b500-k0.9-20130529-0546', 'coherence-20130530-1952.txt', 'eval-20130530-2209.txt',
	hdpaDir, 'nyt-random-t5000-model-b500-k0.9-20130529-0546', 'coherence-20130531-0455.txt', 'eval-20130531-0756.txt'
	), 2, 4, byrow=T)

# Base directory holding the oHDP (Wang comparison) outputs.
ohdpDir <- '/Users/jonathan/Documents/6 uathabasca/project/698 - implementation/wang comparison/dat/20130331 complete runs'
# One row per corpus: base dir, run subdirectory, coherence file,
# test-log file, and the number of log lines to keep (stored as a string,
# like every other entry of this character matrix).
ohdpFiles <- matrix(c(
	ohdpDir, 'corpus-nan-kappa-0.9-tau-1-batchsize-500', 'coherence-20130605-1523.txt', 'test-log.dat', 13,
	ohdpDir, 'corpus-nyt-kappa-0.9-tau-1-batchsize-500', 'coherence-20130605-1506.txt', 'test-log.dat', 14
	), 2, 5, byrow=T)
# Read an HDPA evaluation log into a data frame.
# ff: character vector c(dir, subdir, coherence.file, eval.file); the
#     eval file (ff[4]) is read from dir/subdir/.
# The log is whitespace-aligned, so runs of spaces are collapsed to a
# single tab before parsing with read.table.
readHdpa <- function(ff) {
  f <- readLines(paste(ff[1], ff[2], ff[4], sep = '/'))
  # trimws() actually removes trailing blanks; the original regex
  # "^ *(.*) *$" left them in place because ".*" is greedy, which turned
  # trailing spaces into a spurious trailing tab column.
  f <- trimws(f)
  f <- gsub(" +", "\t", f)
  tc <- textConnection(f)
  on.exit(close(tc), add = TRUE)  # don't leak the text connection
  read.table(tc, header = TRUE, sep = "\t")
}
# Read the first ff[5] lines of an oHDP test log into a data frame.
# ff: character vector c(dir, subdir, coherence.file, log.file, n.lines);
#     all entries are strings, so n.lines is converted explicitly rather
#     than relying on `:` to coerce a character operand.
readOhdp <- function(ff) {
  f <- readLines(paste(ff[1], ff[2], ff[4], sep = '/'))
  f <- f[seq_len(as.integer(ff[5]))]  # header + data rows to keep
  # trimws() actually removes trailing blanks; the original regex
  # "^ *(.*) *$" left them in place because ".*" is greedy.
  f <- trimws(f)
  f <- gsub(" +", "\t", f)
  tc <- textConnection(f)
  on.exit(close(tc), add = TRUE)  # don't leak the text connection
  read.table(tc, header = TRUE, sep = "\t")
}
# Read a topic-coherence file into a data frame.
# ff: character vector whose first three entries are dir, subdir and the
#     coherence file name. Keeps the header line plus file lines 3..302
#     (300 topic rows; line 2 is skipped), strips single quotes, and
#     collapses space runs to tabs before parsing.
readCoherenceFile <- function(ff) {
  f <- readLines(paste(ff[1], ff[2], ff[3], sep = '/'))
  f <- f[c(1, 3:302)]  # header + the 300 topic rows
  # trimws() actually removes trailing blanks; the original regex
  # "^ *(.*) *$" left them in place because ".*" is greedy.
  f <- trimws(f)
  f <- gsub("'", "", f)
  f <- gsub(" +", "\t", f)
  tc <- textConnection(f)
  on.exit(close(tc), add = TRUE)  # don't leak the text connection
  read.table(tc, header = TRUE, sep = "\t")
}
# Plot per-word held-out likelihood vs. number of documents seen for the
# HDPA and oHDP runs, saved as "<title>.pdf" (lower-cased, spaces -> dashes)
# in `targetdir`.
# hdpa: data frame with columns total.docs and per.word (from readHdpa).
# ohdp: data frame with columns doc.count, score and word.count (readOhdp).
createLikelihoodChart <- function(targetdir, title, hdpa, ohdp) {
  chartfile <- tolower(gsub("$", ".pdf", gsub(" ", "-", title)))
  pdf(paste(targetdir, chartfile, sep='/'), width=3, height=3)
  # Snap the final oHDP x-value to HDPA's final doc count so both curves
  # end at the same x position.
  ohdp$doc.count[length(ohdp$doc.count)] = hdpa$total.docs[length(hdpa$total.docs)]
  # Convert total oHDP scores to per-word values.
  ohdpPerword <- apply(as.matrix(ohdp$score), 1, function(x) x / ohdp$word.count[1])
  xrange <- range(0, max(hdpa$total.docs, ohdp$doc.count))
  yrange <- range(min(hdpa$per.word, ohdpPerword), max(hdpa$per.word, ohdpPerword))
  par(oma=c(0,0,0,0), mar=c(3,3,2,2)+0.1, cex=0.8)
  # Empty plot first (type = "n"); the curves are added below.
  plot(xrange, yrange, xaxt="n", type="n", xlab="", ylab="")
  # Custom x-axis: three evenly spaced, integer-formatted ticks.
  aty = seq(from=xrange[1], to=xrange[2], length.out=3)
  axis(1, at=aty, labels=formatC(aty, format="d"))
  # Line types for the two curves. (This shadows graphics::lines, but the
  # calls below still resolve to the function because R looks for a
  # function when a name is used in call position.)
  lines = c(1,2)
  lines(hdpa$total.docs, hdpa$per.word, type="l", lwd=1.5, lty=lines[1], pch=18)
  lines(ohdp$doc.count, ohdpPerword, type="l", lwd=1.5, lty=lines[2], pch=18)
  legend("bottomright", c('hdpa', 'ohdp'), inset=0.1, cex=0.8, pch=18, lty=lines)
  dev.off()
}
# Plot topic-coherence curves (values sorted in decreasing order) for the
# HDPA and oHDP runs, saved as "<title>.pdf" (lower-cased, spaces -> dashes)
# in `targetdir`. The x-axis is fixed to 300 topics.
# hdpa, ohdp: data frames with a `coherence` column (from readCoherenceFile).
createCoherenceChart <- function(targetdir, title, hdpa, ohdp) {
  chartfile <- tolower(gsub("$", ".pdf", gsub(" ", "-", title)))
  pdf(paste(targetdir, chartfile, sep='/'), width=3, height=3)
  xrange <- range(0, 300)
  yrange <- range(min(hdpa$coherence, ohdp$coherence), max(hdpa$coherence, ohdp$coherence))
  par(oma=c(0,0,0,0), mar=c(3,3,2,2)+0.1, cex=0.8)
  # Empty plot first (type = "n"); the curves are added below.
  plot(xrange, yrange, xaxt="n", type="n", xlab="", ylab="")
  # Custom x-axis: three evenly spaced, integer-formatted ticks.
  aty = seq(from=xrange[1], to=xrange[2], length.out=3)
  axis(1, at=aty, labels=formatC(aty, format="d"))
  axis(2)
  # Line types for the two curves (shadows graphics::lines; calls in
  # function position still resolve to the function).
  lines = c(1,2)
  lines(sort(hdpa$coherence, T), type="l", lwd=1.5, lty=lines[1], pch=18)
  lines(sort(ohdp$coherence, T), type="l", lwd=1.5, lty=lines[2], pch=18)
  legend("bottomleft", c('hdpa', 'ohdp'), inset=0.1, cex=0.8, pch=18, lty=lines)
  dev.off()
}
# Generate the likelihood and coherence comparison charts on the Desktop.
hdpa = readHdpa(hdpaFiles[1,])
ohdp = readOhdp(ohdpFiles[1,])
createLikelihoodChart('/Users/jonathan/Desktop', 'NAN Likelihood', hdpa, ohdp)

hdpa = readHdpa(hdpaFiles[2,])
ohdp = readOhdp(ohdpFiles[2,])
createLikelihoodChart('/Users/jonathan/Desktop', 'NYT Likelihood', hdpa, ohdp)

# NOTE(review): the original repeated the NAN-likelihood block a second
# time here; it regenerated the identical chart, so the duplicate was removed.

hdpa = readCoherenceFile(hdpaFiles[1,])
ohdp = readCoherenceFile(ohdpFiles[1,])
createCoherenceChart('/Users/jonathan/Desktop', 'NAN Coherence', hdpa, ohdp)

hdpa = readCoherenceFile(hdpaFiles[2,])
ohdp = readCoherenceFile(ohdpFiles[2,])
createCoherenceChart('/Users/jonathan/Desktop', 'NYT Coherence', hdpa, ohdp)
|
/src/main/r/plot-emnlp.R
|
no_license
|
jesterhazy/hdpa
|
R
| false
| false
| 3,965
|
r
|
# Base directory holding the HDPA experiment outputs.
hdpaDir <- '/Users/jonathan/Desktop/data'
# One row per corpus (NAN, NYT): base dir, run subdirectory,
# coherence file name, evaluation-log file name.
hdpaFiles <- matrix(c(
	hdpaDir, 'nan-random-t5000-model-b500-k0.9-20130529-0546', 'coherence-20130530-1952.txt', 'eval-20130530-2209.txt',
	hdpaDir, 'nyt-random-t5000-model-b500-k0.9-20130529-0546', 'coherence-20130531-0455.txt', 'eval-20130531-0756.txt'
	), 2, 4, byrow=T)

# Base directory holding the oHDP (Wang comparison) outputs.
ohdpDir <- '/Users/jonathan/Documents/6 uathabasca/project/698 - implementation/wang comparison/dat/20130331 complete runs'
# One row per corpus: base dir, run subdirectory, coherence file,
# test-log file, and the number of log lines to keep (stored as a string,
# like every other entry of this character matrix).
ohdpFiles <- matrix(c(
	ohdpDir, 'corpus-nan-kappa-0.9-tau-1-batchsize-500', 'coherence-20130605-1523.txt', 'test-log.dat', 13,
	ohdpDir, 'corpus-nyt-kappa-0.9-tau-1-batchsize-500', 'coherence-20130605-1506.txt', 'test-log.dat', 14
	), 2, 5, byrow=T)
# Read an HDPA evaluation log into a data frame.
# ff: character vector c(dir, subdir, coherence.file, eval.file); the
#     eval file (ff[4]) is read from dir/subdir/.
# The log is whitespace-aligned, so runs of spaces are collapsed to a
# single tab before parsing with read.table.
readHdpa <- function(ff) {
  f <- readLines(paste(ff[1], ff[2], ff[4], sep = '/'))
  # trimws() actually removes trailing blanks; the original regex
  # "^ *(.*) *$" left them in place because ".*" is greedy, which turned
  # trailing spaces into a spurious trailing tab column.
  f <- trimws(f)
  f <- gsub(" +", "\t", f)
  tc <- textConnection(f)
  on.exit(close(tc), add = TRUE)  # don't leak the text connection
  read.table(tc, header = TRUE, sep = "\t")
}
# Read the first ff[5] lines of an oHDP test log into a data frame.
# ff: character vector c(dir, subdir, coherence.file, log.file, n.lines);
#     all entries are strings, so n.lines is converted explicitly rather
#     than relying on `:` to coerce a character operand.
readOhdp <- function(ff) {
  f <- readLines(paste(ff[1], ff[2], ff[4], sep = '/'))
  f <- f[seq_len(as.integer(ff[5]))]  # header + data rows to keep
  # trimws() actually removes trailing blanks; the original regex
  # "^ *(.*) *$" left them in place because ".*" is greedy.
  f <- trimws(f)
  f <- gsub(" +", "\t", f)
  tc <- textConnection(f)
  on.exit(close(tc), add = TRUE)  # don't leak the text connection
  read.table(tc, header = TRUE, sep = "\t")
}
# Read a topic-coherence file into a data frame.
# ff: character vector whose first three entries are dir, subdir and the
#     coherence file name. Keeps the header line plus file lines 3..302
#     (300 topic rows; line 2 is skipped), strips single quotes, and
#     collapses space runs to tabs before parsing.
readCoherenceFile <- function(ff) {
  f <- readLines(paste(ff[1], ff[2], ff[3], sep = '/'))
  f <- f[c(1, 3:302)]  # header + the 300 topic rows
  # trimws() actually removes trailing blanks; the original regex
  # "^ *(.*) *$" left them in place because ".*" is greedy.
  f <- trimws(f)
  f <- gsub("'", "", f)
  f <- gsub(" +", "\t", f)
  tc <- textConnection(f)
  on.exit(close(tc), add = TRUE)  # don't leak the text connection
  read.table(tc, header = TRUE, sep = "\t")
}
# Plot per-word held-out likelihood vs. number of documents seen for the
# HDPA and oHDP runs, saved as "<title>.pdf" (lower-cased, spaces -> dashes)
# in `targetdir`.
# hdpa: data frame with columns total.docs and per.word (from readHdpa).
# ohdp: data frame with columns doc.count, score and word.count (readOhdp).
createLikelihoodChart <- function(targetdir, title, hdpa, ohdp) {
  chartfile <- tolower(gsub("$", ".pdf", gsub(" ", "-", title)))
  pdf(paste(targetdir, chartfile, sep='/'), width=3, height=3)
  # Snap the final oHDP x-value to HDPA's final doc count so both curves
  # end at the same x position.
  ohdp$doc.count[length(ohdp$doc.count)] = hdpa$total.docs[length(hdpa$total.docs)]
  # Convert total oHDP scores to per-word values.
  ohdpPerword <- apply(as.matrix(ohdp$score), 1, function(x) x / ohdp$word.count[1])
  xrange <- range(0, max(hdpa$total.docs, ohdp$doc.count))
  yrange <- range(min(hdpa$per.word, ohdpPerword), max(hdpa$per.word, ohdpPerword))
  par(oma=c(0,0,0,0), mar=c(3,3,2,2)+0.1, cex=0.8)
  # Empty plot first (type = "n"); the curves are added below.
  plot(xrange, yrange, xaxt="n", type="n", xlab="", ylab="")
  # Custom x-axis: three evenly spaced, integer-formatted ticks.
  aty = seq(from=xrange[1], to=xrange[2], length.out=3)
  axis(1, at=aty, labels=formatC(aty, format="d"))
  # Line types for the two curves. (This shadows graphics::lines, but the
  # calls below still resolve to the function because R looks for a
  # function when a name is used in call position.)
  lines = c(1,2)
  lines(hdpa$total.docs, hdpa$per.word, type="l", lwd=1.5, lty=lines[1], pch=18)
  lines(ohdp$doc.count, ohdpPerword, type="l", lwd=1.5, lty=lines[2], pch=18)
  legend("bottomright", c('hdpa', 'ohdp'), inset=0.1, cex=0.8, pch=18, lty=lines)
  dev.off()
}
# Plot topic-coherence curves (values sorted in decreasing order) for the
# HDPA and oHDP runs, saved as "<title>.pdf" (lower-cased, spaces -> dashes)
# in `targetdir`. The x-axis is fixed to 300 topics.
# hdpa, ohdp: data frames with a `coherence` column (from readCoherenceFile).
createCoherenceChart <- function(targetdir, title, hdpa, ohdp) {
  chartfile <- tolower(gsub("$", ".pdf", gsub(" ", "-", title)))
  pdf(paste(targetdir, chartfile, sep='/'), width=3, height=3)
  xrange <- range(0, 300)
  yrange <- range(min(hdpa$coherence, ohdp$coherence), max(hdpa$coherence, ohdp$coherence))
  par(oma=c(0,0,0,0), mar=c(3,3,2,2)+0.1, cex=0.8)
  # Empty plot first (type = "n"); the curves are added below.
  plot(xrange, yrange, xaxt="n", type="n", xlab="", ylab="")
  # Custom x-axis: three evenly spaced, integer-formatted ticks.
  aty = seq(from=xrange[1], to=xrange[2], length.out=3)
  axis(1, at=aty, labels=formatC(aty, format="d"))
  axis(2)
  # Line types for the two curves (shadows graphics::lines; calls in
  # function position still resolve to the function).
  lines = c(1,2)
  lines(sort(hdpa$coherence, T), type="l", lwd=1.5, lty=lines[1], pch=18)
  lines(sort(ohdp$coherence, T), type="l", lwd=1.5, lty=lines[2], pch=18)
  legend("bottomleft", c('hdpa', 'ohdp'), inset=0.1, cex=0.8, pch=18, lty=lines)
  dev.off()
}
# Generate the likelihood and coherence comparison charts on the Desktop.
hdpa = readHdpa(hdpaFiles[1,])
ohdp = readOhdp(ohdpFiles[1,])
createLikelihoodChart('/Users/jonathan/Desktop', 'NAN Likelihood', hdpa, ohdp)

hdpa = readHdpa(hdpaFiles[2,])
ohdp = readOhdp(ohdpFiles[2,])
createLikelihoodChart('/Users/jonathan/Desktop', 'NYT Likelihood', hdpa, ohdp)

# NOTE(review): the original repeated the NAN-likelihood block a second
# time here; it regenerated the identical chart, so the duplicate was removed.

hdpa = readCoherenceFile(hdpaFiles[1,])
ohdp = readCoherenceFile(ohdpFiles[1,])
createCoherenceChart('/Users/jonathan/Desktop', 'NAN Coherence', hdpa, ohdp)

hdpa = readCoherenceFile(hdpaFiles[2,])
ohdp = readCoherenceFile(ohdpFiles[2,])
createCoherenceChart('/Users/jonathan/Desktop', 'NYT Coherence', hdpa, ohdp)
|
# Convert a JSON string to an R value.
# Uses fromJSON (jsonlite; see the jsonlite::: usage below) with
# simplifyDataFrame = FALSE so arrays of objects stay lists rather than
# being collapsed into data frames.
from_json <- function(json) {
  fromJSON(json, simplifyDataFrame = FALSE)
}
# Convert an R value to a JSON string.
# jsonlite serialises a zero-length R list as an empty JSON array ("[]");
# callers expect an empty JSON object instead, so that case is
# special-cased to "{}".
to_json <- function(value) {
  if (is.list(value) && length(value) == 0) {
    return('{}')
  }
  serialised <- toJSON(
    value,
    null = "null",
    na = "null",
    dataframe = "columns",
    digits = NA,
    auto_unbox=TRUE
  )
  toString(serialised)
}
# Grab jsonlite's internal asJSON generic so new methods can be registered
# on it. NOTE(review): `:::` reaches into jsonlite's private namespace and
# may break across jsonlite versions.
asJSON <- jsonlite:::asJSON
# Create a hook for conversion of R6 instances to JSON.
# setClass('R6') registers 'R6' as a formal (S4) class name so the
# setMethod() call below can dispatch on R6 objects.
methods::setClass('R6')
methods::setMethod('asJSON', 'R6', function(x, ...) {
  members <- list()
  # Collect every non-function member of the instance, then serialise.
  for(name in ls(x, sorted=FALSE)) {
    if (!is.function(x[[name]])) members[[name]] <- x[[name]]
  }
  to_json(members)
})
|
/R/json.R
|
permissive
|
RaoOfPhysics/r
|
R
| false
| false
| 761
|
r
|
# Convert a JSON string to an R value.
# Uses fromJSON (jsonlite; see the jsonlite::: usage below) with
# simplifyDataFrame = FALSE so arrays of objects stay lists rather than
# being collapsed into data frames.
from_json <- function(json) {
  fromJSON(json, simplifyDataFrame = FALSE)
}
# Convert an R value to a JSON string.
# jsonlite serialises a zero-length R list as an empty JSON array ("[]");
# callers expect an empty JSON object instead, so that case is
# special-cased to "{}".
to_json <- function(value) {
  if (is.list(value) && length(value) == 0) {
    return('{}')
  }
  serialised <- toJSON(
    value,
    null = "null",
    na = "null",
    dataframe = "columns",
    digits = NA,
    auto_unbox=TRUE
  )
  toString(serialised)
}
# Grab jsonlite's internal asJSON generic so new methods can be registered
# on it. NOTE(review): `:::` reaches into jsonlite's private namespace and
# may break across jsonlite versions.
asJSON <- jsonlite:::asJSON
# Create a hook for conversion of R6 instances to JSON.
# setClass('R6') registers 'R6' as a formal (S4) class name so the
# setMethod() call below can dispatch on R6 objects.
methods::setClass('R6')
methods::setMethod('asJSON', 'R6', function(x, ...) {
  members <- list()
  # Collect every non-function member of the instance, then serialise.
  for(name in ls(x, sorted=FALSE)) {
    if (!is.function(x[[name]])) members[[name]] <- x[[name]]
  }
  to_json(members)
})
|
\name{positioning.functions}
\Rdversion{1.1}
\alias{positioning.functions}
\alias{positioning.function}
\alias{Positioning.Function}
\alias{Positioning.Functions}
\title{Built-in Positioning Functions for direct label placement}
\description{When adding direct labels to a grouped plot, label
placement can be specified using a Positioning Function (or a list of
them), of the form function(d,...), where d is a data frame of the
points to plot, with columns x y groups. The job of the Positioning
Function(s) is to return the position of each direct label you want to
plot as a data frame, with 1 row for each label. Thus normally a
Positioning Function will return 1 row for each group. Several
built-in Positioning Functions are discussed below, but you can also
create your own, either from scratch or by using dl.indep and
dl.trans.}
\usage{
## Longitudinal data:
## first.points
## left.points ## same as first.points
## last.points
## right.points ## same as last.points
## bottom.points
## low.points ## same as bottom.points
## Also good for density plots:
## top.points
## high.points ## same as top.points
## Scatter plots:
## get.means
## perpendicular.lines
## empty.grid
## empty.grid.2
}
\arguments{
\item{d}{Data frame of points to plot, with columns x y groups.}
\item{...}{Ignored.}
}
\details{
}
\value{Data frame of label positions.}
\references{
}
\author{Toby Dylan Hocking <toby.hocking@inria.fr>}
\note{
}
\seealso{
}
|
/man/positioning.functions.Rd
|
no_license
|
cran/latticedl
|
R
| false
| false
| 1,480
|
rd
|
\name{positioning.functions}
\Rdversion{1.1}
\alias{positioning.functions}
\alias{positioning.function}
\alias{Positioning.Function}
\alias{Positioning.Functions}
\title{Built-in Positioning Functions for direct label placement}
\description{When adding direct labels to a grouped plot, label
placement can be specified using a Positioning Function (or a list of
them), of the form function(d,...), where d is a data frame of the
points to plot, with columns x y groups. The job of the Positioning
Function(s) is to return the position of each direct label you want to
plot as a data frame, with 1 row for each label. Thus normally a
Positioning Function will return 1 row for each group. Several
built-in Positioning Functions are discussed below, but you can also
create your own, either from scratch or by using dl.indep and
dl.trans.}
\usage{
## Longitudinal data:
## first.points
## left.points ## same as first.points
## last.points
## right.points ## same as last.points
## bottom.points
## low.points ## same as bottom.points
## Also good for density plots:
## top.points
## high.points ## same as top.points
## Scatter plots:
## get.means
## perpendicular.lines
## empty.grid
## empty.grid.2
}
\arguments{
\item{d}{Data frame of points to plot, with columns x y groups.}
\item{...}{Ignored.}
}
\details{
}
\value{Data frame of label positions.}
\references{
}
\author{Toby Dylan Hocking <toby.hocking@inria.fr>}
\note{
}
\seealso{
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fn_exp_categorical_viz.R
\name{ExpCatViz}
\alias{ExpCatViz}
\title{Distributions of categorical variables}
\usage{
ExpCatViz(
data,
target = NULL,
fname = NULL,
clim = 10,
col = NULL,
margin = 1,
Page = NULL,
Flip = F,
sample = NULL,
rdata = FALSE,
value = NULL,
gtitle = NULL,
theme = "Default"
)
}
\arguments{
\item{data}{dataframe or matrix}
\item{target}{target variable. This is not a mandatory field}
\item{fname}{output file name. Output will be generated in PDF format}
\item{clim}{maximum categories to be considered to include in bar graphs}
\item{col}{define the colors to fill the bars, default it will take sample colours}
\item{margin}{index, 1 for row based proportions and 2 for column based proportions}
\item{Page}{output pattern. if Page=c(3,2), It will generate 6 plots with 3 rows and 2 columns}
\item{Flip}{default vertical bars. It will be used to flip the axis vertical to horizontal}
\item{sample}{random selection of categorical variable}
\item{rdata}{to plot bar graph for frequency/aggregated table}
\item{value}{value column name. This is mandatory if 'rdata' is TRUE}
\item{gtitle}{graph title}
\item{theme}{adding extra themes, geoms, and scales for 'ggplot2' (eg: themes options from ggthemes package)}
}
\value{
This function returns collated graphs in grid format in PDF or JPEG format. All the files will be stored in the working directory
\itemize{
\item \code{Bar graph} for raw data(this function will dynamically pick all the categorical variable and plot the bar chart)
\item \code{Bar graph} for aggregated data
\item \code{Bar graph} is a Stacked Bar graph by target variable
}
}
\description{
This function automatically scans through each variable and creates bar plot for categorical variable.
}
\examples{
## Bar graph for specified variable
mtdata = mtcars
mtdata$carname = rownames(mtcars)
ExpCatViz(data=mtdata,target="carname",col="blue",rdata=TRUE,value="mpg")
n=nrow(mtdata)
ExpCatViz(data=mtdata,target="carname",col=rainbow(n),rdata=TRUE,value="mpg") ## Rainbow colour
# Stacked bar chart
ExpCatViz(data=mtdata,target = "gear",col=hcl.colors(3, "Set 2"))
ExpCatViz(data=mtdata,target = "gear",col=c("red", "green", "blue"))
# Bar chart
ExpCatViz(data=mtdata)
ExpCatViz(data=mtdata,col="blue",gtitle = "Barplot")
}
\seealso{
\code{\link[ggplot2:geom_bar]{geom_bar}}
}
|
/man/ExpCatViz.Rd
|
no_license
|
daya6489/SmartEDA
|
R
| false
| true
| 2,445
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fn_exp_categorical_viz.R
\name{ExpCatViz}
\alias{ExpCatViz}
\title{Distributions of categorical variables}
\usage{
ExpCatViz(
data,
target = NULL,
fname = NULL,
clim = 10,
col = NULL,
margin = 1,
Page = NULL,
Flip = F,
sample = NULL,
rdata = FALSE,
value = NULL,
gtitle = NULL,
theme = "Default"
)
}
\arguments{
\item{data}{dataframe or matrix}
\item{target}{target variable. This is not a mandatory field}
\item{fname}{output file name. Output will be generated in PDF format}
\item{clim}{maximum categories to be considered to include in bar graphs}
\item{col}{define the colors to fill the bars, default it will take sample colours}
\item{margin}{index, 1 for row based proportions and 2 for column based proportions}
\item{Page}{output pattern. if Page=c(3,2), It will generate 6 plots with 3 rows and 2 columns}
\item{Flip}{default vertical bars. It will be used to flip the axis vertical to horizontal}
\item{sample}{random selection of categorical variable}
\item{rdata}{to plot bar graph for frequency/aggregated table}
\item{value}{value column name. This is mandatory if 'rdata' is TRUE}
\item{gtitle}{graph title}
\item{theme}{adding extra themes, geoms, and scales for 'ggplot2' (eg: themes options from ggthemes package)}
}
\value{
This function returns collated graphs in grid format in PDF or JPEG format. All the files will be stored in the working directory
\itemize{
\item \code{Bar graph} for raw data(this function will dynamically pick all the categorical variable and plot the bar chart)
\item \code{Bar graph} for aggregated data
\item \code{Bar graph} is a Stacked Bar graph by target variable
}
}
\description{
This function automatically scans through each variable and creates bar plot for categorical variable.
}
\examples{
## Bar graph for specified variable
mtdata = mtcars
mtdata$carname = rownames(mtcars)
ExpCatViz(data=mtdata,target="carname",col="blue",rdata=TRUE,value="mpg")
n=nrow(mtdata)
ExpCatViz(data=mtdata,target="carname",col=rainbow(n),rdata=TRUE,value="mpg") ## Rainbow colour
# Stacked bar chart
ExpCatViz(data=mtdata,target = "gear",col=hcl.colors(3, "Set 2"))
ExpCatViz(data=mtdata,target = "gear",col=c("red", "green", "blue"))
# Bar chart
ExpCatViz(data=mtdata)
ExpCatViz(data=mtdata,col="blue",gtitle = "Barplot")
}
\seealso{
\code{\link[ggplot2:geom_bar]{geom_bar}}
}
|
# Practical Machine Learning: train a KNN classifier on the Weight Lifting
# Exercise data (caret) and evaluate it on a held-out partition.
training <- read.csv("pml-training.csv", sep = ",", header = TRUE, na.strings = c("NA", ""), stringsAsFactors = FALSE, as.is = TRUE)
training$classe <- as.factor(training$classe)
# Drop near-zero-variance predictors and the first seven bookkeeping columns.
training <- training[, -nearZeroVar(training)]
training <- training[, -c(1, 2, 3, 4, 5, 6, 7)]
inTrain <- createDataPartition(y = training$classe, p = 0.75, list = FALSE)
# FIX(review): take the test rows BEFORE overwriting 'training'. The
# original subset 'training' first and then indexed the already-reduced
# frame with -inTrain, selecting the wrong rows (test set overlapped the
# training set).
testing <- training[-inTrain, ]
training <- training[inTrain, ]
training <- training[, !sapply(training, is.character)]
# Centre/scale, impute missing values with KNN, and reduce with PCA
# keeping 95% of the variance; fit KNN on the transformed predictors.
pre <- preProcess(training[, -length(training)], method = c("center", "scale", "knnImpute", "pca"), thresh = 0.95)
clean_data <- predict(pre, training[, -length(training)])
model <- train(training$classe ~ ., data = clean_data, method = "knn")
# Apply the same preprocessing to the held-out rows and evaluate.
test <- predict(pre, testing[, -length(testing)])
confusionMatrix(testing$classe, predict(model, test))
|
/machine learning.R
|
no_license
|
ye298/Machine-learning
|
R
| false
| false
| 758
|
r
|
# Practical Machine Learning: train a KNN classifier on the Weight Lifting
# Exercise data (caret) and evaluate it on a held-out partition.
training <- read.csv("pml-training.csv", sep = ",", header = TRUE, na.strings = c("NA", ""), stringsAsFactors = FALSE, as.is = TRUE)
training$classe <- as.factor(training$classe)
# Drop near-zero-variance predictors and the first seven bookkeeping columns.
training <- training[, -nearZeroVar(training)]
training <- training[, -c(1, 2, 3, 4, 5, 6, 7)]
inTrain <- createDataPartition(y = training$classe, p = 0.75, list = FALSE)
# FIX(review): take the test rows BEFORE overwriting 'training'. The
# original subset 'training' first and then indexed the already-reduced
# frame with -inTrain, selecting the wrong rows (test set overlapped the
# training set).
testing <- training[-inTrain, ]
training <- training[inTrain, ]
training <- training[, !sapply(training, is.character)]
# Centre/scale, impute missing values with KNN, and reduce with PCA
# keeping 95% of the variance; fit KNN on the transformed predictors.
pre <- preProcess(training[, -length(training)], method = c("center", "scale", "knnImpute", "pca"), thresh = 0.95)
clean_data <- predict(pre, training[, -length(training)])
model <- train(training$classe ~ ., data = clean_data, method = "knn")
# Apply the same preprocessing to the held-out rows and evaluate.
test <- predict(pre, testing[, -length(testing)])
confusionMatrix(testing$classe, predict(model, test))
|
\name{plotAATT}
\alias{plotAATT}
\title{R function for plotting AA/TT/TA/AT frequency against the distance from the nucleosome center}
\description{This function plots AA/TT/TA/AT frequency against the distance from the nucleosome center.
}
\usage{plotAATT(seqname,genfile,center)}
\arguments{
\item{genfile}{one or multiple strings, each string is for the path and name of a DNA sequence file in FASTA format. This sequence file can be located in any directory. It must contain only one sequence. By FASTA format, we require each line to be of the same length (the last line can be shorter; the first line should be '>sequenceName'). The length of each line should be no longer than 400 bp.}
\item{center}{one string for the path and name of the file where a unique or redundant nucleosome map is saved.}
\item{seqname}{the default value is "all", which specifies all chromosomes listed in \code{center}. One can also specify one or more individual chromosomes, e.g. "chrI" or c("chrI", "chrII"). The sequence name format must be same as in \code{center}.}
}
\value{\code{plotAATT} plots AA/TT/TA/AT frequency against the distance from the nucleosome center.
}
\examples{
\dontrun{
library(NuCMap)
library(nucmapData)
## the user should replace "system.file("extdata",~,package="nucmapData")"
## by the actual path and file name.
chrI=system.file("extdata", "chrI.fa",package="nucmapData")
chrII=system.file("extdata", "chrII.fa",package="nucmapData")
umap=system.file("extdata", "UNIQUEcenters.txt",package="nucmapData")
plotAATT(genfile=c(chrI,chrII),center=umap,seqname=c("chrI","chrII"))
}
}
%\keyword{}
|
/man/plotAATT.Rd
|
no_license
|
HaoxiangLin/NuCMap
|
R
| false
| false
| 1,631
|
rd
|
\name{plotAATT}
\alias{plotAATT}
\title{R function for plotting AA/TT/TA/AT frequency against to the distance from the nucleosome center}
\description{This function plots AA/TT/TA/AT frequency against to the distance from the nucleosome center.
}
\usage{plotAATT(seqname,genfile,center)}
\arguments{
\item{genfile}{one or multiple strings, each string is for the path and name of a DNA sequence file in FASTA format. This sequence file can be located in any directory. It must contain only one sequence. By FASTA format, we require each line to be of the same length (the last line can be shorter; the first line should be '>sequenceName'). The length of each line should be no longer than 400 bp.}
\item{center}{one string for the path and name of the file where a unique or redundant nucleosome map is saved.}
\item{seqname}{the default value is "all", which specifies all chromosomes listed in \code{center}. One can also specify one or more individual chromosomes, e.g. "chrI" or c("chrI", "chrII"). The sequence name format must be same as in \code{center}.}
}
\value{\code{plotAATT} plots AA/TT/TA/AT frequency against to the distance from the nucleosome center.
}
\examples{
\dontrun{
library(NuCMap)
library(nucmapData)
## the user should replace "system.file("extdata",~,package="nucmapData")"
## by the actual path and file name.
chrI=system.file("extdata", "chrI.fa",package="nucmapData")
chrII=system.file("extdata", "chrII.fa",package="nucmapData")
umap=system.file("extdata", "UNIQUEcenters.txt",package="nucmapData")
plotAATT(genfile=c(chrI,chrII),center=umap,seqname=c("chrI","chrII"))
}
}
%\keyword{}
|
# Monte-Carlo study: simulate n SVJ (stochastic volatility with jumps) price
# paths and re-estimate the model parameters by maximum likelihood for each
# path, logging estimates and a convergence diagnostic per replication.
set.seed(14137)
n = 50     # number of simulated paths (replications)
len = 1001 # observations per path
require(Rcpp)
require(BH)
require(nloptr)
require(ggplot2)
require(gridExtra)
options(digits=8)
source('../conv_plot.R')       # provides check.converge() (external file)
sourceCpp('../Simulation.cpp') # provides SimSVJ() (external C++ file)
# True parameter vector (mu, kappa, theta, xi, rho, lambda, mu_s, sigma_s);
# the last entry is stored squared: 0.0967^2.
par = c(0.05, 3.9, 0.08, 0.3038, -0.6974, 3.2, -0.3551, 0.0967*0.0967)
sourceCpp('../nll.cpp', verbose = F)  # provides nll(), the negative log-likelihood
opts <- list(algorithm="NLOPT_LN_NELDERMEAD", xtol_rel = 1.0e-6, maxeval = 10000)
# set lower and upper bound
lb = c(-0.5, 0.001, 1e-6, 0.001, -0.99, 0, -1, 1e-8)
ub = c(0.5, 100, 1, 1, 0.99, 10, 0, 1)
# run n paths
converge.list = vector(mode="numeric", length = n)
est.par.list = data.frame(mu = double(), kappa = double(), theta = double(), xi = double(), rho = double(), lambda = double(), mu_s = double(), sigma_s = double())
i = 1
while(i <= n) {
seed.list = sample(10000, 4)
# Simulate one path: start at log(100), initial variance par[3] (= theta),
# time step 1/252 (daily), with four independent RNG seeds.
y = SimSVJ(par, len, log(100), par[3], 1/252, seed.list[1], seed.list[2], seed.list[3], seed.list[4])
f <- function(p) {
return(nll(p, y))
}
start = Sys.time()
# Minimise the negative log-likelihood starting from the true parameters.
result = nloptr(x0 = par, eval_f = f, 
opts = opts,
lb = lb, ub = ub)
# If the optimiser reports failure, discard this path and simulate a new one.
if(result$status < 0)
next;
est.res = as.numeric(result$solution)
conv.res = check.converge(f, result$solution)
# Append this replication's results to the running CSV logs on disk.
write.table(est.res, file = "./est.csv", sep = ",", append = T, row.names = F, col.names = F, quote = F)
write.table(conv.res, file = "./conv.csv", sep = ",", append = T, row.names = F, col.names = F, quote = F)
print(i)
print(Sys.time() - start)
print(est.res)
est.par.list[i, ] = as.numeric(est.res)
converge.list[i] = conv.res
i = i + 1
}
# Final summary files with all replications.
write.csv(file = "convResult.csv", converge.list)
write.csv(file = "estResult.csv", est.par.list)
|
/svj_stoc/Server_svj_stoc/session13/test.R
|
no_license
|
Steven-Sakurai/Heston-SVJ
|
R
| false
| false
| 1,637
|
r
|
set.seed(14137)
n = 50
len = 1001
require(Rcpp)
require(BH)
require(nloptr)
require(ggplot2)
require(gridExtra)
options(digits=8)
source('../conv_plot.R')
sourceCpp('../Simulation.cpp')
par = c(0.05, 3.9, 0.08, 0.3038, -0.6974, 3.2, -0.3551, 0.0967*0.0967)
sourceCpp('../nll.cpp', verbose = F)
opts <- list(algorithm="NLOPT_LN_NELDERMEAD", xtol_rel = 1.0e-6, maxeval = 10000)
# set lower and upper bound
lb = c(-0.5, 0.001, 1e-6, 0.001, -0.99, 0, -1, 1e-8)
ub = c(0.5, 100, 1, 1, 0.99, 10, 0, 1)
# run n paths
converge.list = vector(mode="numeric", length = n)
est.par.list = data.frame(mu = double(), kappa = double(), theta = double(), xi = double(), rho = double(), lambda = double(), mu_s = double(), sigma_s = double())
i = 1
while(i <= n) {
seed.list = sample(10000, 4)
y = SimSVJ(par, len, log(100), par[3], 1/252, seed.list[1], seed.list[2], seed.list[3], seed.list[4])
f <- function(p) {
return(nll(p, y))
}
start = Sys.time()
result = nloptr(x0 = par, eval_f = f,
opts = opts,
lb = lb, ub = ub)
if(result$status < 0)
next;
est.res = as.numeric(result$solution)
conv.res = check.converge(f, result$solution)
write.table(est.res, file = "./est.csv", sep = ",", append = T, row.names = F, col.names = F, quote = F)
write.table(conv.res, file = "./conv.csv", sep = ",", append = T, row.names = F, col.names = F, quote = F)
print(i)
print(Sys.time() - start)
print(est.res)
est.par.list[i, ] = as.numeric(est.res)
converge.list[i] = conv.res
i = i + 1
}
write.csv(file = "convResult.csv", converge.list)
write.csv(file = "estResult.csv", est.par.list)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GA_utils.R
\name{.palette_to_chromosome}
\alias{.palette_to_chromosome}
\title{Get chromosome of palette}
\usage{
.palette_to_chromosome(hex_palette)
}
\arguments{
\item{hex_palette}{Hex strings for palette}
}
\value{
a vector of RGB values
}
\description{
Get chromosome of palette
}
|
/man/dot-palette_to_chromosome.Rd
|
permissive
|
tsostarics/ftpals
|
R
| false
| true
| 363
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GA_utils.R
\name{.palette_to_chromosome}
\alias{.palette_to_chromosome}
\title{Get chromosome of palette}
\usage{
.palette_to_chromosome(hex_palette)
}
\arguments{
\item{hex_palette}{Hex strings for palette}
}
\value{
a vector of RGB values
}
\description{
Get chromosome of palette
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/pointsWithin.R
\name{range2GRanges}
\alias{range2GRanges}
\title{From data frame of ranges to GRanges object}
\usage{
range2GRanges(df)
}
\arguments{
\item{df}{Data frame with chr, start, and end columns}
}
\description{
From data frame of ranges to GRanges object
}
\examples{
df <- data.frame(
'chr' = c('chr1', 'chr2'),
'startPos' = c(5, 0),
'endPos' = c(100, 20000)
)
range2GRanges(df)
}
|
/man/range2GRanges.Rd
|
no_license
|
JEFworks/badger
|
R
| false
| false
| 491
|
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/pointsWithin.R
\name{range2GRanges}
\alias{range2GRanges}
\title{From data frame of ranges to GRanges object}
\usage{
range2GRanges(df)
}
\arguments{
\item{df}{Data frame with chr, start, and end columns}
}
\description{
From data frame of ranges to GRanges object
}
\examples{
df <- data.frame(
'chr' = c('chr1', 'chr2'),
'startPos' = c(5, 0),
'endPos' = c(100, 20000)
)
range2GRanges(df)
}
|
# Auto-generated fuzzing (AFL) regression input for the internal function
# eDMA:::PowerSet, replayed under valgrind: build the argument list, invoke
# the function via do.call, and print the structure of the result.
testlist <- list(iK = 61951L)
result <- do.call(eDMA:::PowerSet,testlist)
str(result)
|
/eDMA/inst/testfiles/PowerSet/AFL_PowerSet/PowerSet_valgrind_files/1609870003-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 85
|
r
|
testlist <- list(iK = 61951L)
result <- do.call(eDMA:::PowerSet,testlist)
str(result)
|
#' Mapa del Banco de Porcupine sin referencias en tierra
#'
#' Función auxiliar para sacar el mapa de la campaña Porcupine
#' @param xlims Define los limites longitudinales del mapa, los valores por defecto son los del total del área de la campaña
#' @param ylims Define los limites latitudinales del mapa, los valores por defecto son los del total del área de la campaña
#' @param lwdl Ancho de las líneas del mapa
#' @param cuadr Si T saca las cuadrículas de 5x5 millas naúticas
#' @param ICESrect Si T saca los rectangulos ices de 1 grado de latitud por medio de longitud
#' @param ICESlab Si T incluye las etiquetas de los rectángulos ICES
#' @param ICESlabcex tamaño del ICESlab en cex, .5 por defecto subirlo si se quiere más grande
#' @param label Si T saca las etiquetas de cada una de las cuadriculas numeradas consecutivamente por estratos 1A,1B,2B,3A,3B
#' @param colo Color de las etiquetas, por defecto rojas
#' @param bw Si T mapa en blanco y negro respecto a tierra y puntos, en caso contrario en color. Para sacar el diseño de estratos de Porcupine se utiliza sectcol=TRUE y leg=TRUE
#' @param ax Si T saca los ejes x e y
#' @param wmf Si T saca a fichero metafile porconc.emf
#' @param corners Si T coloca dos puntos rojos en los extremos nordeste y suroeste para ajustar mapas al PescaWin con ax=F
#' @return Saca en pantalla el mapa y es utilizada por otras funciones, si wmf=TRUE lo saca a metafile para fondo del pescawin
#' @seealso {\link{MapNort}}, {\link{MapCant}}
#' @family mapas base
#' @family Porcupine
#' @export
# Draw the Porcupine Bank survey base map (no onshore references) for the
# given lon/lat window; optionally overlays a 5x5 nm grid, ICES rectangles
# and labels, per-cell labels, corner markers, and can write the map to a
# Windows metafile (porconc.emf) for use as a PescaWin background.
mapporco<-function(xlims=c(-15.5,-10.5),ylims=c(50.5,54.5),lwdl=1,cuadr=FALSE,ICESrect=FALSE,ICESlab=FALSE,ICESlabcex=.7,label=FALSE,colo=2,bw=F,ax=TRUE,wmf=FALSE,corners=FALSE) {
# Aspect ratio for the fixed full-area window (lat span over lon span,
# corrected by cos(latitude) so distances look right on screen).
asp<-diff(c(50.5,54.5))/(diff(range(-15.5,-10.5))*cos(mean(50.5,54.5)*pi/180))
if (wmf) win.metafile(filename = "porconc.emf", width = 10, height = 10*asp+.63, pointsize = 10)
if (!wmf) par(mar=c(2,2.5,2, 2.5) + 0.3, mgp=c(2,.5,0))
if (!ax) par(mar=c(0,0,0,0),oma=c(0,0,0,0),omd=c(0,1,0,1))
# Open an empty map canvas from the package's Porc.map polygon data.
maps::map(Porc.map,xlim=xlims,ylim=ylims,type="n")
# Colour version gets a light-blue sea background.
if (!bw) rect(par("usr")[1],par("usr")[3],par("usr")[2],par("usr")[4],col=ifelse(bw,"white","lightblue1"))
# Split Porc.map polygons into named strata vs unnamed land pieces.
nstrat<-length(which(!is.na(Porc.map$names)))
nland<-length(Porc.map$names)-nstrat
if (ax) {
# Degree-labelled axes on all four sides; tick spacing halves when the
# requested window is narrower than one degree.
degs = seq(-15,-11,ifelse(abs(diff(xlims))>1,1,.5))
alg = sapply(degs,function(x) bquote(.(abs(x))*degree ~ W))
axis(1, at=degs, lab=do.call(expression,alg),font.axis=2,cex.axis=.8,tick=T,tck=c(-.01),mgp=c(1,.2,0))
axis(3, at=degs, lab=do.call(expression,alg),font.axis=2,cex.axis=.8,tick=T,tck=c(-.01),mgp=c(1,.2,0))
degs = seq(51,54,ifelse(abs(diff(ylims))>1,1,.5))
alt = sapply(degs,function(x) bquote(.(x)*degree ~ N))
axis(2, at=degs, lab=do.call(expression,alt),font.axis=2,cex.axis=.8,tick=T,tck=c(-.01),las=2,mgp=c(1,.5,0))
axis(4, at=degs, lab=do.call(expression,alt),font.axis=2,cex.axis=.8,tick=T,tck=c(-.01),las=2,mgp=c(1,.5,0))
}
if (cuadr) {
# 5x5 nautical-mile sampling grid (1/12 degree lat, 3/23 degree lon).
abline(h=seq(50,55,by=1/12),col=gray(.6),lwd=.6)
abline(v=seq(-18,-10,by=3/23),col=gray(.6),lwd=.6)
}
if (ICESrect) {
# ICES statistical rectangles: 0.5 degree lat x 1 degree lon.
abline(h=seq(50,55,by=.5),col=gray(.2),lwd=.6)
abline(v=seq(-18,-10,by=1),col=gray(.2),lwd=.6)
}
# NOTE(review): relies on stat_x/stat_y/ICESNAME and data frame Area being
# available from the package environment — confirm; also check the formula
# interface used with text() here.
if (ICESlab) text(c(stat_y+.215)~stat_x,Area,label=ICESNAME,cex=ICESlabcex,font=2)
# Redraw the map filled: strata transparent, land grey (bw) or bisque.
maps::map(Porc.map,add=TRUE,fill=TRUE,col=c(rep(NA,nstrat-1),ifelse(bw,"gray85","bisque")),lwd=lwdl)
box(lwd=lwdl)
if (label) {
# Per-cell labels; Porc.grid is built on demand via sacagrid() (external).
if (!exists("Porc.grid")) {
Porc.grid<-sacagrid()
}
text(Porc.grid$x,Porc.grid$y,labels=Porc.grid$pt,cex=.3,col=colo)
}
# Red NE/SW corner markers used to align backgrounds in PescaWin.
if (corners) points(c(-15.5,-10.5),c(50.5,54.5),pch=16,col=2)
if (wmf) dev.off()
if (wmf) par(mar=c(5, 4, 4, 2) + 0.1)
}
|
/R/mapporco.r
|
no_license
|
Franvgls/CampR
|
R
| false
| false
| 3,720
|
r
|
#' Mapa del Banco de Porcupine sin referencias en tierra
#'
#' Función auxiliar para sacar el mapa de la campaña Porcupine
#' @param xlims Define los limites longitudinales del mapa, los valores por defecto son los del total del área de la campaña
#' @param ylims Define los limites latitudinales del mapa, los valores por defecto son los del total del área de la campaña
#' @param lwdl Ancho de las líneas del mapa
#' @param cuadr Si T saca las cuadrículas de 5x5 millas naúticas
#' @param ICESrect Si T saca los rectangulos ices de 1 grado de latitud por medio de longitud
#' @param ICESlab Si T incluye las etiquetas de los rectángulos ICES
#' @param ICESlabcex tamaño del ICESlab en cex, .5 por defecto subirlo si se quiere más grande
#' @param label Si T saca las etiquetas de cada una de las cuadriculas numeradas consecutivamente por estratos 1A,1B,2B,3A,3B
#' @param colo Color de las etiquetas, por defecto rojas
#' @param bw Si T mapa en blanco y negro respecto a tierra y puntos, en caso contrario en color. Para sacar el diseño de estratos de Porcupine se utiliza sectcol=TRUE y leg=TRUE
#' @param ax Si T saca los ejes x e y
#' @param wmf Si T saca a fichero metafile porconc.emf
#' @param corners Si T coloca dos puntos rojos en los extremos nordeste y suroeste para ajustar mapas al PescaWin con ax=F
#' @return Saca en pantalla el mapa y es utilizada por otras funciones, si wmf=TRUE lo saca a metafile para fondo del pescawin
#' @seealso {\link{MapNort}}, {\link{MapCant}}
#' @family mapas base
#' @family Porcupine
#' @export
mapporco<-function(xlims=c(-15.5,-10.5),ylims=c(50.5,54.5),lwdl=1,cuadr=FALSE,ICESrect=FALSE,ICESlab=FALSE,ICESlabcex=.7,label=FALSE,colo=2,bw=F,ax=TRUE,wmf=FALSE,corners=FALSE) {
asp<-diff(c(50.5,54.5))/(diff(range(-15.5,-10.5))*cos(mean(50.5,54.5)*pi/180))
if (wmf) win.metafile(filename = "porconc.emf", width = 10, height = 10*asp+.63, pointsize = 10)
if (!wmf) par(mar=c(2,2.5,2, 2.5) + 0.3, mgp=c(2,.5,0))
if (!ax) par(mar=c(0,0,0,0),oma=c(0,0,0,0),omd=c(0,1,0,1))
maps::map(Porc.map,xlim=xlims,ylim=ylims,type="n")
if (!bw) rect(par("usr")[1],par("usr")[3],par("usr")[2],par("usr")[4],col=ifelse(bw,"white","lightblue1"))
nstrat<-length(which(!is.na(Porc.map$names)))
nland<-length(Porc.map$names)-nstrat
if (ax) {
degs = seq(-15,-11,ifelse(abs(diff(xlims))>1,1,.5))
alg = sapply(degs,function(x) bquote(.(abs(x))*degree ~ W))
axis(1, at=degs, lab=do.call(expression,alg),font.axis=2,cex.axis=.8,tick=T,tck=c(-.01),mgp=c(1,.2,0))
axis(3, at=degs, lab=do.call(expression,alg),font.axis=2,cex.axis=.8,tick=T,tck=c(-.01),mgp=c(1,.2,0))
degs = seq(51,54,ifelse(abs(diff(ylims))>1,1,.5))
alt = sapply(degs,function(x) bquote(.(x)*degree ~ N))
axis(2, at=degs, lab=do.call(expression,alt),font.axis=2,cex.axis=.8,tick=T,tck=c(-.01),las=2,mgp=c(1,.5,0))
axis(4, at=degs, lab=do.call(expression,alt),font.axis=2,cex.axis=.8,tick=T,tck=c(-.01),las=2,mgp=c(1,.5,0))
}
if (cuadr) {
abline(h=seq(50,55,by=1/12),col=gray(.6),lwd=.6)
abline(v=seq(-18,-10,by=3/23),col=gray(.6),lwd=.6)
}
if (ICESrect) {
abline(h=seq(50,55,by=.5),col=gray(.2),lwd=.6)
abline(v=seq(-18,-10,by=1),col=gray(.2),lwd=.6)
}
if (ICESlab) text(c(stat_y+.215)~stat_x,Area,label=ICESNAME,cex=ICESlabcex,font=2)
maps::map(Porc.map,add=TRUE,fill=TRUE,col=c(rep(NA,nstrat-1),ifelse(bw,"gray85","bisque")),lwd=lwdl)
box(lwd=lwdl)
if (label) {
if (!exists("Porc.grid")) {
Porc.grid<-sacagrid()
}
text(Porc.grid$x,Porc.grid$y,labels=Porc.grid$pt,cex=.3,col=colo)
}
if (corners) points(c(-15.5,-10.5),c(50.5,54.5),pch=16,col=2)
if (wmf) dev.off()
if (wmf) par(mar=c(5, 4, 4, 2) + 0.1)
}
|
#' Load tsoobgx model from binary file
#'
#' Load tsoobgx model from the binary model file.
#'
#' @param modelfile the name of the binary input file.
#'
#' @details
#' The input file is expected to contain a model saved in an tsoobgx-internal binary format
#' using either \code{\link{bgx.save}} or \code{\link{cb.save.model}} in R, or using some
#' appropriate methods from other tsoobgx interfaces. E.g., a model trained in Python and
#' saved from there in tsoobgx format, could be loaded from R.
#'
#' Note: a model saved as an R-object, has to be loaded using corresponding R-methods,
#' not \code{bgx.load}.
#'
#' @return
#' An object of \code{bgx.Booster} class.
#'
#' @seealso
#' \code{\link{bgx.save}}, \code{\link{bgx.Booster.complete}}.
#'
#' @examples
#' data(agaricus.train, package='tsoobgx')
#' data(agaricus.test, package='tsoobgx')
#' train <- agaricus.train
#' test <- agaricus.test
#' bst <- tsoobgx(data = train$data, label = train$label, max_depth = 2,
#' eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
#' bgx.save(bst, 'bgx.model')
#' bst <- bgx.load('bgx.model')
#' if (file.exists('bgx.model')) file.remove('bgx.model')
#' pred <- predict(bst, test$data)
#' @export
bgx.load <- function(modelfile) {
  if (is.null(modelfile)) {
    stop("bgx.load: modelfile cannot be NULL")
  }
  handle <- bgx.Booster.handle(modelfile = modelfile)
  # When the input is already a raw vector, keep it as the booster's cached
  # raw dump so we do not have to serialize the model a second time.
  raw_dump <- if (typeof(modelfile) == "raw") modelfile else NULL
  booster <- bgx.handleToBooster(handle, raw_dump)
  bgx.Booster.complete(booster, saveraw = TRUE)
}
#' @export
bgx.load.individual <- function(modelfile, k) {
  if (is.null(modelfile) || typeof(modelfile) != "raw") {
    stop("bgx.load.individuals: modelfile must be a raw booster dump")
  }
  # Create an empty native booster handle, then load the k-th individual
  # model out of the raw multi-model dump into it.
  handle <- .Call(retsooBGXCreate_R, list())
  .Call(retsooBGXLoadIndividualModelFromRaw_R, handle, modelfile, k)
  class(handle) <- "bgx.Booster.handle"
  booster <- bgx.handleToBooster(handle, modelfile)
  bgx.Booster.complete(booster, saveraw = TRUE)
}
|
/R/bgx.load.R
|
permissive
|
nalzok/tsoobgx
|
R
| false
| false
| 2,136
|
r
|
#' Load tsoobgx model from binary file
#'
#' Load tsoobgx model from the binary model file.
#'
#' @param modelfile the name of the binary input file.
#'
#' @details
#' The input file is expected to contain a model saved in an tsoobgx-internal binary format
#' using either \code{\link{bgx.save}} or \code{\link{cb.save.model}} in R, or using some
#' appropriate methods from other tsoobgx interfaces. E.g., a model trained in Python and
#' saved from there in tsoobgx format, could be loaded from R.
#'
#' Note: a model saved as an R-object, has to be loaded using corresponding R-methods,
#' not \code{bgx.load}.
#'
#' @return
#' An object of \code{bgx.Booster} class.
#'
#' @seealso
#' \code{\link{bgx.save}}, \code{\link{bgx.Booster.complete}}.
#'
#' @examples
#' data(agaricus.train, package='tsoobgx')
#' data(agaricus.test, package='tsoobgx')
#' train <- agaricus.train
#' test <- agaricus.test
#' bst <- tsoobgx(data = train$data, label = train$label, max_depth = 2,
#' eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
#' bgx.save(bst, 'bgx.model')
#' bst <- bgx.load('bgx.model')
#' if (file.exists('bgx.model')) file.remove('bgx.model')
#' pred <- predict(bst, test$data)
#' @export
bgx.load <- function(modelfile) {
if (is.null(modelfile))
stop("bgx.load: modelfile cannot be NULL")
handle <- bgx.Booster.handle(modelfile = modelfile)
# re-use modelfile if it is raw so we do not need to serialize
if (typeof(modelfile) == "raw") {
bst <- bgx.handleToBooster(handle, modelfile)
} else {
bst <- bgx.handleToBooster(handle, NULL)
}
bst <- bgx.Booster.complete(bst, saveraw = TRUE)
return(bst)
}
#' @export
bgx.load.individual <- function(modelfile, k) {
if (is.null(modelfile) || typeof(modelfile) != "raw")
stop("bgx.load.individuals: modelfile must be a raw booster dump")
handle <- .Call(retsooBGXCreate_R, list())
.Call(retsooBGXLoadIndividualModelFromRaw_R, handle, modelfile, k)
class(handle) <- "bgx.Booster.handle"
bst <- bgx.handleToBooster(handle, modelfile)
bst <- bgx.Booster.complete(bst, saveraw = TRUE)
return(bst)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/healthlake_operations.R
\name{healthlake_delete_fhir_datastore}
\alias{healthlake_delete_fhir_datastore}
\title{Deletes a data store}
\usage{
healthlake_delete_fhir_datastore(DatastoreId)
}
\arguments{
\item{DatastoreId}{[required] The AWS-generated ID for the data store to be deleted.}
}
\description{
Deletes a data store.
See \url{https://www.paws-r-sdk.com/docs/healthlake_delete_fhir_datastore/} for full documentation.
}
\keyword{internal}
|
/cran/paws.analytics/man/healthlake_delete_fhir_datastore.Rd
|
permissive
|
paws-r/paws
|
R
| false
| true
| 526
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/healthlake_operations.R
\name{healthlake_delete_fhir_datastore}
\alias{healthlake_delete_fhir_datastore}
\title{Deletes a data store}
\usage{
healthlake_delete_fhir_datastore(DatastoreId)
}
\arguments{
\item{DatastoreId}{[required] The AWS-generated ID for the data store to be deleted.}
}
\description{
Deletes a data store.
See \url{https://www.paws-r-sdk.com/docs/healthlake_delete_fhir_datastore/} for full documentation.
}
\keyword{internal}
|
library(IAPWS95)
### Name: TSats
### Title: Saturation Temperature, Function of Entropy
### Aliases: TSats
### ** Examples
# Compute the saturation temperature corresponding to a specific entropy s.
s <- 2.10865845
T_Sat <- TSats(s)
T_Sat
|
/data/genthat_extracted_code/IAPWS95/examples/TSats.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 171
|
r
|
library(IAPWS95)
### Name: TSats
### Title: Saturation Temperature, Function of Entropy
### Aliases: TSats
### ** Examples
s <- 2.10865845
T_Sat <- TSats(s)
T_Sat
|
## Normalize a training sample: centre and scale (z-score) every feature
## column, i.e. all columns except the last one, which holds the class label.
## Returns the sample with the same shape; the label column is untouched.
trainingSampleNormalization <- function(xl)
{
n <- dim(xl)[2] - 1
# seq_len() instead of 1:n — safe when the sample has no feature columns
# (1:0 would iterate over c(1, 0) and fail).
for(i in seq_len(n))
{
xl[, i] <- (xl[, i] - mean(xl[, i])) / sd(xl[, i])
}
return (xl)
}
## Insert a constant -1 column (the bias/threshold input multiplied by w0)
## between the feature columns and the class-label column.
## Returns the augmented matrix explicitly (the original relied on the
## invisible value of an assignment as the last expression).
trainingSamplePrepare <- function(xl)
{
l <- dim(xl)[1]
n <- dim(xl)[2] - 1
# rep(-1, l) is the idiomatic form of seq(from = -1, to = -1, length.out = l).
cbind(xl[, 1:n], rep(-1, l), xl[, n + 1])
}
## Logistic (logarithmic) loss as a function of the classification margin x.
lossSigmoid <- function(x)
{
log2(1 + exp(-x))
}
## Standard logistic sigmoid: maps any real z into (0, 1).
sigmoidFunction <- function(z)
{
1 / (1 + exp(-z))
}
## Stochastic gradient descent for logistic regression.
## xl: prepared sample — feature columns, a -1 bias column, label in {-1,+1}
## in the last column. Returns the learned weight vector w (length 3 here:
## two features plus the bias weight).
sg.LogRegression <- function(xl)
{
l <- dim(xl)[1]
n <- dim(xl)[2] - 1
w <- c(1/2, 1/2, 1/2)
iterCount <- 0
lambda <- 1/l
## Initialize the empirical risk estimate Q over the whole sample
Q <- 0
for (i in 1:l)
{
## compute the dot product <w, x>
wx <- sum(w * xl[i, 1:n])
## compute the margin
margin <- wx * xl[i, n + 1]
Q <- Q + lossSigmoid(margin)
}
repeat
{
# pick a random object index
i <- sample(1:l, 1)
iterCount <- iterCount + 1
xi <- xl[i, 1:n]
yi <- xl[i, n + 1]
## compute the dot product <w, xi>
wx <- sum(w * xi)
## take a gradient step
margin <- wx * yi
ex <- lossSigmoid(margin)
eta <- 0.3 # fixed learning rate (commented-out alternative: 1 / iterCount)
w <- w + eta * xi * yi * sigmoidFunction(-wx * yi)
## update the exponential moving estimate of Q
Qprev <- Q
Q <- (1 - lambda) * Q + lambda * ex
## stop when the relative change in Q is negligible
if (abs(Qprev - Q) / abs(max(Qprev, Q)) < 1e-5)
break
}
return (w)
}
# Demo script: draw two Gaussian classes and overlay the separating lines of
# three linear classifiers (logistic regression, ADALINE, Hebb's rule).
ObjectsCountOfEachClass <- 100
library(MASS)
# Covariance matrices of the two classes.
Sigma1 <- matrix(c(10, 0, 0, 10), 2, 2)
Sigma2 <- matrix(c(4, 1, 1, 2), 2, 2)
xy1 <- mvrnorm(n=ObjectsCountOfEachClass, c(0, 0), Sigma1)
xy2 <- mvrnorm(n=ObjectsCountOfEachClass, c(10, -10), Sigma2)
# Stack both classes; last column is the label (-1 / +1).
xl <- rbind(cbind(xy1, -1), cbind(xy2, +1))
colors <- c(rgb(255/255, 255/255, 0/255), "white", rgb(0/255, 200/255, 0/255))
## Normalization and bias-column preparation
xlNorm <- trainingSampleNormalization(xl)
xlNorm <- trainingSamplePrepare(xlNorm)
## plotting (title string is intentionally kept in Russian: "Linear classifiers")
plot(xlNorm[, 1], xlNorm[, 2], pch = 21, bg = colors[xl[,3] + 2], asp = 1, xlab = "X", ylab = "Y", main = "Линейные классификаторы")
# Logistic regression separating line (black).
w <- sg.LogRegression(xlNorm)
abline(a = w[3] / w[2], b = -w[1] / w[2], lwd = 2, col = "black")
## ADALINE (blue) — sg.ADALINE is defined elsewhere; TODO confirm it is sourced
w <- sg.ADALINE(xlNorm)
abline(a = w[3] / w[2], b = -w[1] / w[2], lwd = 2, col = "blue")
## Hebb's rule (red) — sg.Hebb is defined elsewhere; TODO confirm it is sourced
w <- sg.Hebb(xlNorm)
abline(a = w[3] / w[2], b = -w[1] / w[2], lwd = 2, col = "red")
# legend("bottomleft", c("ADALINE", "Правило Хэбба", "Логистическая регрессия"), pch = c(15,15,15), col = c("blue", "red", "black"))
|
/Lines_algoritms/all.r
|
no_license
|
Abkelyamova/SMPR_AbkelyamovaGulzara
|
R
| false
| false
| 3,036
|
r
|
## Нормализация обучающей выборки
trainingSampleNormalization <- function(xl)
{
n <- dim(xl)[2] - 1
for(i in 1:n)
{
xl[, i] <- (xl[, i] - mean(xl[, i])) / sd(xl[, i])
}
return (xl)
}
## Добавление колонки для из -1 для w0
trainingSamplePrepare <- function(xl)
{
l <- dim(xl)[1]
n <- dim(xl)[2] - 1
xl <- cbind(xl[, 1:n], seq(from = -1, to = -1, length.out = l), xl[, n + 1])
}
## Логарифмическая функция потерь
lossSigmoid <- function(x)
{
return (log2(1 + exp(-x)))
}
## Сигмоидная функция
sigmoidFunction <- function(z)
{
return (1 / (1 + exp(-z)))
}
## Стохастический градиент для логистической регрессии
sg.LogRegression <- function(xl)
{
l <- dim(xl)[1]
n <- dim(xl)[2] - 1
w <- c(1/2, 1/2, 1/2)
iterCount <- 0
lambda <- 1/l
## Инициализация Q
Q <- 0
for (i in 1:l)
{
## Считаем скаляр <w,x>
wx <- sum(w * xl[i, 1:n])
## Считаем отступ
margin <- wx * xl[i, n + 1]
Q <- Q + lossSigmoid(margin)
}
repeat
{
# Рандомный игдекс ошибки
i <- sample(1:l, 1)
iterCount <- iterCount + 1
xi <- xl[i, 1:n]
yi <- xl[i, n + 1]
## считаем скаляр <w,xi>
wx <- sum(w * xi)
## делаем градиентный шаг
margin <- wx * yi
ex <- lossSigmoid(margin)
eta <- 0.3#1 / iterCount
w <- w + eta * xi * yi * sigmoidFunction(-wx * yi)
## считаем новое Q
Qprev <- Q
Q <- (1 - lambda) * Q + lambda * ex
if (abs(Qprev - Q) / abs(max(Qprev, Q)) < 1e-5)
break
}
return (w)
}
ObjectsCountOfEachClass <- 100
library(MASS)
Sigma1 <- matrix(c(10, 0, 0, 10), 2, 2)
Sigma2 <- matrix(c(4, 1, 1, 2), 2, 2)
xy1 <- mvrnorm(n=ObjectsCountOfEachClass, c(0, 0), Sigma1)
xy2 <- mvrnorm(n=ObjectsCountOfEachClass, c(10, -10), Sigma2)
xl <- rbind(cbind(xy1, -1), cbind(xy2, +1))
colors <- c(rgb(255/255, 255/255, 0/255), "white", rgb(0/255, 200/255, 0/255))
## Нормализация
xlNorm <- trainingSampleNormalization(xl)
xlNorm <- trainingSamplePrepare(xlNorm)
## отображение
plot(xlNorm[, 1], xlNorm[, 2], pch = 21, bg = colors[xl[,3] + 2], asp = 1, xlab = "X", ylab = "Y", main = "Линейные классификаторы")
w <- sg.LogRegression(xlNorm)
abline(a = w[3] / w[2], b = -w[1] / w[2], lwd = 2, col = "black")
## ADALINE
w <- sg.ADALINE(xlNorm)
abline(a = w[3] / w[2], b = -w[1] / w[2], lwd = 2, col = "blue")
## Правило Хебба
w <- sg.Hebb(xlNorm)
abline(a = w[3] / w[2], b = -w[1] / w[2], lwd = 2, col = "red")
# legend("bottomleft", c("ADALINE", "Правило Хэбба", "Логистическая регрессия"), pch = c(15,15,15), col = c("blue", "red", "black"))
|
library(simsem)
library(semTools)
library(OpenMx)
######################### Fitting factorFit
# One-factor CFA on the OpenMx demo data, specified with explicit matrices
# and fit to the sample covariance matrix and means, then used as the
# population/analysis model for a small simsem simulation.
data(demoOneFactor)
manifestVars <- names(demoOneFactor)
factorModel <- mxModel("One Factor", 
mxMatrix(type="Full", nrow=5, ncol=1, values=0.7, free=TRUE, name="A"),   # factor loadings
mxMatrix(type="Symm", nrow=1, ncol=1, values=1, free=FALSE, name="L"),    # factor variance fixed at 1
mxMatrix(type="Diag", nrow=5, ncol=5, values=1, free=TRUE, name="U"),     # residual variances
mxMatrix(type="Full", nrow=1, ncol=5, values=1, free=TRUE, name="M"),     # manifest means
mxAlgebra(expression=A %*% L %*% t(A) + U, name="R"),                     # model-implied covariance
mxExpectationNormal(covariance="R", means="M", dimnames=manifestVars),
mxData(observed=cov(demoOneFactor), means=colMeans(demoOneFactor), type="cov", numObs=500),
mxFitFunctionML()
)
factorFit <- mxRun(factorModel)
fitMeasuresMx(factorFit)
# Simulate 10 replications of n = 200 from the fitted model and summarise.
factorFitSim <- sim(10, factorFit, n = 200)
summary(factorFitSim)
|
/SupportingDocs/Examples/Version05mx/exDemo/OneFactorMatrixDemo.R
|
no_license
|
simsem/simsem
|
R
| false
| false
| 865
|
r
|
library(simsem)
library(semTools)
library(OpenMx)
######################### Fitting factorFit
data(demoOneFactor)
manifestVars <- names(demoOneFactor)
factorModel <- mxModel("One Factor",
mxMatrix(type="Full", nrow=5, ncol=1, values=0.7, free=TRUE, name="A"),
mxMatrix(type="Symm", nrow=1, ncol=1, values=1, free=FALSE, name="L"),
mxMatrix(type="Diag", nrow=5, ncol=5, values=1, free=TRUE, name="U"),
mxMatrix(type="Full", nrow=1, ncol=5, values=1, free=TRUE, name="M"),
mxAlgebra(expression=A %*% L %*% t(A) + U, name="R"),
mxExpectationNormal(covariance="R", means="M", dimnames=manifestVars),
mxData(observed=cov(demoOneFactor), means=colMeans(demoOneFactor), type="cov", numObs=500),
mxFitFunctionML()
)
factorFit <- mxRun(factorModel)
fitMeasuresMx(factorFit)
factorFitSim <- sim(10, factorFit, n = 200)
summary(factorFitSim)
|
\name{PrepSegments}
\alias{PrepSegments}
\title{Preliminary segmentation analysis}
\usage{
PrepSegments(Data.traj, sd = 1, Km = 30, plotit = TRUE,
nmodels = 10, log = FALSE, mumin = 0, ...)
}
\arguments{
\item{Data.traj}{trajectory}
\item{sd}{standard deviation of step response}
\item{Km}{the maximum number of partitions of the
trajectory}
\item{plotit}{whether to plot the likelihood analysis}
\item{nmodels}{number of candidate models}
\item{log}{Whether to perform the analysis on the log of
the step lengths.}
}
\value{
a list with the following elements: \item{Data.traj}{a
regularized trajectory} \item{nK}{the optimal number of
partitions} \item{mus}{the mean values of all the
candidate models} \item{models}{the index of the selected
models}
}
\description{
Takes a trajectory and determines the number of
partitions of a trajectory based on Markov models. The
response variable here is limited to the step lengths.
This is a wrapper for the partitioning tools in
\code{\link{adehabitatLT}} by Calenge.
}
\seealso{
\code{\link{modpartltraj}}
}
|
/waddle/waddle/man/PrepSegments.Rd
|
no_license
|
xiang-chen-git/ecomove
|
R
| false
| false
| 1,144
|
rd
|
\name{PrepSegments}
\alias{PrepSegments}
\title{Preliminary segmentation analysis}
\usage{
PrepSegments(Data.traj, sd = 1, Km = 30, plotit = TRUE,
nmodels = 10, log = FALSE, mumin = 0, ...)
}
\arguments{
\item{Data.traj}{trajectory}
\item{sd}{standard deviation of step response}
\item{Km}{the maximum number of partitions of the
trajectory}
\item{plotit}{whether to plot the likelihood analysis}
\item{nmodels}{number of candidate models}
\item{log}{Whether to perform the analysi on the log of
the step lengths.}
}
\value{
a list with the following elements: \item{Data.traj}{a
regularized trajectory} \item{nK}{the optimal number of
partitions} \item{mus}{the mean values of all the
candidate models} \item{models}{the index of the selected
models}
}
\description{
Takes a trajectory and determines the number of
partitions of a trajectory based on Markov models. The
response variable here is limited to the step lengths.
This is a wrapper for the partitioning tools in
\code{\link{adehabitatLT}} by Calenge.
}
\seealso{
\code{\link{modpartltraj}}
}
|
# Chapter 13, Example 13.2 (p. 514): one-way ANOVA.
# H0: mu1 = mu2 = mu3 = mu4   vs   H1: at least two group means differ.
alkaline <- c(49.20,44.54,45.80,95.84,30.10,36.50,82.30,87.85,105.00,95.22,97.50,105.00,58.05,86.60,58.35,72.80,116.70,45.15,70.35,77.40,97.07,73.40,68.50,91.85,106.60,0.57,0.79,0.77,0.81,62.10,94.95,142.50,53.00,175.00,79.50,29.50,78.40,127.50,110.60,57.10,117.60,77.71,150.00,82.90,111.50)
# Group labels: 20 / 9 / 9 / 7 observations for the four drug groups.
group <- c(rep(1, 20), rep(2, 9), rep(3, 9), rep(4, 7))
dat <- data.frame(alkaline, group)
# Fit the one-way ANOVA; `fit` avoids shadowing base::c as the original did.
fit <- aov(alkaline ~ factor(group), data = dat)
summary(fit) # Analysis of Variance table
# Typo fixed in the conclusion message ("hypothesisand" -> "hypothesis and").
cat("Since the p-value is 0.022, we reject the null hypothesis and conclude alkaline levels for the four drug groups are not the same")
|
/Probability_And_Statistics_For_Engineers_And_Scientists_by_Ronald_E._Walpole,_Raymond_H._Myers,_Sharon_L._Myers,_Keying_Ye/CH13/EX13.2/Ex13_2.R
|
permissive
|
FOSSEE/R_TBC_Uploads
|
R
| false
| false
| 754
|
r
|
# Chapter 13
# Example 13.2 page no. 514 from the pdf..
# One way ANOVA..
# NULL : H0: mu1=mu2=mu3=mu4
# alternate : H1: at least two are not equal
a <- c(49.20,44.54,45.80,95.84,30.10,36.50,82.30,87.85,105.00,95.22,97.50,105.00,58.05,86.60,58.35,72.80,116.70,45.15,70.35,77.40,97.07,73.40,68.50,91.85,106.60,0.57,0.79,0.77,0.81,62.10,94.95,142.50,53.00,175.00,79.50,29.50,78.40,127.50,110.60,57.10,117.60,77.71,150.00,82.90,111.50)
b <- c(rep(1,20),rep(2,9),rep(3,9),rep(4,7))
dat <- data.frame(a,b)
c <- aov(a~factor(b),data = dat) # anova
summary(c) # Analysis of Variance table
cat("Since the p-value is 0.022,we reject the null hypothesisand conclude alkaline levels for the four drug groups are not the same")
|
# Summarise allele-frequency vs read-ratio statistics for one or more polyRAD
# RADdata objects. For each object the population-structure model is iterated
# (IteratePopStruct), genotypes are estimated, and per-allele statistics
# (read ratio, estimated allele frequency, total depth, and two bias
# measures) are collected into a single data frame, which is plotted and
# returned invisibly.
RADprocessingPop <- function(object){
  alefq <- list()
  realestimate <- list()
  name1 <- list()
  name2 <- list()
  better_bias <- list()
  # A single RADdata object (recognised by its $alleleDepth slot) is wrapped
  # into a list so the same loops handle both a single object and a list.
  if (!is.null(object$alleleDepth)) {
    object <- list(object)
  }
  for (m in seq_along(object)) {
    tryCatch({
      real_iterate <- IteratePopStruct(object[[m]])
      alefq[[m]] <- real_iterate$alleleFreq
      name1[[m]] <- real_iterate$locTable
      name2[[m]] <- real_iterate$alleleNucleotides
      # Odds-ratio style bias: normalised depth proportion vs estimated
      # allele frequency.
      better_bias[[m]] <- ((1 - real_iterate$normalizedDepthProp) / (1 - alefq[[m]])) /
        (real_iterate$normalizedDepthProp / alefq[[m]])
      realestimate[[m]] <- GetProbableGenotypes(real_iterate, omitCommonAllele = FALSE)
    }, error = function(e) { cat("ERROR :", conditionMessage(e), "\n") })
  }
  alratio <- list()
  tot_depth <- list()
  for (n in seq_along(alefq)) {
    # Hoist the two colSums so each is computed once per object.
    allele_depth <- colSums(object[[n]]$alleleDepth)
    anti_depth <- colSums(object[[n]]$antiAlleleDepth)
    alratio[[n]] <- allele_depth / (allele_depth + anti_depth)
    tot_depth[[n]] <- allele_depth + anti_depth
  }
  if (length(alratio) != length(tot_depth)) {
    stop("NULL probably exists, dataframe cannot be built")
  }
  Ratio_Freq_pop <- data.frame(unlist(alratio), unlist(alefq))
  colnames(Ratio_Freq_pop) <- c("alleleRatio", "alleleFrequency")
  Ratio_Freq_pop$difference_alefq_alratio <- abs(Ratio_Freq_pop$alleleRatio - Ratio_Freq_pop$alleleFrequency)
  diff_ratio <- list()
  diff_freq <- list()
  bias_calc <- list()
  for (o in seq_along(object)) {
    mean_ratio <- colMeans(object[[o]]$depthRatio, na.rm = TRUE)
    diff_ratio[[o]] <- abs(mean_ratio - alratio[[o]])
    diff_freq[[o]] <- abs(mean_ratio - alefq[[o]])
    bias_calc[[o]] <- mean_ratio / alefq[[o]]
  }
  Ratio_Freq_pop$diff_ratio <- unlist(diff_ratio)
  Ratio_Freq_pop$diff_freq <- unlist(diff_freq)
  Ratio_Freq_pop$bias <- unlist(bias_calc)
  Ratio_Freq_pop$totaldepth <- unlist(tot_depth)
  Ratio_Freq_pop$betterBias <- unlist(better_bias)
  # BUG FIX: the original plotted object$betterbias and object$diff_freq,
  # neither of which exists (the columns live in Ratio_Freq_pop, and the
  # column is named betterBias).
  plot(log(Ratio_Freq_pop$betterBias), Ratio_Freq_pop$diff_freq)
  # Return the assembled statistics invisibly so callers can use them;
  # callers that ignored the old (plot) return value are unaffected.
  invisible(Ratio_Freq_pop)
}
|
/RADprocessingPop function.R
|
no_license
|
jialehe3/polyRAD
|
R
| false
| false
| 2,095
|
r
|
RADprocessingPop <- function(object){
alefq <- list()
realestimate <- list()
name1 <- list()
name2 <- list()
better_bias <- list()
if(is.null(object$alleleDepth) == 0){
object <- list(object)
}
for (m in 1:length(object)){
tryCatch({
real_iterate <- IteratePopStruct(object[[m]])
alefq[[m]] <- real_iterate$alleleFreq
name1[[m]] <- real_iterate$locTable
name2[[m]] <- real_iterate$alleleNucleotides
better_bias[[m]] <- ((1-real_iterate$normalizedDepthProp)/(1-alefq[[m]]))/(real_iterate$normalizedDepthProp/alefq[[m]])
real_estigeno <- GetProbableGenotypes(real_iterate, omitCommonAllele = FALSE)
realestimate[[m]] <- real_estigeno
},error = function(e){cat("ERROR :",conditionMessage(e), "\n")} )
}
alratio <- list()
tot_depth <- list()
for (n in 1:length(alefq)){
alratio[[n]] <- colSums(object[[n]]$alleleDepth)/(colSums(object[[n]]$alleleDepth)+colSums(object[[n]]$antiAlleleDepth))
tot_depth[[n]] <- colSums(object[[n]]$alleleDepth)+colSums(object[[n]]$antiAlleleDepth)
}
if(length(alratio) != length(tot_depth)){
stop("NULL probably exists, dataframe cannot be built")
}
Ratio_Freq_pop <- data.frame(unlist(alratio),unlist(alefq))
colnames(Ratio_Freq_pop) <- c("alleleRatio","alleleFrequency")
Ratio_Freq_pop$difference_alefq_alratio <- abs(Ratio_Freq_pop$alleleRatio - Ratio_Freq_pop$alleleFrequency)
diff_ratio <- list()
diff_freq <- list()
bias_calc <- list()
for (o in 1:length(object)) {
diff_ratio[[o]] <- abs(colMeans(object[[o]]$depthRatio, na.rm = TRUE)-alratio[[o]])
diff_freq[[o]] <- abs(colMeans(object[[o]]$depthRatio, na.rm = TRUE)-alefq[[o]])
bias_calc[[o]] <- colMeans(object[[o]]$depthRatio, na.rm = TRUE)/alefq[[o]]
}
Ratio_Freq_pop$diff_ratio <- unlist(diff_ratio)
Ratio_Freq_pop$diff_freq <- unlist(diff_freq)
Ratio_Freq_pop$bias <- unlist(bias_calc)
Ratio_Freq_pop$totaldepth <- unlist(tot_depth)
Ratio_Freq_pop$betterBias <- unlist(better_bias)
plot(log(object$betterbias),object$diff_freq)
}
|
#profile analysis of FY18 pop-ups interested primarily in rate of ascent
#and factors influencing rate of ascent
#GOALS with this script: animation of data as pop-up ascends from seafloor to surface,
#compare TTP with SST, response time and precision and accuracy
require("ggplot2")
require("devtools")
require("lubridate") #make dealing with dates a little easier
require("tidyverse")
require("dplyr") #data transformation
#import dataset
`300434063921240_profile_data` <- read.csv("~/EcoFOCI_PopUp/Software_and_Analysis/300434063921240/2019/300434063921240_profile_data.csv")
View(`300434063921240_profile_data`)
#rename dataset for convenience
profile <- `300434063921240_profile_data`
#time
#Fractional seconds are printed only if options("digits.secs") is set: see strftime.
str(as.POSIXlt(profile$datetime, "" tz="UTC"))
options(digits.secs = 4)
unclass(as.POSIXlt(profile$datetime, "GMT"))
#convert bars to standard meters sea water (msw), add to variable columns
#0.1bar=1msw
profile$msw <- profile$pressure*10
#subset data for ascent (asc) only (>2.8msw)
asc <- subset.data.frame(profile, msw >= 2.8)
#use ggplot...graph change in msw over change in time
ggplot(aes(datetime, msw, data=asc, color="teal", geom="point", xaxt="n", ylab("Depth in Meters"),
scale_y_reverse())
)
#calculate ascent rate, fit with linear regression, lm is linear model
lin.mod <- lm(msw ~ datetime, data=asc)
#add linear regression line to plot
abline(lin.mod)
|
/Engineering/Iridium SBD/Data Costs/profile_analysis.R
|
permissive
|
NOAA-PMEL/EcoFOCI_PopUp
|
R
| false
| false
| 1,470
|
r
|
#profile analysis of FY18 pop-ups interested primarily in rate of ascent
#and factors influencing rate of ascent
#GOALS with this script: animation of data as pop-up ascends from seafloor to surface,
#compare TTP with SST, response time and precision and accuracy
require("ggplot2")
require("devtools")
require("lubridate") #make dealing with dates a little easier
require("tidyverse")
require("dplyr") #data transformation
#import dataset
`300434063921240_profile_data` <- read.csv("~/EcoFOCI_PopUp/Software_and_Analysis/300434063921240/2019/300434063921240_profile_data.csv")
View(`300434063921240_profile_data`)
#rename dataset for convenience
profile <- `300434063921240_profile_data`
#time
#Fractional seconds are printed only if options("digits.secs") is set: see strftime.
str(as.POSIXlt(profile$datetime, "" tz="UTC"))
options(digits.secs = 4)
unclass(as.POSIXlt(profile$datetime, "GMT"))
#convert bars to standard meters sea water (msw), add to variable columns
#0.1bar=1msw
profile$msw <- profile$pressure*10
#subset data for ascent (asc) only (>2.8msw)
asc <- subset.data.frame(profile, msw >= 2.8)
#use ggplot...graph change in msw over change in time
ggplot(aes(datetime, msw, data=asc, color="teal", geom="point", xaxt="n", ylab("Depth in Meters"),
scale_y_reverse())
)
#calculate ascent rate, fit with linear regression, lm is linear model
lin.mod <- lm(msw ~ datetime, data=asc)
#add linear regression line to plot
abline(lin.mod)
|
.two_cond_ms <- function(result, IP_BAM, Input_BAM, contrast_IP_BAM, contrast_Input_BAM,
condition1, condition2) {
sample_name <- .get.sampleid(IP_BAM, Input_BAM, contrast_IP_BAM, contrast_Input_BAM)
if (length(sample_name) == 2) {
IP_groupname <- sample_name[[1]]
Input_groupname <- sample_name[[2]]
reference_IP_groupname <- NULL
reference_Input_groupname <- NULL
}
if (length(sample_name) == 4) {
IP_groupname <- sample_name[[1]]
Input_groupname <- sample_name[[2]]
reference_IP_groupname <- sample_name[[3]]
reference_Input_groupname <- sample_name[[4]]
}
if (is.null(reference_IP_groupname) & is.null(reference_Input_groupname)) {
print("Must provide two condition bam files using this function")
}
s <- result[[1]]
ind <- unique(s$pos)
len <- length(ind)
n <- nrow(s)
se <- seq(1, n, len)
sa <- s[, -(1:2)]
con_ms_f <- function(group1, group2, cond_name1, cond_name2) {
com_bam <- function(group) {
v <- vector()
for (i in seq_len(ncol(group))) {
m <- vector()
m <- .trans_readsvector(group[, i], se, len, n)
v <- rbind(v, m)
}
return(v)
}
meanSDrel <- function(group) {
p <- com_bam(group)
size <- rowSums(p)
size_factor <- size/exp(mean(log(size)))
q <- apply(p, 2, function(x, a) x/a, a = size_factor)
Mean <- apply(q, 2, mean)
SD <- apply(q, 2, sd)
com <- cbind(Mean, SD)
com <- as.data.frame(com)
z <- which(com$Mean == 0)
com <- com[-z, ]
Mean <- log10(com$Mean)
SD <- log10(com$SD)
com <- cbind(Mean, SD)
com <- as.data.frame(com)
return(com)
}
com1 <- meanSDrel(group1)
com2 <- meanSDrel(group2)
com <- rbind(com1, com2)
com <- as.data.frame(com)
ID <- rep(c(cond_name1, cond_name2), c(length(com1$Mean), length(com2$Mean)))
com <- cbind(com, ID)
com <- as.data.frame(com)
return(com)
}
if ((length(reference_IP_groupname) != 0) & (length(reference_Input_groupname) !=
0) & ((length(reference_IP_groupname) + length(reference_Input_groupname)) <=
2 | (length(IP_groupname) + length(Input_groupname)) <= 2)) {
print("The number of samples in each condition should be larger than three when using this function")
}
if ((length(reference_IP_groupname) != 0) & (length(reference_Input_groupname) !=
0) & ((length(reference_IP_groupname) + length(reference_Input_groupname)) >
2)) {
group_IP <- sa[, (seq_len(length(IP_groupname)))]
group_IP <- as.matrix(group_IP)
Group_Input <- sa[, -(seq_len(length(IP_groupname)))]
group_Input <- Group_Input[, -((length(Input_groupname) + 1):ncol(Group_Input))]
group_Input <- as.matrix(group_Input)
ref_group <- Group_Input[, -(seq_len(length(Input_groupname)))]
ref_group_IP <- ref_group[, seq_len(length(reference_IP_groupname))]
ref_group_IP <- as.matrix(ref_group_IP)
ref_group_Input <- ref_group[, -(seq_len(length(reference_IP_groupname)))]
ref_group_Input <- as.matrix(ref_group_Input)
m_com1 <- con_ms_f(group_IP, ref_group_IP, paste("IP group in",
condition1, "condition"), paste("IP group under", condition2,
"condition"))
m_com2 <- con_ms_f(group_Input, ref_group_Input, paste("Input group in",
condition1, "condition"), paste("Input group under", condition2,
"condition"))
m_com1 <- as.data.frame(m_com1)
Mean <- m_com1$Mean
SD <- m_com1$SD
ID <- m_com1$ID
lp1 <- ggplot(m_com1, aes(Mean, SD, colour = ID)) +
geom_smooth(aes(group = ID), span = 0.5) +
geom_point(alpha = I(1/200), size = 0.002) +
theme(axis.title.x =element_text(size=9), axis.title.y=element_text(size=9),
title = element_text(size = 9),
legend.position = c(0.53,0.97),
legend.justification = c(1,1),
legend.key.height=unit(0.5,'cm'),
legend.key.width=unit(0.5,'cm'),
legend.text=element_text(size=9),
legend.title=element_text(size=9))+
labs(x = "log10(Mean)", y = "log10(SD)", title = paste(" IP samples' Mean-SD relationship between\n", condition1, "and", condition2))
m_com2 <- as.data.frame(m_com2)
Mean <- m_com2$Mean
SD <- m_com2$SD
ID <- m_com2$ID
lp2 <- ggplot(m_com2, aes(Mean, SD, colour = ID)) +
geom_smooth(aes(group = ID), span = 0.5) +
geom_point(alpha = I(1/200), size = 0.002) +
labs(x = "log10(Mean)", y = "log10(SD)", title = paste(" Input samples' Mean-SD relationship between", condition1, "and", condition2))+
theme(axis.title.x =element_text(size=9), axis.title.y=element_text(size=9),
title = element_text(size = 9),
legend.position = c(0.53,0.97),
legend.justification = c(1,1),
legend.key.height=unit(0.5,'cm'),
legend.key.width=unit(0.5,'cm'),
legend.text=element_text(size=9),
legend.title=element_text(size=9))
.multiplot(lp1, lp2, cols = 2)
}
}
|
/R/two_cond_ms.R
|
no_license
|
nkreim/Trumpet
|
R
| false
| false
| 5,568
|
r
|
.two_cond_ms <- function(result, IP_BAM, Input_BAM, contrast_IP_BAM, contrast_Input_BAM,
condition1, condition2) {
sample_name <- .get.sampleid(IP_BAM, Input_BAM, contrast_IP_BAM, contrast_Input_BAM)
if (length(sample_name) == 2) {
IP_groupname <- sample_name[[1]]
Input_groupname <- sample_name[[2]]
reference_IP_groupname <- NULL
reference_Input_groupname <- NULL
}
if (length(sample_name) == 4) {
IP_groupname <- sample_name[[1]]
Input_groupname <- sample_name[[2]]
reference_IP_groupname <- sample_name[[3]]
reference_Input_groupname <- sample_name[[4]]
}
if (is.null(reference_IP_groupname) & is.null(reference_Input_groupname)) {
print("Must provide two condition bam files using this function")
}
s <- result[[1]]
ind <- unique(s$pos)
len <- length(ind)
n <- nrow(s)
se <- seq(1, n, len)
sa <- s[, -(1:2)]
con_ms_f <- function(group1, group2, cond_name1, cond_name2) {
com_bam <- function(group) {
v <- vector()
for (i in seq_len(ncol(group))) {
m <- vector()
m <- .trans_readsvector(group[, i], se, len, n)
v <- rbind(v, m)
}
return(v)
}
meanSDrel <- function(group) {
p <- com_bam(group)
size <- rowSums(p)
size_factor <- size/exp(mean(log(size)))
q <- apply(p, 2, function(x, a) x/a, a = size_factor)
Mean <- apply(q, 2, mean)
SD <- apply(q, 2, sd)
com <- cbind(Mean, SD)
com <- as.data.frame(com)
z <- which(com$Mean == 0)
com <- com[-z, ]
Mean <- log10(com$Mean)
SD <- log10(com$SD)
com <- cbind(Mean, SD)
com <- as.data.frame(com)
return(com)
}
com1 <- meanSDrel(group1)
com2 <- meanSDrel(group2)
com <- rbind(com1, com2)
com <- as.data.frame(com)
ID <- rep(c(cond_name1, cond_name2), c(length(com1$Mean), length(com2$Mean)))
com <- cbind(com, ID)
com <- as.data.frame(com)
return(com)
}
if ((length(reference_IP_groupname) != 0) & (length(reference_Input_groupname) !=
0) & ((length(reference_IP_groupname) + length(reference_Input_groupname)) <=
2 | (length(IP_groupname) + length(Input_groupname)) <= 2)) {
print("The number of samples in each condition should be larger than three when using this function")
}
if ((length(reference_IP_groupname) != 0) & (length(reference_Input_groupname) !=
0) & ((length(reference_IP_groupname) + length(reference_Input_groupname)) >
2)) {
group_IP <- sa[, (seq_len(length(IP_groupname)))]
group_IP <- as.matrix(group_IP)
Group_Input <- sa[, -(seq_len(length(IP_groupname)))]
group_Input <- Group_Input[, -((length(Input_groupname) + 1):ncol(Group_Input))]
group_Input <- as.matrix(group_Input)
ref_group <- Group_Input[, -(seq_len(length(Input_groupname)))]
ref_group_IP <- ref_group[, seq_len(length(reference_IP_groupname))]
ref_group_IP <- as.matrix(ref_group_IP)
ref_group_Input <- ref_group[, -(seq_len(length(reference_IP_groupname)))]
ref_group_Input <- as.matrix(ref_group_Input)
m_com1 <- con_ms_f(group_IP, ref_group_IP, paste("IP group in",
condition1, "condition"), paste("IP group under", condition2,
"condition"))
m_com2 <- con_ms_f(group_Input, ref_group_Input, paste("Input group in",
condition1, "condition"), paste("Input group under", condition2,
"condition"))
m_com1 <- as.data.frame(m_com1)
Mean <- m_com1$Mean
SD <- m_com1$SD
ID <- m_com1$ID
lp1 <- ggplot(m_com1, aes(Mean, SD, colour = ID)) +
geom_smooth(aes(group = ID), span = 0.5) +
geom_point(alpha = I(1/200), size = 0.002) +
theme(axis.title.x =element_text(size=9), axis.title.y=element_text(size=9),
title = element_text(size = 9),
legend.position = c(0.53,0.97),
legend.justification = c(1,1),
legend.key.height=unit(0.5,'cm'),
legend.key.width=unit(0.5,'cm'),
legend.text=element_text(size=9),
legend.title=element_text(size=9))+
labs(x = "log10(Mean)", y = "log10(SD)", title = paste(" IP samples' Mean-SD relationship between\n", condition1, "and", condition2))
m_com2 <- as.data.frame(m_com2)
Mean <- m_com2$Mean
SD <- m_com2$SD
ID <- m_com2$ID
lp2 <- ggplot(m_com2, aes(Mean, SD, colour = ID)) +
geom_smooth(aes(group = ID), span = 0.5) +
geom_point(alpha = I(1/200), size = 0.002) +
labs(x = "log10(Mean)", y = "log10(SD)", title = paste(" Input samples' Mean-SD relationship between", condition1, "and", condition2))+
theme(axis.title.x =element_text(size=9), axis.title.y=element_text(size=9),
title = element_text(size = 9),
legend.position = c(0.53,0.97),
legend.justification = c(1,1),
legend.key.height=unit(0.5,'cm'),
legend.key.width=unit(0.5,'cm'),
legend.text=element_text(size=9),
legend.title=element_text(size=9))
.multiplot(lp1, lp2, cols = 2)
}
}
|
getwd()
data <- read.csv('test.csv',header = TRUE)
x <- seq(1,5)
# recall Graph include all attribute
full <- data[which(data$filename == "cleanedData_full_remove.txt"), "recall"]
collectionCode <- data[which(data$filename == "cleanedData_collectionCode_remove.txt"), "recall"]
habitat <- data[which(data$filename == "cleanedData_habitat_remove.txt"), "recall"]
higherGeography <- data[which(data$filename == "cleanedData_higherGeography_remove.txt"), "recall"]
locality <-data[which(data$filename == "cleanedData_locality_remove.txt"), "recall"]
higherClassification <-data[which(data$filename == "cleanedData_higherClassification_remove.txt"), "recall"]
# recall rank
png(file = "recall_rank_full.png")
plot(x, full,ylim = c(0.85,1), type = "o",col = "red", xlab = "Cross Validation", ylab = "Score",
main = "Label recall rank")
lines(x, collectionCode, type = "o", col = "yellow")
lines(x, habitat, type = "o", col = "grey")
lines(x, higherGeography, type = "o", col = "blue")
lines(x, locality, type = "o", col = "black")
lines(x, higherClassification, type = "o", col = "green")
legend("bottomright", legend = c("full","remove_collectionCode", "remove_habitat",'remove_higherGeography','remove_locality','remove_higherClassification'),
fill=c("red","yellow","grey","blue","black","green"), cex = 0.75)
dev.off()
# recall Graph without Collectioncode
no_cc_full <- data[which(data$filename == "cleanedData_no_cc_full_remove.txt"), "recall"]
no_cc_habitat <- data[which(data$filename == "cleanedData_no_cc_habitat_remove.txt"), "recall"]
no_cc_higherGeography <- data[which(data$filename == "cleanedData_no_cc_higherGeography_remove.txt"), "recall"]
no_cc_locality <-data[which(data$filename == "cleanedData_no_cc_locality_remove.txt"), "recall"]
no_cc_higherClassification <-data[which(data$filename == "cleanedData_no_cc_higherClassification_remove.txt"), "recall"]
# recall rank
png(file = "recall_rank_no_cc.png")
plot(x, no_cc_full,ylim = c(0.80,1), type = "o",col = "red", xlab = "Cross Validation", ylab = "Score",
main = "Label recall rank (all didn't cover collectioncode)")
lines(x, no_cc_habitat, type = "o", col = "grey")
lines(x, no_cc_higherGeography, type = "o", col = "blue")
lines(x, no_cc_locality, type = "o", col = "black")
lines(x, no_cc_higherClassification, type = "o", col = "green")
legend("bottomright", legend = c("full","remove_habitat",'remove_higherGeography','remove_locality','remove_higherClassification'),
fill=c("red","grey","blue","black","green"), cex = 0.75)
dev.off()
# recall Graph without Collectioncode and habitat
no_cc_ha_full <- data[which(data$filename == "cleanedData_no_cc_ha_full_remove.txt"), "recall"]
no_cc_ha_higherGeography <- data[which(data$filename == "cleanedData_no_cc_ha_higherGeography_remove.txt"), "recall"]
no_cc_ha_locality <-data[which(data$filename == "cleanedData_no_cc_ha_locality_remove.txt"), "recall"]
no_cc_ha_higherClassification <-data[which(data$filename == "cleanedData_no_cc_ha_higherClassification_remove.txt"), "recall"]
# recall rank
png(file = "recall_rank_no_cc_ha.png")
plot(x, no_cc_ha_full,ylim = c(0.75,1), type = "o",col = "red", xlab = "Cross Validation", ylab = "Score",
main = "Label recall rank (all didn't cover collectioncode and habitat)")
lines(x, no_cc_ha_higherGeography, type = "o", col = "blue")
lines(x, no_cc_ha_locality, type = "o", col = "black")
lines(x, no_cc_ha_higherClassification, type = "o", col = "green")
legend("bottomright", legend = c("full",'remove_higherGeography','remove_locality','remove_higherClassification'),
fill=c("red","blue","black","green"), cex = 0.75)
dev.off()
# recall Graph without Collectioncode, habitat and higherGeography
no_cc_ha_hg_full <- data[which(data$filename == "cleanedData_no_cc_ha_hg_full_remove.txt"), "recall"]
no_cc_ha_hg_locality <-data[which(data$filename == "cleanedData_no_cc_ha_hg_locality_remove.txt"), "recall"]
no_cc_ha_hg_higherClassification <-data[which(data$filename == "cleanedData_no_cc_ha_hg_higherClassification_remove.txt"), "recall"]
# recall rank
png(file = "recall_rank_no_cc_ha_hg.png")
plot(x, no_cc_ha_hg_full,ylim = c(0.70,1), type = "o",col = "red", xlab = "Cross Validation", ylab = "Score",
main = "Label recall rank (all didn't cover collectioncode,habitat and higherGeography)")
lines(x, no_cc_ha_hg_locality, type = "o", col = "black")
lines(x, no_cc_ha_hg_higherClassification, type = "o", col = "green")
legend("bottomright", legend = c("full",'remove_locality','remove_higherClassification'),
fill=c("red","black","green"), cex = 0.75)
dev.off()
# recall Graph compare without higherClassification with other
full <- data[which(data$filename == "cleanedData_full_remove.txt"), "recall"]
no_cc_full <- data[which(data$filename == "cleanedData_no_cc_full_remove.txt"), "recall"]
no_cc_ha_full <- data[which(data$filename == "cleanedData_no_cc_ha_full_remove.txt"), "recall"]
no_cc_ha_hg_full <- data[which(data$filename == "cleanedData_no_cc_ha_hg_full_remove.txt"), "recall"]
higherClassification <-data[which(data$filename == "cleanedData_higherClassification_remove.txt"), "recall"]
# recall rank
png(file = "recall_rank_remove_compare.png")
plot(x, full,ylim = c(0.85,1), type = "o",col = "red", xlab = "Cross Validation", ylab = "Score",
main = "Label recall rank remove compare")
lines(x, no_cc_full, type = "o", col = "yellow")
lines(x, no_cc_ha_full, type = "o", col = "grey")
lines(x, no_cc_ha_hg_full, type = "o", col = "blue")
lines(x, higherClassification, type = "o", col = "green")
legend("bottomright", legend = c("full","remove_cc", "remove_cc_ha",'remove_cc_ha_hg','remove_hg_only'),
fill=c("red","yellow","grey","blue","green"), cex = 0.75)
dev.off()
|
/Attribute_rank/recall.R
|
permissive
|
isamplesorg/vocabulary_learning
|
R
| false
| false
| 5,861
|
r
|
getwd()
data <- read.csv('test.csv',header = TRUE)
x <- seq(1,5)
# recall Graph include all attribute
full <- data[which(data$filename == "cleanedData_full_remove.txt"), "recall"]
collectionCode <- data[which(data$filename == "cleanedData_collectionCode_remove.txt"), "recall"]
habitat <- data[which(data$filename == "cleanedData_habitat_remove.txt"), "recall"]
higherGeography <- data[which(data$filename == "cleanedData_higherGeography_remove.txt"), "recall"]
locality <-data[which(data$filename == "cleanedData_locality_remove.txt"), "recall"]
higherClassification <-data[which(data$filename == "cleanedData_higherClassification_remove.txt"), "recall"]
# recall rank
png(file = "recall_rank_full.png")
plot(x, full,ylim = c(0.85,1), type = "o",col = "red", xlab = "Cross Validation", ylab = "Score",
main = "Label recall rank")
lines(x, collectionCode, type = "o", col = "yellow")
lines(x, habitat, type = "o", col = "grey")
lines(x, higherGeography, type = "o", col = "blue")
lines(x, locality, type = "o", col = "black")
lines(x, higherClassification, type = "o", col = "green")
legend("bottomright", legend = c("full","remove_collectionCode", "remove_habitat",'remove_higherGeography','remove_locality','remove_higherClassification'),
fill=c("red","yellow","grey","blue","black","green"), cex = 0.75)
dev.off()
# recall Graph without Collectioncode
no_cc_full <- data[which(data$filename == "cleanedData_no_cc_full_remove.txt"), "recall"]
no_cc_habitat <- data[which(data$filename == "cleanedData_no_cc_habitat_remove.txt"), "recall"]
no_cc_higherGeography <- data[which(data$filename == "cleanedData_no_cc_higherGeography_remove.txt"), "recall"]
no_cc_locality <-data[which(data$filename == "cleanedData_no_cc_locality_remove.txt"), "recall"]
no_cc_higherClassification <-data[which(data$filename == "cleanedData_no_cc_higherClassification_remove.txt"), "recall"]
# recall rank
png(file = "recall_rank_no_cc.png")
plot(x, no_cc_full,ylim = c(0.80,1), type = "o",col = "red", xlab = "Cross Validation", ylab = "Score",
main = "Label recall rank (all didn't cover collectioncode)")
lines(x, no_cc_habitat, type = "o", col = "grey")
lines(x, no_cc_higherGeography, type = "o", col = "blue")
lines(x, no_cc_locality, type = "o", col = "black")
lines(x, no_cc_higherClassification, type = "o", col = "green")
legend("bottomright", legend = c("full","remove_habitat",'remove_higherGeography','remove_locality','remove_higherClassification'),
fill=c("red","grey","blue","black","green"), cex = 0.75)
dev.off()
# recall Graph without Collectioncode and habitat
no_cc_ha_full <- data[which(data$filename == "cleanedData_no_cc_ha_full_remove.txt"), "recall"]
no_cc_ha_higherGeography <- data[which(data$filename == "cleanedData_no_cc_ha_higherGeography_remove.txt"), "recall"]
no_cc_ha_locality <-data[which(data$filename == "cleanedData_no_cc_ha_locality_remove.txt"), "recall"]
no_cc_ha_higherClassification <-data[which(data$filename == "cleanedData_no_cc_ha_higherClassification_remove.txt"), "recall"]
# recall rank
png(file = "recall_rank_no_cc_ha.png")
plot(x, no_cc_ha_full,ylim = c(0.75,1), type = "o",col = "red", xlab = "Cross Validation", ylab = "Score",
main = "Label recall rank (all didn't cover collectioncode and habitat)")
lines(x, no_cc_ha_higherGeography, type = "o", col = "blue")
lines(x, no_cc_ha_locality, type = "o", col = "black")
lines(x, no_cc_ha_higherClassification, type = "o", col = "green")
legend("bottomright", legend = c("full",'remove_higherGeography','remove_locality','remove_higherClassification'),
fill=c("red","blue","black","green"), cex = 0.75)
dev.off()
# recall Graph without Collectioncode, habitat and higherGeography
no_cc_ha_hg_full <- data[which(data$filename == "cleanedData_no_cc_ha_hg_full_remove.txt"), "recall"]
no_cc_ha_hg_locality <-data[which(data$filename == "cleanedData_no_cc_ha_hg_locality_remove.txt"), "recall"]
no_cc_ha_hg_higherClassification <-data[which(data$filename == "cleanedData_no_cc_ha_hg_higherClassification_remove.txt"), "recall"]
# recall rank
png(file = "recall_rank_no_cc_ha_hg.png")
plot(x, no_cc_ha_hg_full,ylim = c(0.70,1), type = "o",col = "red", xlab = "Cross Validation", ylab = "Score",
main = "Label recall rank (all didn't cover collectioncode,habitat and higherGeography)")
lines(x, no_cc_ha_hg_locality, type = "o", col = "black")
lines(x, no_cc_ha_hg_higherClassification, type = "o", col = "green")
legend("bottomright", legend = c("full",'remove_locality','remove_higherClassification'),
fill=c("red","black","green"), cex = 0.75)
dev.off()
# recall Graph compare without higherClassification with other
full <- data[which(data$filename == "cleanedData_full_remove.txt"), "recall"]
no_cc_full <- data[which(data$filename == "cleanedData_no_cc_full_remove.txt"), "recall"]
no_cc_ha_full <- data[which(data$filename == "cleanedData_no_cc_ha_full_remove.txt"), "recall"]
no_cc_ha_hg_full <- data[which(data$filename == "cleanedData_no_cc_ha_hg_full_remove.txt"), "recall"]
higherClassification <-data[which(data$filename == "cleanedData_higherClassification_remove.txt"), "recall"]
# recall rank
png(file = "recall_rank_remove_compare.png")
plot(x, full,ylim = c(0.85,1), type = "o",col = "red", xlab = "Cross Validation", ylab = "Score",
main = "Label recall rank remove compare")
lines(x, no_cc_full, type = "o", col = "yellow")
lines(x, no_cc_ha_full, type = "o", col = "grey")
lines(x, no_cc_ha_hg_full, type = "o", col = "blue")
lines(x, higherClassification, type = "o", col = "green")
legend("bottomright", legend = c("full","remove_cc", "remove_cc_ha",'remove_cc_ha_hg','remove_hg_only'),
fill=c("red","yellow","grey","blue","green"), cex = 0.75)
dev.off()
|
\alias{gtkDragDestFindTarget}
\name{gtkDragDestFindTarget}
\title{gtkDragDestFindTarget}
\description{Looks for a match between \code{context->targets} and the
\code{dest.target.list}, returning the first matching target, otherwise
returning \code{GDK_NONE}. \code{dest.target.list} should usually be the return
value from \code{\link{gtkDragDestGetTargetList}}, but some widgets may
have different valid targets for different parts of the widget; in
that case, they will have to implement a drag\_motion handler that
passes the correct target list to this function.}
\usage{gtkDragDestFindTarget(object, context, target.list)}
\arguments{
\item{\code{object}}{[\code{\link{GtkWidget}}] drag destination widget}
\item{\code{context}}{[\code{\link{GdkDragContext}}] drag context}
\item{\code{target.list}}{[\code{\link{GtkTargetList}}] list of droppable targets, or \code{NULL} to use
gtk\_drag\_dest\_get\_target\_list (\code{widget}).}
}
\value{[\code{\link{GdkAtom}}] first target that the source offers and the dest can accept, or \code{GDK_NONE}}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
/man/gtkDragDestFindTarget.Rd
|
no_license
|
cran/RGtk2.10
|
R
| false
| false
| 1,129
|
rd
|
\alias{gtkDragDestFindTarget}
\name{gtkDragDestFindTarget}
\title{gtkDragDestFindTarget}
\description{Looks for a match between \code{context->targets} and the
\code{dest.target.list}, returning the first matching target, otherwise
returning \code{GDK_NONE}. \code{dest.target.list} should usually be the return
value from \code{\link{gtkDragDestGetTargetList}}, but some widgets may
have different valid targets for different parts of the widget; in
that case, they will have to implement a drag\_motion handler that
passes the correct target list to this function.}
\usage{gtkDragDestFindTarget(object, context, target.list)}
\arguments{
\item{\code{object}}{[\code{\link{GtkWidget}}] drag destination widget}
\item{\code{context}}{[\code{\link{GdkDragContext}}] drag context}
\item{\code{target.list}}{[\code{\link{GtkTargetList}}] list of droppable targets, or \code{NULL} to use
gtk\_drag\_dest\_get\_target\_list (\code{widget}).}
}
\value{[\code{\link{GdkAtom}}] first target that the source offers and the dest can accept, or \code{GDK_NONE}}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
timestamp <- Sys.time()
library(caret)
library(plyr)
library(recipes)
library(dplyr)
model <- "awnb"
#########################################################################
set.seed(2)
training <- LPH07_1(100, factors = TRUE, class = TRUE)
testing <- LPH07_1(100, factors = TRUE, class = TRUE)
trainX <- training[, -ncol(training)]
trainY <- training$Class
rec_cls <- recipe(Class ~ ., data = training) %>%
step_center(all_predictors()) %>%
step_scale(all_predictors())
cctrl1 <- trainControl(method = "cv", number = 3, returnResamp = "all",
classProbs = TRUE,
summaryFunction = twoClassSummary)
cctrl2 <- trainControl(method = "LOOCV",
classProbs = TRUE, summaryFunction = twoClassSummary)
cctrl3 <- trainControl(method = "none",
classProbs = TRUE, summaryFunction = twoClassSummary)
cctrlR <- trainControl(method = "cv", number = 3, returnResamp = "all", search = "random")
set.seed(849)
test_class_cv_model <- train(trainX, trainY,
method = "awnb",
trControl = cctrl1,
metric = "ROC")
test_class_pred <- predict(test_class_cv_model, testing[, -ncol(testing)])
test_class_prob <- predict(test_class_cv_model, testing[, -ncol(testing)], type = "prob")
set.seed(849)
test_class_rand <- train(trainX, trainY,
method = "awnb",
trControl = cctrlR,
tuneLength = 4)
set.seed(849)
test_class_loo_model <- train(trainX, trainY,
method = "awnb",
trControl = cctrl2,
metric = "ROC")
set.seed(849)
test_class_none_model <- train(trainX, trainY,
method = "awnb",
trControl = cctrl3,
tuneGrid = test_class_cv_model$bestTune,
metric = "ROC")
test_class_none_pred <- predict(test_class_none_model, testing[, -ncol(testing)])
test_class_none_prob <- predict(test_class_none_model, testing[, -ncol(testing)], type = "prob")
test_levels <- levels(test_class_cv_model)
if(!all(levels(trainY) %in% test_levels))
cat("wrong levels")
#########################################################################
test_class_predictors1 <- predictors(test_class_cv_model)
#########################################################################
test_class_imp <- varImp(test_class_cv_model)
#########################################################################
tests <- grep("test_", ls(), fixed = TRUE, value = TRUE)
sInfo <- sessionInfo()
timestamp_end <- Sys.time()
save(list = c(tests, "sInfo", "timestamp", "timestamp_end"),
file = file.path(getwd(), paste(model, ".RData", sep = "")))
if(!interactive())
q("no")
|
/RegressionTests/Code/awnb.R
|
no_license
|
topepo/caret
|
R
| false
| false
| 2,915
|
r
|
timestamp <- Sys.time()
library(caret)
library(plyr)
library(recipes)
library(dplyr)
model <- "awnb"
#########################################################################
set.seed(2)
training <- LPH07_1(100, factors = TRUE, class = TRUE)
testing <- LPH07_1(100, factors = TRUE, class = TRUE)
trainX <- training[, -ncol(training)]
trainY <- training$Class
rec_cls <- recipe(Class ~ ., data = training) %>%
step_center(all_predictors()) %>%
step_scale(all_predictors())
cctrl1 <- trainControl(method = "cv", number = 3, returnResamp = "all",
classProbs = TRUE,
summaryFunction = twoClassSummary)
cctrl2 <- trainControl(method = "LOOCV",
classProbs = TRUE, summaryFunction = twoClassSummary)
cctrl3 <- trainControl(method = "none",
classProbs = TRUE, summaryFunction = twoClassSummary)
cctrlR <- trainControl(method = "cv", number = 3, returnResamp = "all", search = "random")
set.seed(849)
test_class_cv_model <- train(trainX, trainY,
method = "awnb",
trControl = cctrl1,
metric = "ROC")
test_class_pred <- predict(test_class_cv_model, testing[, -ncol(testing)])
test_class_prob <- predict(test_class_cv_model, testing[, -ncol(testing)], type = "prob")
set.seed(849)
test_class_rand <- train(trainX, trainY,
method = "awnb",
trControl = cctrlR,
tuneLength = 4)
set.seed(849)
test_class_loo_model <- train(trainX, trainY,
method = "awnb",
trControl = cctrl2,
metric = "ROC")
set.seed(849)
test_class_none_model <- train(trainX, trainY,
method = "awnb",
trControl = cctrl3,
tuneGrid = test_class_cv_model$bestTune,
metric = "ROC")
test_class_none_pred <- predict(test_class_none_model, testing[, -ncol(testing)])
test_class_none_prob <- predict(test_class_none_model, testing[, -ncol(testing)], type = "prob")
test_levels <- levels(test_class_cv_model)
if(!all(levels(trainY) %in% test_levels))
cat("wrong levels")
#########################################################################
test_class_predictors1 <- predictors(test_class_cv_model)
#########################################################################
test_class_imp <- varImp(test_class_cv_model)
#########################################################################
tests <- grep("test_", ls(), fixed = TRUE, value = TRUE)
sInfo <- sessionInfo()
timestamp_end <- Sys.time()
save(list = c(tests, "sInfo", "timestamp", "timestamp_end"),
file = file.path(getwd(), paste(model, ".RData", sep = "")))
if(!interactive())
q("no")
|
## Coursera Exploratory Data Analysis Project 1
setwd("~/a/highEd/dataScience_coursera/ExploreData/data")
# read the part of the file that includes the header record plus
# all the data for 2007-02-1 and 2007-02-01
hpc=read.table("household_power_consumption.txt", sep=";", header=TRUE,
nrows=69518)
hpc$Date<-as.Date(hpc$Date, format="%d/%m/%Y")
hpc<-hpc[hpc$Date >="2007-02-01" & hpc$Date<="2007-02-02",]
hpc$Global_active_power<-as.character(hpc$Global_active_power)
hpc$Global_active_power<-as.numeric(hpc$Global_active_power)
png("plot1.png", width=480, height=480, res=120)
with(hpc, hist(Global_active_power, main="Global Active Power",
xlab="Global Active Power (kilowatts)", col="red"))
dev.off()
# hist(hpc$Global_active_power, main="Global Active Power",
# xlab="Global Active Power (kilowatts)", col="red")
# with(hpc, hist(Global_active_power, main="Global Active Power",
# xlab="Global Active Power (kilowatts)", col="red"))
#
# pdf(file="plt.pdf")
# with(hpc, hist(Global_active_power, main="Global Active Power",
# xlab="Global Active Power (kilowatts)", col="red"))
#
# dev.off()
|
/plot1.R
|
no_license
|
pwvirgo/ExData_Plotting1
|
R
| false
| false
| 1,129
|
r
|
## Coursera Exploratory Data Analysis Project 1
setwd("~/a/highEd/dataScience_coursera/ExploreData/data")
# read the part of the file that includes the header record plus
# all the data for 2007-02-1 and 2007-02-01
hpc=read.table("household_power_consumption.txt", sep=";", header=TRUE,
nrows=69518)
hpc$Date<-as.Date(hpc$Date, format="%d/%m/%Y")
hpc<-hpc[hpc$Date >="2007-02-01" & hpc$Date<="2007-02-02",]
hpc$Global_active_power<-as.character(hpc$Global_active_power)
hpc$Global_active_power<-as.numeric(hpc$Global_active_power)
png("plot1.png", width=480, height=480, res=120)
with(hpc, hist(Global_active_power, main="Global Active Power",
xlab="Global Active Power (kilowatts)", col="red"))
dev.off()
# hist(hpc$Global_active_power, main="Global Active Power",
# xlab="Global Active Power (kilowatts)", col="red")
# with(hpc, hist(Global_active_power, main="Global Active Power",
# xlab="Global Active Power (kilowatts)", col="red"))
#
# pdf(file="plt.pdf")
# with(hpc, hist(Global_active_power, main="Global Active Power",
# xlab="Global Active Power (kilowatts)", col="red"))
#
# dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clusters.R
\name{dd_cluster_stations}
\alias{dd_cluster_stations}
\title{dd_cluster_stations}
\usage{
dd_cluster_stations(cl, city, stns, min_size = 3)
}
\arguments{
\item{cl}{Vector of cluster numbers obtained from \link{dd_cov_clusters}}
\item{city}{City for which clusters were obtained}
\item{stns}{A \pkg{tibble} of station data, including \code{stn_id},
\code{longitude}, and \code{latitude}, typically obtained from
\code{bikedata::bike_stations(city)}.}
\item{min_size}{Lower threshold above which to plot clusters}
}
\value{
A \pkg{tibble} of (\code{stn_id}, \code{longitude}, \code{latitude},
and \code{cl}), where the latter denotes the cluster number.
}
\description{
Convert the results of \link{dd_cov_clusters} to a \pkg{tibble} including
station IDs, longitudes, and latitudes.
}
|
/man/dd_cluster_stations.Rd
|
no_license
|
mpadge/distdecay
|
R
| false
| true
| 877
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clusters.R
\name{dd_cluster_stations}
\alias{dd_cluster_stations}
\title{dd_cluster_stations}
\usage{
dd_cluster_stations(cl, city, stns, min_size = 3)
}
\arguments{
\item{cl}{Vector of cluster numbers obtained from \link{dd_cov_clusters}}
\item{city}{City for which clusters were obtained}
\item{stns}{A \pkg{tibble} of station data, including \code{stn_id},
\code{longitude}, and \code{latitude}, typically obtained from
\code{bikedata::bike_stations(city)}.}
\item{min_size}{Lower threshold above which to plot clusters}
}
\value{
A \pkg{tibble} of (\code{stn_id}, \code{longitude}, \code{latitude},
and \code{cl}), where the latter denotes the cluster number.
}
\description{
Convert the results of \link{dd_cov_clusters} to a \pkg{tibble} including
station IDs, longitudes, and latitudes.
}
|
# initialize app
# remotes::install_github("ewenme/ghibli")
library(tidyverse)
landings <- read.csv(here::here("05_ggplotly","data","nmfslandings.csv"))
crabs <- landings %>%
filter(Confidentiality == "Public") %>%
#filter(str_detect(NMFS.Name, "CRAB")) %>%
mutate_at(c("Pounds","Dollars"), parse_number)
|
/05_ggplotly/00_initializeapp.R
|
no_license
|
alopp18/shinyoverview
|
R
| false
| false
| 316
|
r
|
# initialize app
# remotes::install_github("ewenme/ghibli")
library(tidyverse)
landings <- read.csv(here::here("05_ggplotly","data","nmfslandings.csv"))
crabs <- landings %>%
filter(Confidentiality == "Public") %>%
#filter(str_detect(NMFS.Name, "CRAB")) %>%
mutate_at(c("Pounds","Dollars"), parse_number)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.