markdown
stringlengths
0
37k
code
stringlengths
1
33.3k
path
stringlengths
8
215
repo_name
stringlengths
6
77
license
stringclasses
15 values
$$ s_{new} = \begin{cases} (t+1,h-1,i) & \text{if } x = 0 \\ (t+1,h+1,i) & \text{if } x = 1 \\ (t+1,h+1.5,i) & \text{if } x = 2 \end{cases} $$
# Sanity checks on the solved HJB value function:
# at the terminal time, ending below the initial height is penalised (10000),
# ending at/above it costs nothing; out-of-bounds heights are penalised at any time.
assert(hjb((settings['T'],settings['H_init']-1)) == 10000)
assert(hjb((settings['T'],settings['H_init'])) == 0)
assert(hjb((settings['T']-1,settings['H_min']-1)) == 10000)
assert(hjb((settings['T']-1,settings['H_max']+1)) == 10000)
Dynamic programming class.ipynb
icfly2/hjb_solvers
gpl-3.0
state is given by $s = (t,h,i)$ $$ v(s) = \min_x \big( cost(x,s) + \sum_j p_{ij}\, v(\text{new state}(x,s)) \big) $$
def make_schedule(settings):
    """Roll the cached optimal policy forward from the initial state.

    Requires settings['cache'] to have been filled by the HJB solver.
    Returns (total cost, control schedule per step, elevator height per step).
    """
    sched = np.ones(settings['T'])*np.nan
    cost_calc = 0
    elev = np.ones(settings['T']+1)*np.nan
    s = settings['Initial state']
    t = 0
    while t < settings['T']:
        sched[t] = settings['cache'][s][1]   # optimal control is stored at index 1
        print(sched[t])
        cost_calc += settings['Cost eq.'](sched[t], s)
        elev[t] = s[1]
        s = settings['State eq.'](sched[t], s)
        t += 1
    elev[settings['T']] = s[1]
    return cost_calc, sched, elev

def make_schedule2(settings):
    """Roll the stochastic policy forward once per noise scenario ij in {0,1,2},
    building a printable trace of (control, state) per step.

    Returns (list of per-scenario trace strings, cost of the last rollout).
    """
    sched_stack = []
    cost_summary = []
    string_stack = []
    elev = np.ones(settings['T']+1)*np.nan
    for ij in [0, 1, 2]:
        cost_calc = 0
        # Fixed: the original used the undefined name `i` here (NameError).
        string_stack.insert(ij, [])
        s = settings['Initial state']
        t = 0
        string_stack[ij].insert(0, '{0}'.format(s))
        while t < settings['T']:
            # Replace the noise component of the state with the scenario index ij.
            state = tuple(sk if sk in s[:-1] else ij for sk in s)
            print(s, s[:-1], state)
            x = settings['cache'][state][1]
            print(x)
            cost_calc += settings['Cost eq.'](x, s)
            elev[t] = s[1]
            s = settings['State eq.'](x, s)
            t += 1
            string_stack[ij].insert(t, '{0} {1} {2}'.format(x, s[1:], ij))
        elev[settings['T']] = s[1]
    return string_stack, cost_calc

make_schedule2(settings)
settings['cache']

def stoch_hjb(s):
    """Stochastic HJB value function with memoisation in settings['cache'].

    State s = (t, h, i): time, height, noise index.  Returns the minimal
    expected cost-to-go; also caches [value, argmin control] per state.
    """
    global settings
    # Memoised already?  (dict.has_key is Python-2 only; `in` works everywhere.)
    if s in settings['cache']:
        return settings['cache'][s][0]
    if s[0] == settings['T'] and s[1] < settings['H_init']:
        return 10000    # terminal penalty: did not return to the initial height
    elif s[0] == settings['T'] and s[1] >= settings['H_init']:
        return 0
    elif s[1] < settings['H_min'] or s[1] > settings['H_max']:
        return 10000    # height out of bounds at any intermediate time
    else:
        p = []
        for x in settings['x_set']:
            # Expected future cost over the three noise outcomes.
            future = sum(stoch_hjb(settings['State eq.'](x, (s[0], s[1], i)))
                         *settings['P'][s[2]][i] for i in [0, 1, 2])
            p.append(settings['Cost eq.'](x, s) + future)
        m = min(p)
        # NOTE(review): indexing p by the control value assumes
        # x_set == [0, 1, ...]; confirm against how x_set is built.
        for x in settings['x_set']:
            if m == p[x]:
                pp = x
        settings['cache'][s] = [m, pp]
        return m

# Small demo of list-of-lists construction with insert().
A = []
for i in [0, 1, 2]:
    A.insert(i, [])
    print(A)
    for t in range(5):
        A[i].insert(t, i*t**2)
A

cost_calc, sched, elev = make_schedule(settings)
print(sched)
print(elev)
print(cost_calc)
settings['cache'][settings['Initial state']]

cost_calc, sched, elev = make_schedule(settings)
print(sched)
print(elev)
print(cost_calc)

# Scratch experiments below.
dic = {'blub': simple_cost}
dic['blub']
dic['blub'](1, (0, 1))
len([5,5,5,5,5,8,8,8,8,8,12,12,12,12,12,50,50,50,50,20,20,6,5,5])
np.random.randint(50, size=24)

# Report a penalty when a component of x falls outside its per-component bounds.
x = [100, 7, 90, 787]
x_lim_min = [0, 10, 0, 0]
x_lim_max = [100, 10, 100, 1000]
for c, i in enumerate(x):
    if i < x_lim_min[c] or i > x_lim_max[c]:
        print(1000)
Dynamic programming class.ipynb
icfly2/hjb_solvers
gpl-3.0
Let Jupyter know that you're gonna be charting inline (Don't worry if you get a warning about building a font library.)
# Render matplotlib charts inline in the notebook output cells.
%matplotlib inline
completed/07. pandas? pandas! (Part 3).ipynb
ireapps/cfj-2017
mit
Read in MLB data
# create a data frame df = pd.read_csv('data/mlb.csv') # use head to check it out df.head()
completed/07. pandas? pandas! (Part 3).ipynb
ireapps/cfj-2017
mit
Prep data for charting Let's chart total team payroll, most to least. Let's repeat the grouping we did before.
# Sum salaries per team, then rank teams from highest to lowest payroll.
by_team = df[['TEAM', 'SALARY']].groupby('TEAM').sum()
by_team = by_team.reset_index().set_index('TEAM')
grouped_by_team = by_team.sort_values('SALARY', ascending=False)

# Keep only the ten biggest payrolls for charting.
top_10 = grouped_by_team.head(10)
top_10
completed/07. pandas? pandas! (Part 3).ipynb
ireapps/cfj-2017
mit
Make a horizontal bar chart
# make a horizontal bar chart
# set the figure size
bar_chart = top_10.plot.barh(figsize=(14, 6))

# sort the bars top to bottom
bar_chart.invert_yaxis()

# set the title
bar_chart.set_title('Top 10 opening day MLB payrolls, 2017')

# kill the legend
bar_chart.legend_.remove()

# kill y axis label
bar_chart.set_ylabel('')

# define a function to format x axis ticks
# otherwise they'd all run together (100000000)
# via https://stackoverflow.com/a/46454637
def millions(num, pos, m=1000000):
    """Format a salary tick `num` as '$<millions>M'.

    Whole millions drop the decimal point ('$100M'); otherwise the
    fractional value is kept ('$12.5M').  `pos` is required by
    FuncFormatter's callback signature but unused.
    """
    quotient = num / m
    if num % m == 0:
        # Fixed idiom: the original branched to int(num/m) vs float(num/m);
        # num/m is already a float in Python 3, so only the int() cast matters.
        quotient = int(quotient)
    return '${}M'.format(quotient)

# format the x axis ticks using the function we just defined
bar_chart.xaxis.set_major_formatter(FuncFormatter(millions))
completed/07. pandas? pandas! (Part 3).ipynb
ireapps/cfj-2017
mit
Calculating scale heights for temperature and air density Here is equation 1 of the hydrostatic balance notes $$\frac{ 1}{\overline{H_p}} = \overline{ \left ( \frac{1 }{H} \right )} = \frac{\int_{0 }^{z}\frac{1}{H}\, dz^\prime }{z-0} $$ where $$H=R_d T/g$$ and here is the Python code to do that integral:
g = 9.8    # gravitational acceleration (m/s^2); don't worry about g(z) for this exercise
Rd = 287.  # dry-air gas constant (J/kg/K) -- the original comment said kg/m^3, which is a density unit

def calcScaleHeight(T, p, z):
    """
    Calculate the pressure scale height H_p by averaging 1/H over the column.

    Parameters
    ----------
    T: vector (float)
       temperature (K)
    p: vector (float) of len(T)
       pressure (pa) -- accepted for a uniform interface but not used here
    z: vector (float) of len(T)
       height (m)

    Returns
    -------
    Hbar: float
       pressure scale height (m)
    """
    dz = np.diff(z)
    # Layer-mean temperature between adjacent levels.
    TLayer = (T[1:] + T[0:-1])/2.
    oneOverH = g/(Rd*TLayer)
    Zthick = z[-1] - z[0]
    # Trapezoid-style column average of 1/H, then invert.
    oneOverHbar = np.sum(oneOverH*dz)/Zthick
    Hbar = 1/oneOverHbar
    return Hbar
notebooks/hydrostatic.ipynb
a301-teaching/a301_code
mit
Similarly, equation (5) of the hydrostatic balance notes is: $$\frac{d\rho }{\rho} = - \left ( \frac{1 }{H} + \frac{1 }{T} \frac{dT }{dz} \right ) dz \equiv - \frac{dz }{H_\rho} $$ Which leads to $$\frac{ 1}{\overline{H_\rho}} = \frac{\int_{0 }^{z}\left [ \frac{1}{H} + \frac{1 }{T} \frac{dT }{dz} \right ] dz^\prime }{z-0} $$ and the following python function:
def calcDensHeight(T, p, z, g=9.8, Rd=287.):
    """
    Calculate the density scale height H_rho by averaging 1/H_rho over the column.

    Parameters
    ----------
    T: vector (float)
       temperature (K)
    p: vector (float) of len(T)
       pressure (pa) -- accepted for a uniform interface but not used here
    z: vector (float) of len(T)
       height (m)
    g: float, optional
       gravitational acceleration (m/s^2); default matches the module constant
    Rd: float, optional
       dry-air gas constant (J/kg/K); default matches the module constant

    Returns
    -------
    Hbar: float
       density scale height (m)
    """
    dz = np.diff(z)
    TLayer = (T[1:] + T[0:-1])/2.
    # Lapse-rate term: (1/T) dT/dz, evaluated per layer.
    dTdz = np.diff(T)/np.diff(z)
    oneOverH = g/(Rd*TLayer) + (1/TLayer*dTdz)
    Zthick = z[-1] - z[0]
    oneOverHbar = np.sum(oneOverH*dz)/Zthick
    Hbar = 1/oneOverHbar
    return Hbar
notebooks/hydrostatic.ipynb
a301-teaching/a301_code
mit
How do $\overline{H_p}$ and $\overline{H_\rho}$ compare for the tropical sounding?
sounding='tropics' # # grab the dataframe and get the sounding columns # df=sound_dict[sounding] z=df['z'].values Temp=df['temp'].values press=df['press'].values # # limit calculation to bottom 10 km # hit=z<10000. zL,pressL,TempL=(z[hit],press[hit],Temp[hit]) rhoL=pressL/(Rd*TempL) Hbar= calcScaleHeight(TempL,pressL,zL) Hrho= calcDensHeight(TempL,pressL,zL) print("pressure scale height for the {} sounding is {:5.2f} km".format(sounding,Hbar*1.e-3)) print("density scale height for the {} is {:5.2f} km".format(sounding,Hrho*1.e-3))
notebooks/hydrostatic.ipynb
a301-teaching/a301_code
mit
How well do these average values represent the pressure and density profiles?
theFig,theAx=plt.subplots(1,1)
# log pressure axis: pressure falls roughly exponentially with height
theAx.semilogy(Temp,press/100.)
#
# need to flip the y axis since pressure decreases with height
#
theAx.invert_yaxis()
tickvals=[1000,800, 600, 400, 200, 100, 50,1]
theAx.set_yticks(tickvals)
# Plain integer labels instead of the 10^x default on a log axis.
majorFormatter = ticks.FormatStrFormatter('%d')
theAx.yaxis.set_major_formatter(majorFormatter)
theAx.set_yticklabels(tickvals)
theAx.set_ylim([1000.,50.])
theAx.set_title('{} temperature profile'.format(sounding))
theAx.set_xlabel('Temperature (K)')
# `_ =` suppresses the notebook's echo of the returned label object.
_=theAx.set_ylabel('pressure (hPa)')
notebooks/hydrostatic.ipynb
a301-teaching/a301_code
mit
Now check the hydrostatic approximation by plotting the pressure column against $$p(z) = p_0 \exp \left (-z/\overline{H_p} \right )$$ vs. the actual sounding p(T):
fig,theAx=plt.subplots(1,1)
# Isothermal hydrostatic approximation: p(z) = p0 * exp(-z / Hbar).
hydroPress=pressL[0]*np.exp(-zL/Hbar)
theAx.plot(pressL/100.,zL/1000.,label='sounding')
theAx.plot(hydroPress/100.,zL/1000.,label='hydrostat approx')
theAx.set_title('height vs. pressure for tropics')
theAx.set_xlabel('pressure (hPa)')
theAx.set_ylabel('height (km)')
theAx.set_xlim([500,1000])
theAx.set_ylim([0,5])
tickVals=[500, 600, 700, 800, 900, 1000]
theAx.set_xticks(tickVals)
theAx.set_xticklabels(tickVals)
_=theAx.legend(loc='best')
notebooks/hydrostatic.ipynb
a301-teaching/a301_code
mit
Again plot the hydrostatic approximation $$\rho(z) = \rho_0 \exp \left (-z/\overline{H_\rho} \right )$$ vs. the actual sounding $\rho(z)$:
fig,theAx=plt.subplots(1,1)
# Hydrostatic approximation for density: rho(z) = rho0 * exp(-z / Hrho).
hydroDens=rhoL[0]*np.exp(-zL/Hrho)
theAx.plot(rhoL,zL/1000.,label='sounding')
theAx.plot(hydroDens,zL/1000.,label='hydrostat approx')
theAx.set_title('height vs. density for the tropics')
# NOTE(review): '\,' in a non-raw string is an invalid escape on Python 3.6+;
# consider a raw string r'density ($kg\,m^{-3}$)' -- left unchanged here.
theAx.set_xlabel('density ($kg\,m^{-3}$)')
theAx.set_ylabel('height (km)')
theAx.set_ylim([0,5])
_=theAx.legend(loc='best')
notebooks/hydrostatic.ipynb
a301-teaching/a301_code
mit
Q: What is the variance of $\mathscr{P}_{HV}$?
# Expectation value <psi| Phv^2 |psi> (notebook displays the qutip result).
psi.dag()*Phv*Phv*psi
# Variance = <Phv^2> - <Phv>^2; here <Phv> = -0.6 per the earlier cells.
1- (-0.6)**2
Lab 4 - Measurements.ipynb
amcdawes/QMlabs
mit
Example: Use the random function to generate a mock data set for the state $|\psi\rangle$. random.choice([1,-1],size=10,p=[0.2,0.8]) gives a list of 10 numbers, either 1 or -1 with the associated probability p:
# Mock measurement record: one million draws of +1/-1 with P(+1)=0.2, P(-1)=0.8,
# matching the state |psi> analysed above.
data = random.choice([1, -1],size=1000000,p=[0.2,0.8])
Lab 4 - Measurements.ipynb
amcdawes/QMlabs
mit
Q: Verify the mean and variance of the mock data set match your QM predictions. How big does the set need to be for you to get ±5% agreement?
# Sample mean and variance of the mock record; QM predicts
# <P> = 0.2 - 0.8 = -0.6 and var = 1 - 0.36 = 0.64.
data.mean()
data.var()
Lab 4 - Measurements.ipynb
amcdawes/QMlabs
mit
These are what we want from the new pipeline. Extracting spectra from the mzml, mzxml files Grouping features and exporting in a format that MS2LDA can deal with Running MS2LDA Visualising with possible variations on step (1), (2) and maybe even (3) too. Example Step 1 Below we define an example class to load existing CSV files that have been created before from an mzxml/mzml (method 3) pair. However you can imagine that here we provide different implementations to load mztab, MSP-style files, whatever.
class ExtractSpectra(lg.Task):
    """Luigi task that (eventually) extracts MS1/MS2 spectra from mzxml/mzml
    files; here it just points at pre-made CSV files named <prefix>_ms1/_ms2."""

    datadir = lg.Parameter()  # directory containing the input files
    prefix = lg.Parameter()   # filename prefix shared by the ms1/ms2 csvs

    def run(self):
        # we could actually extract the spectra from mzxml, mzml files here.
        # Fixed: the original printed the module-level globals `datadir`/`prefix`
        # (the latter undefined -> NameError) instead of the task parameters.
        print('Processing %s and %s' % (self.datadir, self.prefix))

    def output(self):
        # NOTE(review): plain string concatenation assumes datadir ends with a
        # separator -- true for the datadir defined later in this notebook.
        out_dict = {
            'ms1': lg.LocalTarget(self.datadir + self.prefix + '_ms1.csv'),
            'ms2': lg.LocalTarget(self.datadir + self.prefix + '_ms2.csv'),
        }
        return out_dict
notebooks/experimental_pipeline.ipynb
sdrogers/lda
gpl-3.0
Example Step 2 Similarly here we define a class to take the output of the ExtractSpectra class above (the dependency is defined in the requires method below), performs the grouping by detecting gaps along the groups (defined in the run method) and produces the output to a pickled file (defined in the output method). It would be easy to provide different implementations here based on other methods of grouping as well. I just copied and pasted .. the code below could be shorter.
class GroupFeatures(lg.Task):
    """Luigi task that groups MS2 fragment/loss features across files and
    pickles the resulting corpora plus a global word index for the LDA step."""

    # Grouping hyper-parameters.
    scaling_factor = lg.IntParameter(default=1000)
    fragment_grouping_tol = lg.IntParameter(default=7)
    loss_grouping_tol = lg.IntParameter(default=7)
    loss_threshold_min_count = lg.IntParameter(default=5)
    loss_threshold_max_val = lg.IntParameter(default=200)
    loss_threshold_min_val = lg.IntParameter(default=0)
    datadir = lg.Parameter()
    prefixes = lg.ListParameter()

    def requires(self):
        # Fixed: the original passed the module-level global `datadir`
        # instead of the task's own parameter.
        return [ExtractSpectra(datadir=self.datadir, prefix=prefix) for prefix in self.prefixes]

    def run(self):
        # input_set is a list of tuples of (ms1, ms2) csv paths from upstream tasks.
        input_set = []
        for out_dict in self.input():
            ms1 = out_dict['ms1'].path
            ms2 = out_dict['ms2'].path
            input_set.append((ms1, ms2))

        # Perform the grouping.
        extractor = SparseFeatureExtractor(input_set, self.fragment_grouping_tol,
                                           self.loss_grouping_tol,
                                           self.loss_threshold_min_count,
                                           self.loss_threshold_max_val,
                                           self.loss_threshold_min_val,
                                           input_type='filename')
        fragment_q = extractor.make_fragment_queue()
        fragment_groups = extractor.group_features(fragment_q, extractor.fragment_grouping_tol)
        loss_q = extractor.make_loss_queue()
        loss_groups = extractor.group_features(loss_q, extractor.loss_grouping_tol,
                                               check_threshold=True)
        extractor.create_counts(fragment_groups, loss_groups, self.scaling_factor)

        # Shared vocabulary index built from the first file's entry.
        mat, vocab, ms1, ms2 = extractor.get_entry(0)
        global_word_index = {v: i for i, v in enumerate(vocab)}

        corpus_dictionary = {}
        for f in range(extractor.F):
            print("Processing file {}".format(f))
            corpus = {}
            mat, vocab, ms1, ms2 = extractor.get_entry(f)
            n_docs, n_words = mat.shape
            print(n_docs, n_words)
            d_pos = 0
            for d in ms1.iterrows():
                # One 'document' per MS1 peak, named by its m/z and retention time.
                doc_name = "{}_{}".format(d[1]['mz'], d[1]['rt'])
                corpus[doc_name] = {}
                for word_index, count in zip(mat[d_pos, :].rows[0], mat[d_pos, :].data[0]):
                    if count > 0:
                        corpus[doc_name][vocab[word_index]] = count
                d_pos += 1
            # Added by Simon: derive a short name from the ms1 csv filename.
            name = input_set[f][0].split('/')[-1].split('ms1')[0][:-1]
            corpus_dictionary[name] = corpus

        output_dict = {}
        output_dict['global_word_index'] = global_word_index
        output_dict['corpus_dictionary'] = corpus_dictionary
        # NOTE(review): pickling into a target opened in text mode only works on
        # Python 2; on Python 3 this needs a binary-format target -- confirm
        # against the luigi version in use.
        with self.output().open('w') as f:
            pickle.dump(output_dict, f)

    def output(self):
        return lg.LocalTarget('output_dict.p')
notebooks/experimental_pipeline.ipynb
sdrogers/lda
gpl-3.0
Example Step 3 Finally here we define a RunLDA task that depends on the output of the grouping class above.
class RunLDA(lg.Task):
    # LDA hyper-parameters: iteration count, topic count, Dirichlet priors.
    n_its = lg.IntParameter(default=10)
    K = lg.IntParameter(default=300)
    alpha = lg.FloatParameter(default=1)
    eta = lg.FloatParameter(default=0.1)
    update_alpha = lg.BoolParameter(default=True)
    datadir = lg.Parameter()
    prefixes = lg.ListParameter()

    def requires(self):
        # Depends on the grouped-features pickle produced by GroupFeatures.
        return GroupFeatures(datadir=self.datadir, prefixes=self.prefixes)

    def run(self):
        # Load the corpora + global word index pickled by the upstream task.
        with self.input().open('r') as f:
            output_dict = pickle.load(f)
        global_word_index = output_dict['global_word_index']
        corpus_dictionary = output_dict['corpus_dictionary']
        # Run multi-file variational LDA over all corpora with a shared vocabulary.
        mf_lda = MultiFileVariationalLDA(corpus_dictionary, word_index=global_word_index,
                                         K=self.K, alpha=self.alpha,
                                         eta=self.eta, update_alpha=self.update_alpha)
        mf_lda.run_vb(parallel=False, n_its=self.n_its, initialise=True)
notebooks/experimental_pipeline.ipynb
sdrogers/lda
gpl-3.0
Run the pipeline Set up the initial parameters
# Location of the CSV files extracted from the urine mzXML study (positive mode).
datadir = ('/Users/joewandy/Dropbox/Meta_clustering/MS2LDA/large_study/'
           'Urine_mzXML_large_study/method_1/POS/')

# The four sample prefixes we want to push through the pipeline.
prefixes = ['Urine_StrokeDrugs_%02d_T10_POS' % run_no for run_no in (2, 3, 8, 9)]

# luigi passes list parameters on the command line as JSON.
prefixes_json = json.dumps(prefixes)
notebooks/experimental_pipeline.ipynb
sdrogers/lda
gpl-3.0
And run the pipeline
# Kick off the luigi pipeline: one worker, in-process scheduler,
# targeting the RunLDA task with the parameters defined above.
lg.run(['RunLDA', '--workers', '1', '--local-scheduler', '--datadir', datadir, '--prefixes', prefixes_json])
notebooks/experimental_pipeline.ipynb
sdrogers/lda
gpl-3.0
We create 'bins' for the columns Age and Fare to help with later plots.
# Bucket Age and Fare into width-10 bins (0, 10, 20, ...) for cleaner plots.
df['Ages'] = df['Age'].map(lambda x: int(x/10)*10)
df['Fares'] = df['Fare'].map(lambda x: int(x/10)*10)
# Load the rpy2 bridge and push the data frame into the embedded R session.
%reload_ext rpy2.ipython
%Rpush df
%%R
library(ggplot2)
scikit/titanic/notebooks/Appendix B - Visualisation.ipynb
obulpathi/datascience
apache-2.0
First, we review the number of passengers by the column Survived, which indicates whether or not they survived.
%%R -w 400
# Bar chart of passenger counts by survival status (0 = died, 1 = survived).
qplot(factor(Survived), data=df, geom="bar")
scikit/titanic/notebooks/Appendix B - Visualisation.ipynb
obulpathi/datascience
apache-2.0
We now view a breakdown of the data by the column Sex, and rescale so that the breakdown appears as a percentage.
%%R -w 400
# Survival bars filled by sex; position="fill" rescales each bar to 100%.
qplot(factor(Survived), data=df, fill=factor(Sex), geom="bar", position="fill")
# Alternative breakdowns kept for reference:
#qplot(factor(Survived), data=df, fill=factor(Pclass), geom="bar", position="fill")
#qplot(factor(Survived), data=df, fill=factor(Ages), geom="bar", position="fill")
scikit/titanic/notebooks/Appendix B - Visualisation.ipynb
obulpathi/datascience
apache-2.0
Next, we make a scatter plot between the columns Age and Survived. Note that the column Survived has categorical values, and is represented as factor(Survived).
%%R -w 800
# Scatter of Age against survival; Survived is categorical, hence factor().
qplot(Age, factor(Survived), data=df)
scikit/titanic/notebooks/Appendix B - Visualisation.ipynb
obulpathi/datascience
apache-2.0
We improve our plot by adjusting the shade of each point, such that the point is lighter if the passenger is older.
%%R -w 800
# Same scatter, with point colour mapped to Age so older passengers plot lighter.
qplot(Age, factor(Survived), data=df, color=Age, size=10, alpha=0.5)
# Variant that also scales point size by Age:
#qplot(Age, factor(Survived), data=df, color=Age, size=Age, alpha=0.5)
scikit/titanic/notebooks/Appendix B - Visualisation.ipynb
obulpathi/datascience
apache-2.0
We plot the passengers by their passenger class.
%%R -w 600
# Horizontal bar chart of passenger counts per class (coord_flip rotates it).
qplot(factor(Pclass), data=df, fill=factor(Pclass), geom="bar") + coord_flip()
scikit/titanic/notebooks/Appendix B - Visualisation.ipynb
obulpathi/datascience
apache-2.0
We improve this plot by breaking down each bar by their departure point.
%%R -w 600
# Same chart, with each class bar segmented by port of embarkation.
qplot(factor(Pclass), data=df, fill=Embarked, geom="bar") + coord_flip()
scikit/titanic/notebooks/Appendix B - Visualisation.ipynb
obulpathi/datascience
apache-2.0
Finally, we review the column Age in a histogram.
%%R -w 800
# Histogram of passenger ages (default binning).
qplot(Age, data=df, geom="histogram")
scikit/titanic/notebooks/Appendix B - Visualisation.ipynb
obulpathi/datascience
apache-2.0
We then review the fare distribution by each age range.
%%R -w 800
# Age histogram (5-year bins) with bars segmented by the binned Fares column.
qplot(Age, data=df, fill=factor(Fares), geom="histogram", binwidth=5)
scikit/titanic/notebooks/Appendix B - Visualisation.ipynb
obulpathi/datascience
apache-2.0
Funkci plot můžeme předat další parametr, který určuje: styl čáry: - -- -. : barvu: b g r c m y k w styl vykreslení bodu: . , o v ^ < > s p h H * d D _ | + x chci vědět víc
# Demo of matplotlib format strings: colour + marker + line-style shorthand.
plot(x,y,'g<')                     # green left-pointing triangles
plot(x,y,'b+:')                    # blue plus markers with a dotted line
plot(x,y,'r*', markersize=15)      # large red stars
plot(x,y,'y--')                    # yellow dashed line
Matplotlib--zaklady_pro_tvorbu_grafu.ipynb
tlapicka/IPythonNotebooks
gpl-2.0
Příklad s exponenciální funkcí Nejprve vytvoříme hodnoty pro osu x:...
# 200 evenly spaced x values on [-10, 2].
x=linspace(-10,2,200)
Matplotlib--zaklady_pro_tvorbu_grafu.ipynb
tlapicka/IPythonNotebooks
gpl-2.0
... a poté pro osu y:...
# Corresponding y values for the exponential function.
y=exp(x)
Matplotlib--zaklady_pro_tvorbu_grafu.ipynb
tlapicka/IPythonNotebooks
gpl-2.0
...závislost si můžeme vykreslit:...
# Plot y = exp(x) with default styling.
plot(x,y)
Matplotlib--zaklady_pro_tvorbu_grafu.ipynb
tlapicka/IPythonNotebooks
gpl-2.0
Můžeme si zobrazit mřížku, popsat osy a upravit rozsah os:...
# Same plot with a grid, axis labels, and explicit axis ranges.
plot(x,y)
grid(True)
xlabel('x')
ylabel('y')
xlim( [-11,3] )
ylim( [-1,10] )
Matplotlib--zaklady_pro_tvorbu_grafu.ipynb
tlapicka/IPythonNotebooks
gpl-2.0
Příklad s funkcí sinus, f=50 Hz Nejprve si vytvoříme časovou osu:..
# 50 Hz signal; 0.08 s spans four full periods, sampled at 200 points.
f=50
t=linspace(0,0.08,200)
Matplotlib--zaklady_pro_tvorbu_grafu.ipynb
tlapicka/IPythonNotebooks
gpl-2.0
... vypočítáme hodnoty napětí:..
# Voltage waveform: amplitude 1.2 V cosine at frequency f.
u=1.2*cos(2*pi*f*t)
Matplotlib--zaklady_pro_tvorbu_grafu.ipynb
tlapicka/IPythonNotebooks
gpl-2.0
... a proudu:
# Current waveform: amplitude 0.7 A, lagging by pi/4.
i=0.7*sin(2*pi*f*t-pi/4)
Matplotlib--zaklady_pro_tvorbu_grafu.ipynb
tlapicka/IPythonNotebooks
gpl-2.0
... a vyneseme do grafu:
# Voltage and current on the same axes, with a grid.
plot(t,u,t,i)
grid(True)
Matplotlib--zaklady_pro_tvorbu_grafu.ipynb
tlapicka/IPythonNotebooks
gpl-2.0
Vypočítáme výkon:..
# Instantaneous power p(t) = u(t) * i(t).
p=u*i
Matplotlib--zaklady_pro_tvorbu_grafu.ipynb
tlapicka/IPythonNotebooks
gpl-2.0
Vyneseme vše do grafu, popíšeme osy, uděláme nadpis a legendu:
# Combined plot of voltage, current and power with labels, title and legend.
figure(figsize=(10,7))
plot(t,u,':',label=u'elektrické napětí')
plot(t,i,'--',label=u'elektrický proud')
plot(t,p,label=u'elektrický výkon')
xlim([0,0.08])
ylim([-1.3,1.8])
title(u'Časový průběh napětí proudu a výkonu')
minorticks_on()
grid(True)
xlabel('t [s]')
ylabel('u [V], i [A], p [W]')
legend(loc='upper right')
Matplotlib--zaklady_pro_tvorbu_grafu.ipynb
tlapicka/IPythonNotebooks
gpl-2.0
Data We are going to use the Pascal dataset for object detection. There is a version from 2007 and a bigger version from 2012. We'll use the 2007 version here.
# Download (if needed) and extract the Pascal VOC 2007 dataset; returns its local path.
path = untar_data(URLs.PASCAL_2007)
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
The annotations for the images are stored in json files that give the bounding boxes for each class.
import json

# Load the COCO-format annotation file for the training split.
# Fixed: json.load(open(...)) leaked the file handle; use a context manager.
with open(path/'train.json') as annot_file:
    annots = json.load(annot_file)

# Inspect the top-level keys and the first bounding-box annotation.
annots.keys()
annots['annotations'][0]
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
This first annotation is a bounding box on the image with id 12, and the corresponding object is the category with id 7. We can read the correspondance in the 'images' and the 'categories' keys.
# Display the id -> name mapping for the object categories.
annots['categories']
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
There is a convenience method in fastai to extract all the annotations and map them with the right images/categories directly, as long as they are in the format we just saw (called the COCO format).
# fastai helper: returns (image filenames, [bounding boxes + category labels]) per split.
train_images, train_lbl_bbox = get_annotations(path/'train.json')
val_images, val_lbl_bbox = get_annotations(path/'valid.json')
#tst_images, tst_lbl_bbox = get_annotations(path/'test.json')
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
Here we will directly find the same image as before at the beginning of the training set, with the corresponding bounding box and category.
# First training image filename and its (boxes, labels) annotation.
train_images[0], train_lbl_bbox[0]
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
To see it, we open the image properly and we create an ImageBBox object from the list of bounding boxes. This will allow us to apply data augmentation to our bounding box. To create an ImageBBox, we need to give it the height and the width of the original picture, the list of bounding boxes, the list of category ids and the classes list (to map an id to a class). Here we don't have a class dictionary available (that will be done automatically behind the scenes with the data block API), so we just pass id 0 and classes=['car'].
# Open the first training image and wrap its boxes in an ImageBBox
# (needs image size, box list, class ids, and class-name list).
img = open_image(path/'train'/train_images[0])
bbox = ImageBBox.create(*img.size, train_lbl_bbox[0][0], [0], classes=['car'])
img.show(figsize=(6,4), y=bbox)
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
This works with one or several bounding boxes:
# Second training image has two objects; show both boxes with their classes.
train_images[1], train_lbl_bbox[1]
img = open_image(path/'train'/train_images[1])
bbox = ImageBBox.create(*img.size, train_lbl_bbox[1][0], [0, 1], classes=['person', 'horse'])
img.show(figsize=(6,4), y=bbox)
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
And if we apply a transform to our image and the ImageBBox object, they stay aligned:
# Apply the same rotation to image and boxes; they stay aligned.
img = img.rotate(-10)
bbox = bbox.rotate(-10)
img.show(figsize=(6,4), y=bbox)
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
We group all the image filenames and annotations together, to use the data block API to load the dataset in a DataBunch.
# Merge train+valid annotations and build a filename -> (boxes, labels) lookup.
images, lbl_bbox = train_images+val_images,train_lbl_bbox+val_lbl_bbox
img2bbox = dict(zip(images, lbl_bbox))
get_y_func = lambda o:img2bbox[o.name]

def get_data(bs, size):
    # Data block pipeline: items from folder, split by the validation filenames,
    # labelled via the lookup, transformed (boxes transformed too via tfm_y),
    # and batched with bb_pad_collate to pad variable box counts.
    src = ObjectItemList.from_folder(path/'train')
    src = src.split_by_files(val_images)
    src = src.label_from_func(get_y_func)
    src = src.transform(get_transforms(), size=size, tfm_y=True)
    return src.databunch(path=path, bs=bs, collate_fn=bb_pad_collate)

data = get_data(64,128)
data.show_batch(rows=3)
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
Model The architecture we will use is a RetinaNet, which is based on a Feature Pyramid Network. This is a bit like a Unet in the sense we have a branch where the image is progressively reduced then another one where we upsample it again, and there are lateral connections, but we will use the feature maps produced at each level for our final predictions. Specifically, if we start with an image of size (256,256), the traditional resnet has intermediate features maps of sizes: - C1 (128, 128) - C2 (64, 64) - C3 (32, 32) - C4 (16, 16) - C5 (8, 8) To which the authors add two other features maps C6 and C7 of sizes (4,4) and (2,2) by using stride-2 convolutions. (Note that the model requires an image size of 128 at the minimum because of this.) Then we have P7 = C7 and we go down from P7 to P2 by upsampling the result of the previous P-layer and adding a lateral connection. The idea is that the last feature map P7 will be responsible to detect big objects, while one like P3 will be responsible to detect smaller objects. Each P-something feature map then goes through two subnet of four convolutional layers (with the same weights for all the feature maps), one that will be responsible for finding the category of the object and the other for drawing the bounding box. Each location in the feature map is assigned a given number of anchors (see below) so the classifier ends up with n_anchors * n_classes channels and the bounding box regressor with n_anchors * 4 channels.
# Grab the convenience functions that help us build the FPN (borrowed from fastai's Unet).
from fastai.vision.models.unet import _get_sfs_idxs, model_sizes, hook_outputs

class LateralUpsampleMerge(nn.Module):
    "Merge the features coming from the downsample path (in `hook`) with the upsample path."
    def __init__(self, ch, ch_lat, hook):
        super().__init__()
        self.hook = hook
        # 1x1 conv projects the lateral (encoder) feature map to `ch` channels.
        self.conv_lat = conv2d(ch_lat, ch, ks=1, bias=True)

    def forward(self, x):
        # Upsample x to the lateral map's spatial size, then add the projected lateral features.
        return self.conv_lat(self.hook.stored) + F.interpolate(x, self.hook.stored.shape[-2:], mode='nearest')

class RetinaNet(nn.Module):
    "Implements RetinaNet from https://arxiv.org/abs/1708.02002"
    def __init__(self, encoder:nn.Module, n_classes, final_bias=0., chs=256, n_anchors=9, flatten=True):
        super().__init__()
        self.n_classes,self.flatten = n_classes,flatten
        # Dummy size used only to probe the encoder's intermediate feature-map sizes.
        imsize = (256,256)
        sfs_szs = model_sizes(encoder, size=imsize)
        sfs_idxs = list(reversed(_get_sfs_idxs(sfs_szs)))
        # Hooks capture the intermediate encoder activations (C-levels) during forward.
        self.sfs = hook_outputs([encoder[i] for i in sfs_idxs])
        self.encoder = encoder
        # P5 from C5 via 1x1 conv; P6/P7 via stride-2 convs, as in the paper.
        self.c5top5 = conv2d(sfs_szs[-1][1], chs, ks=1, bias=True)
        self.c5top6 = conv2d(sfs_szs[-1][1], chs, stride=2, bias=True)
        self.p6top7 = nn.Sequential(nn.ReLU(), conv2d(chs, chs, stride=2, bias=True))
        # Lateral merges build the lower P-levels from C-levels plus upsampled features.
        self.merges = nn.ModuleList([LateralUpsampleMerge(chs, sfs_szs[idx][1], hook)
                                     for idx,hook in zip(sfs_idxs[-2:-4:-1], self.sfs[-2:-4:-1])])
        self.smoothers = nn.ModuleList([conv2d(chs, chs, 3, bias=True) for _ in range(3)])
        # Shared heads: classifier (n_classes per anchor) and box regressor (4 per anchor).
        self.classifier = self._head_subnet(n_classes, n_anchors, final_bias, chs=chs)
        self.box_regressor = self._head_subnet(4, n_anchors, 0., chs=chs)

    def _head_subnet(self, n_classes, n_anchors, final_bias=0., n_conv=4, chs=256):
        "Helper function to create one of the subnets for regression/classification."
        layers = [conv_layer(chs, chs, bias=True, norm_type=None) for _ in range(n_conv)]
        layers += [conv2d(chs, n_classes * n_anchors, bias=True)]
        # Zero weights + `final_bias` bias on the last conv (the paper's initialisation trick).
        layers[-1].bias.data.zero_().add_(final_bias)
        layers[-1].weight.data.fill_(0)
        return nn.Sequential(*layers)

    def _apply_transpose(self, func, p_states, n_classes):
        # Final result of the classifier/regressor is bs * (k * n_anchors) * h * w
        # We make it bs * h * w * n_anchors * k then flatten in bs * -1 * k so we can concatenate
        # all the results in bs * anchors * k (the non-flatten version is there for debugging only)
        if not self.flatten:
            sizes = [[p.size(0), p.size(2), p.size(3)] for p in p_states]
            return [func(p).permute(0,2,3,1).view(*sz,-1,n_classes) for p,sz in zip(p_states,sizes)]
        else:
            return torch.cat([func(p).permute(0,2,3,1).contiguous().view(p.size(0),-1,n_classes)
                              for p in p_states],1)

    def forward(self, x):
        c5 = self.encoder(x)
        p_states = [self.c5top5(c5.clone()), self.c5top6(c5)]
        p_states.append(self.p6top7(p_states[-1]))
        # Walk back down the pyramid: each merge prepends the next-lower P-level.
        for merge in self.merges:
            p_states = [merge(p_states[0])] + p_states
        for i, smooth in enumerate(self.smoothers[:3]):
            p_states[i] = smooth(p_states[i])
        # Returns class activations, box activations, and each P-level's grid size.
        return [self._apply_transpose(self.classifier, p_states, self.n_classes),
                self._apply_transpose(self.box_regressor, p_states, 4),
                [[p.size(2), p.size(3)] for p in p_states]]

    def __del__(self):
        # Detach the forward hooks when the model is garbage-collected.
        if hasattr(self, "sfs"): self.sfs.remove()
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
The model is a bit complex, but that's not the hardest part. It will spit out an absurdly high number of predictions: for the features P3 to P7 with an image size of 256, we have 32*32 + 16*16 + 8*8 + 4*4 +2*2 locations possible in one of the five feature maps, which gives 1,364 possible detections, multiplied by the number of anchors we choose to attribute to each location (9 below), which makes 12,276 possible hits. A lot of those aren't going to correspond to any object in the picture, and we need to somehow match all those predictions to either nothing or a given bounding box in the picture. Anchor boxes If we look at the feature map of size 4*4, we have 16 locations numbered like below:
# The 16 locations of a 4x4 feature map, numbered row-major.
torch.arange(0,16).long().view(4,4)
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
The most basic way to map one of these features with an actual area inside the image is to create the regular 4 by 4 grid. Our convention is that y is first (like in numpy or PyTorch), and that all coordinates are scaled from -1 to 1 (-1 being top/left, 1 being bottom/right).
def create_grid(size):
    "Create a grid of a given `size`."
    # size may be an int (square grid) or an (H, W) tuple.
    H, W = size if is_tuple(size) else (size,size)
    grid = FloatTensor(H, W, 2)
    # Cell centres scaled to [-1, 1]; a 1-wide/1-tall axis collapses to 0.
    linear_points = torch.linspace(-1+1/W, 1-1/W, W) if W > 1 else tensor([0.])
    # torch.ger is the outer product: broadcast the x coordinates across every row.
    grid[:, :, 1] = torch.ger(torch.ones(H), linear_points).expand_as(grid[:, :, 0])
    linear_points = torch.linspace(-1+1/H, 1-1/H, H) if H > 1 else tensor([0.])
    grid[:, :, 0] = torch.ger(linear_points, torch.ones(W)).expand_as(grid[:, :, 1])
    # Flatten to (H*W, 2), y coordinate first (PyTorch convention).
    return grid.view(-1,2)
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
Let's use a helper function to draw those anchors:
def show_anchors(ancs, size):
    "Scatter-plot anchor centres `ancs` on a grid of the given (H, W) `size`."
    _,ax = plt.subplots(1,1, figsize=(5,5))
    ax.set_xticks(np.linspace(-1,1, size[1]+1))
    ax.set_yticks(np.linspace(-1,1, size[0]+1))
    ax.grid()
    ax.scatter(ancs[:,1], ancs[:,0]) #y is first
    ax.set_yticklabels([])
    ax.set_xticklabels([])
    ax.set_xlim(-1,1)
    ax.set_ylim(1,-1) #-1 is top, 1 is bottom
    # Label each anchor with its index.
    for i, (x, y) in enumerate(zip(ancs[:, 1], ancs[:, 0])):
        ax.annotate(i, xy = (x,y))

size = (4,4)
show_anchors(create_grid(size), size)
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
In practice, we use different ratios and scales of that basic grid to build our anchors, because bounding boxes aren't always a perfect square inside a grid.
def create_anchors(sizes, ratios, scales, flatten=True):
    "Create anchor of `sizes`, `ratios` and `scales`."
    # Each (ratio, scale) pair gives an (h, w) multiplier pair; sqrt(r)/sqrt(1/r)
    # keeps the area constant across ratios at a given scale.
    aspects = [[[s*math.sqrt(r), s*math.sqrt(1/r)] for s in scales] for r in ratios]
    aspects = torch.tensor(aspects).view(-1,2)
    anchors = []
    for h,w in sizes:
        #4 here to have the anchors overlap.
        sized_aspects = 4 * (aspects * torch.tensor([2/h,2/w])).unsqueeze(0)
        base_grid = create_grid((h,w)).unsqueeze(1)
        n,a = base_grid.size(0),aspects.size(0)
        # Each anchor row is (centre_y, centre_x, height, width).
        ancs = torch.cat([base_grid.expand(n,a,2), sized_aspects.expand(n,a,2)], 2)
        anchors.append(ancs.view(h,w,a,4))
    return torch.cat([anc.view(-1,4) for anc in anchors],0) if flatten else anchors

ratios = [1/2,1,2]
scales = [1,2**(-1/3), 2**(-2/3)]
#Paper used [1,2**(1/3), 2**(2/3)] but a bigger size (600) too, so the largest feature map gave anchors that cover less of the image.
sizes = [(2**i,2**i) for i in range(5)]
sizes.reverse()
#Predictions come in the order of the smallest feature map to the biggest
anchors = create_anchors(sizes, ratios, scales)
anchors.size()
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
That's a bit less than in our computation earlier, but this is because it's for the case of (128,128) images (sizes go from (1,1) to (32,32) instead of (2,2) to (64,64)).
import matplotlib.cm as cmx
import matplotlib.colors as mcolors
from cycler import cycler

def get_cmap(N):
    "Return a function mapping 0..N-1 to RGBA colours of the 'Set3' palette."
    color_norm = mcolors.Normalize(vmin=0, vmax=N-1)
    return cmx.ScalarMappable(norm=color_norm, cmap='Set3').to_rgba

num_color = 12
cmap = get_cmap(num_color)
color_list = [cmap(float(x)) for x in range(num_color)]

def draw_outline(o, lw):
    "Add a black outline around artist `o` so it stays visible on any background."
    o.set_path_effects([patheffects.Stroke(
        linewidth=lw, foreground='black'), patheffects.Normal()])

def draw_rect(ax, b, color='white'):
    "Draw rectangle `b` = [x, y, width, height] on `ax`."
    patch = ax.add_patch(patches.Rectangle(b[:2], *b[-2:], fill=False, edgecolor=color, lw=2))
    draw_outline(patch, 4)

def draw_text(ax, xy, txt, sz=14, color='white'):
    "Write `txt` at position `xy` on `ax`."
    text = ax.text(*xy, txt, verticalalignment='top', color=color, fontsize=sz, weight='bold')
    draw_outline(text, 1)

def show_boxes(boxes):
    "Show the `boxes` (size by 4)"
    _, ax = plt.subplots(1,1, figsize=(5,5))
    ax.set_xlim(-1,1)
    ax.set_ylim(1,-1)
    for i, bbox in enumerate(boxes):
        bb = bbox.numpy()
        # Convert center/size (y first) to matplotlib's top-left x/y + width/height.
        rect = [bb[1]-bb[3]/2, bb[0]-bb[2]/2, bb[3], bb[2]]
        draw_rect(ax, rect, color=color_list[i%num_color])
        draw_text(ax, [bb[1]-bb[3]/2,bb[0]-bb[2]/2], str(i), color=color_list[i%num_color])
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
Here is an example of the 9 anchor boxes with different scales/ratios on one region of the image. Now imagine we have this at every location of each of the feature maps.
# The 9 anchors (3 scales x 3 ratios) attached to one grid location.
show_boxes(anchors[900:909])
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
For each anchor, we have one class predicted by the classifier and 4 floats p_y,p_x,p_h,p_w predicted by the regressor. If the corresponding anchor has a center at anc_y, anc_x with dimensions anc_h, anc_w, the predicted bounding box has those characteristics: center = [p_y * anc_h + anc_y, p_x * anc_w + anc_x] height = anc_h * exp(p_h) width = anc_w * exp(p_w) The idea is that a prediction of (0,0,0,0) corresponds to the anchor itself. The next function converts the activations of the model into bounding boxes.
def activ_to_bbox(acts, anchors, flatten=True):
    """Extrapolate bounding boxes on anchors from the model activations.

    `acts` holds (p_y, p_x, p_h, p_w) per anchor; the result is a cthw box:
    center = p_cxy * anchor_hw + anchor_cxy, size = anchor_hw * exp(p_hw),
    so an activation of (0,0,0,0) maps to the anchor itself.
    """
    if flatten:
        # Scaling factors commonly used in SSD/Faster R-CNN style box encodings;
        # they keep the regression targets in a similar numeric range.
        # NOTE: this mutates `acts` in place (as the original did).
        acts.mul_(acts.new_tensor([[0.1, 0.1, 0.2, 0.2]]))
        centers = anchors[...,2:] * acts[...,:2] + anchors[...,:2]
        # Bug fix: sizes must use the h/w activations acts[...,2:], not the
        # center activations acts[...,:2] (matches the inverse bbox_to_activ).
        sizes = anchors[...,2:] * torch.exp(acts[...,2:])
        return torch.cat([centers, sizes], -1)
    else:
        return [activ_to_bbox(act,anc) for act,anc in zip(acts, anchors)]
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
Here is an example with the 3 by 4 regular grid and random predictions.
# A 3x4 grid of square anchors with small random activations applied.
size=(3,4)
anchors = create_grid(size)
anchors = torch.cat([anchors, torch.tensor([2/size[0],2/size[1]]).expand_as(anchors)], 1)
activations = torch.randn(size[0]*size[1], 4) * 0.1
bboxes = activ_to_bbox(activations, anchors)
show_boxes(bboxes)
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
This helper function changes boxes in the format center/height/width to top/left/bottom/right.
def cthw2tlbr(boxes):
    "Convert center/size format `boxes` to top/left bottom/right corners."
    half_sizes = boxes[:,2:] / 2
    return torch.cat([boxes[:,:2] - half_sizes, boxes[:,:2] + half_sizes], 1)
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
Now to decide which predicted bounding box will match a given ground truth object, we will compute the intersection over unions ratios between all the anchors and all the targets, then we will keep the ones that have an overlap greater than a given threshold (0.5).
def intersection(anchors, targets):
    "Compute the sizes of the intersections of `anchors` by `targets`."
    anc_corners, tgt_corners = cthw2tlbr(anchors), cthw2tlbr(targets)
    n_anc, n_tgt = anc_corners.size(0), tgt_corners.size(0)
    # Broadcast to an (n_anc, n_tgt, 4) grid of anchor/target pairs.
    anc_corners = anc_corners.unsqueeze(1).expand(n_anc, n_tgt, 4)
    tgt_corners = tgt_corners.unsqueeze(0).expand(n_anc, n_tgt, 4)
    inter_tl = torch.max(anc_corners[...,:2], tgt_corners[...,:2])
    inter_br = torch.min(anc_corners[...,2:], tgt_corners[...,2:])
    # Negative extents mean no overlap -> clamp to zero area.
    inter_hw = (inter_br - inter_tl).clamp(min=0)
    return inter_hw[...,0] * inter_hw[...,1]
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
Let's see some results, if we have our 12 anchors from before...
# The 12 regular-grid anchors defined above.
show_boxes(anchors)
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
... and those targets (0. is the whole image)
# Three example targets; the first covers the whole image.
targets = torch.tensor([[0.,0.,2.,2.], [-0.5,-0.5,1.,1.], [1/3,0.5,0.5,0.5]])
show_boxes(targets)
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
Then the intersection areas of each anchor with each target are:
# Display the raw intersection areas for the example anchors and targets.
intersection(anchors, targets)

def IoU_values(anchors, targets):
    "Compute the IoU values of `anchors` by `targets`."
    inter = intersection(anchors, targets)
    anc_area = anchors[:,2] * anchors[:,3]
    tgt_area = targets[:,2] * targets[:,3]
    # Union = sum of areas minus the shared part; epsilon avoids division by zero.
    union = anc_area.unsqueeze(1) + tgt_area.unsqueeze(0) - inter
    return inter / (union + 1e-8)
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
And then the IoU values are.
# IoU of each anchor (rows) with each target (columns).
IoU_values(anchors, targets)
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
Then we match an anchor to targets with the following rules: - for each anchor we take the maximum overlap possible with any of the targets. - if that maximum overlap is less than 0.4, we match the anchor box to background, the classifier's target will be that class - if the maximum overlap is greater than 0.5, we match the anchor box to that ground truth object. The classifier's target will be the category of that target - if the maximum overlap is between 0.4 and 0.5, we ignore that anchor in our loss computation - optionally, we force-match for each ground truth object the anchor that has the maximum overlap with it (not sure it helps)
def match_anchors(anchors, targets, match_thr=0.5, bkg_thr=0.4):
    "Match `anchors` to targets. -1 is match to background, -2 is ignore."
    # Start with every anchor set to "ignore" (-2).
    matches = anchors.new(anchors.size(0)).zero_().long() - 2
    if targets.numel() == 0: return matches
    ious = IoU_values(anchors, targets)
    # Best overlap (and which target gives it) for each anchor.
    vals,idxs = torch.max(ious,1)
    matches[vals < bkg_thr] = -1
    matches[vals > match_thr] = idxs[vals > match_thr]
    #Overwrite matches with each target getting the anchor that has the max IoU.
    #vals,idxs = torch.max(ious,0)
    #If idxs contains repetition, this doesn't bug and only the last is considered.
    #matches[idxs] = targets.new_tensor(list(range(targets.size(0)))).long()
    return matches
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
In our previous example, no one had an overlap > 0.5, so unless we use the special rule commented out, there are no matches.
# No overlap exceeds 0.5 here, so nothing matches without the force-match rule.
match_anchors(anchors, targets)
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
With anchors very close to the targets.
# Anchors jittered only slightly from the targets: almost all should match.
size=(3,4)
anchors = create_grid(size)
anchors = torch.cat([anchors, torch.tensor([2/size[0],2/size[1]]).expand_as(anchors)], 1)
activations = 0.1 * torch.randn(size[0]*size[1], 4)
bboxes = activ_to_bbox(activations, anchors)
match_anchors(anchors,bboxes)
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
With anchors in the grey area.
# The extra anchor overlaps between the two thresholds -> it stays ignored (-2).
anchors = create_grid((2,2))
anchors = torch.cat([anchors, torch.tensor([1.,1.]).expand_as(anchors)], 1)
targets = anchors.clone()
anchors = torch.cat([anchors, torch.tensor([[-0.5,0.,1.,1.8]])], 0)
match_anchors(anchors,targets)
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
Does the opposite of cthw2tlbr.
def tlbr2cthw(boxes):
    "Convert top/left bottom/right format `boxes` to center/size corners."
    sizes = boxes[:,2:] - boxes[:,:2]
    centers = boxes[:,:2] + sizes/2
    return torch.cat([centers, sizes], 1)
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
Does the opposite of activ_to_bbox.
def bbox_to_activ(bboxes, anchors, flatten=True):
    """Return the target of the model on `anchors` for the `bboxes`.

    Inverse of `activ_to_bbox`: converts cthw boxes into the (scaled)
    regression activations the model should predict.
    """
    if flatten:
        t_centers = (bboxes[...,:2] - anchors[...,:2]) / anchors[...,2:]
        # Epsilon guards log(0) for degenerate (zero-size) boxes.
        t_sizes = torch.log(bboxes[...,2:] / anchors[...,2:] + 1e-8)
        # Divide by the same scales used in activ_to_bbox so the two stay inverse.
        return torch.cat([t_centers, t_sizes], -1).div_(bboxes.new_tensor([[0.1, 0.1, 0.2, 0.2]]))
    else:
        # Bug fix: the recursive branch called activ_to_bbox on an undefined
        # name `acts`; recurse on this function with the given arguments.
        return [bbox_to_activ(bb, anc) for bb, anc in zip(bboxes, anchors)]
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
We will one-hot encode our targets with the convention that the class of index 0 is the background, which is the absence of any other classes. That is coded by a row of zeros.
def encode_class(idxs, n_classes):
    """One-hot encode `idxs` over `n_classes`, with class 0 (background) as all zeros.

    Class i (i >= 1) maps to a 1 in column i-1; index 0 gives a row of zeros.
    """
    target = idxs.new_zeros(len(idxs), n_classes).float()
    mask = idxs != 0
    # Boolean-mask indexing selects the non-background rows directly,
    # no need to build an explicit row-index tensor.
    target[mask, idxs[mask]-1] = 1
    return target

encode_class(torch.LongTensor([1,2,0,1,3]),3)
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
And now we are ready to build the loss function. It has two parts, one for the classifier and one for the regressor. For the regression, we will use the L1 (potentially smoothed) loss between the predicted activations for an anchor that matches a given object (we ignore the no match or matches to background) and the corresponding bounding box (after going through bbox2activ). For the classification, we use the focal loss, which is a variant of the binary cross entropy used when we have a lot imbalance between the classes to predict (here we will very often have to predict 'background').
class RetinaNetFocalLoss(nn.Module):
    "Focal loss (classification) + smooth-L1 (regression) for RetinaNet outputs."
    def __init__(self, gamma:float=2., alpha:float=0.25, pad_idx:int=0, scales:Collection[float]=None,
                 ratios:Collection[float]=None, reg_loss:LossFunction=F.smooth_l1_loss):
        super().__init__()
        self.gamma,self.alpha,self.pad_idx,self.reg_loss = gamma,alpha,pad_idx,reg_loss
        self.scales = ifnone(scales, [1,2**(-1/3), 2**(-2/3)])
        self.ratios = ifnone(ratios, [1/2,1,2])

    def _change_anchors(self, sizes:Sizes) -> bool:
        # True when the feature-map sizes differ from the cached ones
        # (the anchors must then be rebuilt).
        if not hasattr(self, 'sizes'): return True
        for sz1, sz2 in zip(self.sizes, sizes):
            if sz1[0] != sz2[0] or sz1[1] != sz2[1]: return True
        return False

    def _create_anchors(self, sizes:Sizes, device:torch.device):
        self.sizes = sizes
        self.anchors = create_anchors(sizes, self.ratios, self.scales).to(device)

    def _unpad(self, bbox_tgt, clas_tgt):
        # Strip the leading padding rows (class == pad_idx) added by the collate fn.
        i = torch.min(torch.nonzero(clas_tgt-self.pad_idx))
        return tlbr2cthw(bbox_tgt[i:]), clas_tgt[i:]-1+self.pad_idx

    def _focal_loss(self, clas_pred, clas_tgt):
        encoded_tgt = encode_class(clas_tgt, clas_pred.size(1))
        ps = torch.sigmoid(clas_pred.detach())
        # Focal weighting: down-weight well-classified examples by (1-p)^gamma.
        weights = encoded_tgt * (1-ps) + (1-encoded_tgt) * ps
        alphas = (1-encoded_tgt) * self.alpha + encoded_tgt * (1-self.alpha)
        weights.pow_(self.gamma).mul_(alphas)
        clas_loss = F.binary_cross_entropy_with_logits(clas_pred, encoded_tgt, weights, reduction='sum')
        return clas_loss

    def _one_loss(self, clas_pred, bbox_pred, clas_tgt, bbox_tgt):
        # Loss for one image of the batch.
        bbox_tgt, clas_tgt = self._unpad(bbox_tgt, clas_tgt)
        matches = match_anchors(self.anchors, bbox_tgt)
        bbox_mask = matches>=0
        if bbox_mask.sum() != 0:
            # Regression loss only on anchors matched to a ground-truth object.
            bbox_pred = bbox_pred[bbox_mask]
            bbox_tgt = bbox_tgt[matches[bbox_mask]]
            bb_loss = self.reg_loss(bbox_pred, bbox_to_activ(bbox_tgt, self.anchors[bbox_mask]))
        else: bb_loss = 0.
        # Shift indices so background (-1) becomes 0; ignored anchors (-2 -> -1)
        # are dropped by clas_mask.
        matches.add_(1)
        clas_tgt = clas_tgt + 1
        clas_mask = matches>=0
        clas_pred = clas_pred[clas_mask]
        clas_tgt = torch.cat([clas_tgt.new_zeros(1).long(), clas_tgt])
        clas_tgt = clas_tgt[matches[clas_mask]]
        return bb_loss + self._focal_loss(clas_pred, clas_tgt)/torch.clamp(bbox_mask.sum(), min=1.)

    def forward(self, output, bbox_tgts, clas_tgts):
        clas_preds, bbox_preds, sizes = output
        if self._change_anchors(sizes): self._create_anchors(sizes, clas_preds.device)
        n_classes = clas_preds.size(2)
        # Average the per-image losses over the batch.
        return sum([self._one_loss(cp, bp, ct, bt)
                    for (cp, bp, ct, bt) in zip(clas_preds, bbox_preds, clas_tgts, bbox_tgts)])/clas_tgts.size(0)
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
This is a variant of the L1 loss used in several implementations:
class SigmaL1SmoothLoss(nn.Module):
    "Smooth L1 loss with sigma = 3: quadratic below |diff| = 1/9, linear above."
    def forward(self, output, target):
        abs_diff = torch.abs(target - output)
        quadratic = 4.5 * abs_diff ** 2
        linear = abs_diff - 1/18
        # The two pieces meet at |diff| = 1/9, where both equal 1/18.
        return torch.where(abs_diff <= 1/9, quadratic, linear).mean()
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
Defining the Learner
ratios = [1/2,1,2]
scales = [1,2**(-1/3), 2**(-2/3)]
#scales = [1,2**(1/3), 2**(2/3)] for bigger size
# ResNet-50 body + RetinaNet head; final_bias biases the classifier to background.
encoder = create_body(models.resnet50, cut=-2)
model = RetinaNet(encoder, data.c, final_bias=-4)
crit = RetinaNetFocalLoss(scales=scales, ratios=ratios)
learn = Learner(data, model, loss_func=crit)
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
Why final_bias=-4? That's because we want the network to predict background easily at the beginning (since it's the most common class). At first the final convolution of the classifier is initialized with weights=0 and that bias, so it will return -4 for everyone. If we pass that through a sigmoid
# sigmoid(-4) ~ 0.018: the initial probability given to every class.
torch.sigmoid(tensor([-4.]))
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
We see it'll give a corresponding probability of 0.02 roughly. Then, for transfer learning/discriminative LRs, we need to define how to split between body and custom head.
def retina_net_split(model):
    "Split the model into three layer groups for discriminative learning rates."
    # Two groups for the encoder body, one for the custom head.
    groups = [list(model.encoder.children())[:6], list(model.encoder.children())[6:]]
    return groups + [list(model.children())[1:]]

learn = learn.split(retina_net_split)
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
And now we can train as usual!
# Progressive resizing: train at 128px, then 192px, then 256px,
# each time frozen (head only) then unfrozen (whole network).
learn.freeze()
learn.lr_find()
learn.recorder.plot(skip_end=5)
learn.fit_one_cycle(5, 1e-4)
learn.save('stage1-128')
learn.unfreeze()
learn.fit_one_cycle(10, slice(1e-6, 5e-5))
learn.save('stage2-128')
learn.data = get_data(32,192)
learn.freeze()
learn.lr_find()
learn.recorder.plot()
learn.fit_one_cycle(5, 1e-4)
learn.save('stage1-192')
learn.unfreeze()
learn.fit_one_cycle(10, slice(1e-6, 5e-5))
learn.save('stage2-192')
learn.data = get_data(24,256)
learn.freeze()
learn.fit_one_cycle(5, 1e-4)
learn.save('stage1-256')
learn.unfreeze()
learn.fit_one_cycle(10, slice(1e-6, 5e-5))
learn.save('stage2-256')
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
Results
# Grab one validation batch and run the trained model on it.
learn = learn.load('stage2-256')
img,target = next(iter(data.valid_dl))
with torch.no_grad():
    output = learn.model(img)
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
First we need to remove the padding that was added to collate our targets together.
def unpad(tgt_bbox, tgt_clas, pad_idx=0):
    "Drop the leading padding rows (class == `pad_idx`) and convert boxes to center/size."
    first_real = torch.min(torch.nonzero(tgt_clas - pad_idx))
    return tlbr2cthw(tgt_bbox[first_real:]), tgt_clas[first_real:] - 1 + pad_idx
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
Then we process the outputs of the model: we convert the activations of the regressor to bounding boxes and the predictions to probabilities, only keeping those above a given threshold.
def process_output(output, i, detect_thresh=0.25):
    "Process `output[i]` and return the predicted bboxes above `detect_thresh`."
    clas_pred,bbox_pred,sizes = output[0][i], output[1][i], output[2]
    # Rebuild the anchors matching the feature-map sizes of this output.
    anchors = create_anchors(sizes, ratios, scales).to(clas_pred.device)
    bbox_pred = activ_to_bbox(bbox_pred, anchors)
    clas_pred = torch.sigmoid(clas_pred)
    # Keep predictions whose best class score clears the threshold.
    detect_mask = clas_pred.max(1)[0] > detect_thresh
    bbox_pred, clas_pred = bbox_pred[detect_mask], clas_pred[detect_mask]
    # Clip the boxes to the (-1,1) image coordinates.
    bbox_pred = tlbr2cthw(torch.clamp(cthw2tlbr(bbox_pred), min=-1, max=1))
    scores, preds = clas_pred.max(1)
    return bbox_pred, scores, preds
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
Helper functions to plot the results
def _draw_outline(o:Patch, lw:int):
    "Outline bounding box onto image `Patch`."
    o.set_path_effects([patheffects.Stroke(
        linewidth=lw, foreground='black'), patheffects.Normal()])

def draw_rect(ax:plt.Axes, b:Collection[int], color:str='white', text=None, text_size=14):
    "Draw bounding box on `ax`."
    patch = ax.add_patch(patches.Rectangle(b[:2], *b[-2:], fill=False, edgecolor=color, lw=2))
    _draw_outline(patch, 4)
    if text is not None:
        patch = ax.text(*b[:2], text, verticalalignment='top', color=color, fontsize=text_size, weight='bold')
        _draw_outline(patch,1)

def show_preds(img, output, idx, detect_thresh=0.25, classes=None):
    "Show the raw (pre-NMS) predictions for image `idx` on `img`."
    bbox_pred, scores, preds = process_output(output, idx, detect_thresh)
    bbox_pred, preds, scores = bbox_pred.cpu(), preds.cpu(), scores.cpu()
    t_sz = torch.Tensor([*img.size])[None].float()
    # Convert from (-1,1) center/size coordinates to pixel top-left/size.
    bbox_pred[:,:2] = bbox_pred[:,:2] - bbox_pred[:,2:]/2
    bbox_pred[:,:2] = (bbox_pred[:,:2] + 1) * t_sz/2
    bbox_pred[:,2:] = bbox_pred[:,2:] * t_sz
    bbox_pred = bbox_pred.long()
    _, ax = plt.subplots(1,1)
    for bbox, c, scr in zip(bbox_pred, preds, scores):
        img.show(ax=ax)
        # classes[0] is background, so predicted class c maps to classes[c+1].
        txt = str(c.item()) if classes is None else classes[c.item()+1]
        draw_rect(ax, [bbox[1],bbox[0],bbox[3],bbox[2]], text=f'{txt} {scr:.2f}')
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
And let's have a look at one picture.
# Predictions (before NMS) for the first validation image.
idx = 0
img = data.valid_ds[idx][0]
show_preds(img, output, idx, detect_thresh=0.3, classes=data.classes)
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
It looks like a lot of our anchors are detecting kind of the same object. We use an algorithm called Non-Maximum Suppression to remove near-duplicates: going from the biggest score predicted to the lowest, we take the corresponding bounding boxes and remove all the bounding boxes down the list that have an IoU above a given threshold (0.3 by default here) with this one. We continue the process until we have reached the end of the list.
def nms(boxes, scores, thresh=0.3):
    "Non-maximum suppression: keep top-scoring boxes, drop near-duplicates (IoU >= `thresh`)."
    idx_sort = scores.argsort(descending=True)
    boxes, scores = boxes[idx_sort], scores[idx_sort]
    # `indexes` tracks positions within the sorted order as boxes get filtered out.
    to_keep, indexes = [], torch.LongTensor(range_of(scores))
    while len(scores) > 0:
        # Keep the current best box (mapped back to the original index).
        to_keep.append(idx_sort[indexes[0]])
        iou_vals = IoU_values(boxes, boxes[:1]).squeeze()
        # The best box itself has IoU 1 with itself, so it is removed here too.
        mask_keep = iou_vals < thresh
        if len(mask_keep.nonzero()) == 0: break
        boxes, scores, indexes = boxes[mask_keep], scores[mask_keep], indexes[mask_keep]
    return LongTensor(to_keep)

def process_output(output, i, detect_thresh=0.25):
    # Same as before, but returns empty lists when nothing clears the threshold.
    clas_pred,bbox_pred,sizes = output[0][i], output[1][i], output[2]
    anchors = create_anchors(sizes, ratios, scales).to(clas_pred.device)
    bbox_pred = activ_to_bbox(bbox_pred, anchors)
    clas_pred = torch.sigmoid(clas_pred)
    detect_mask = clas_pred.max(1)[0] > detect_thresh
    bbox_pred, clas_pred = bbox_pred[detect_mask], clas_pred[detect_mask]
    bbox_pred = tlbr2cthw(torch.clamp(cthw2tlbr(bbox_pred), min=-1, max=1))
    if clas_pred.numel() == 0: return [],[],[]
    scores, preds = clas_pred.max(1)
    return bbox_pred, scores, preds

def show_preds(img, output, idx, detect_thresh=0.25, classes=None, ax=None):
    "Show the post-NMS predictions for image `idx` on `img`."
    bbox_pred, scores, preds = process_output(output, idx, detect_thresh)
    if len(scores) != 0:
        to_keep = nms(bbox_pred, scores)
        bbox_pred, preds, scores = bbox_pred[to_keep].cpu(), preds[to_keep].cpu(), scores[to_keep].cpu()
        t_sz = torch.Tensor([*img.size])[None].float()
        # Convert from (-1,1) center/size to pixel top-left/size for plotting.
        bbox_pred[:,:2] = bbox_pred[:,:2] - bbox_pred[:,2:]/2
        bbox_pred[:,:2] = (bbox_pred[:,:2] + 1) * t_sz/2
        bbox_pred[:,2:] = bbox_pred[:,2:] * t_sz
        bbox_pred = bbox_pred.long()
    if ax is None: _, ax = plt.subplots(1,1)
    img.show(ax=ax)
    for bbox, c, scr in zip(bbox_pred, preds, scores):
        txt = str(c.item()) if classes is None else classes[c.item()+1]
        draw_rect(ax, [bbox[1],bbox[0],bbox[3],bbox[2]], text=f'{txt} {scr:.2f}')

def show_results(learn, start=0, n=5, detect_thresh=0.35, figsize=(10,25)):
    "Show `n` validation images: ground truth (left column) vs predictions (right)."
    x,y = learn.data.one_batch(DatasetType.Valid, cpu=False)
    with torch.no_grad():
        z = learn.model.eval()(x)
    _,axs = plt.subplots(n, 2, figsize=figsize)
    for i in range(n):
        img,bbox = learn.data.valid_ds[start+i]
        img.show(ax=axs[i,0], y=bbox)
        show_preds(img, z, start+i, detect_thresh=detect_thresh, classes=learn.data.classes, ax=axs[i,1])

learn = learn.load('stage2-256')
show_results(learn, start=10)
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
mAP A metric often used for this kind of task is the mean Average Precision (our mAP). It relies on computing the cumulated precision and recall for each class, then tries to compute the area under the precision/recall curve we can draw.
def get_predictions(output, idx, detect_thresh=0.05):
    "Decode + NMS the predictions for image `idx`."
    bbox_pred, scores, preds = process_output(output, idx, detect_thresh)
    if len(scores) == 0: return [],[],[]
    to_keep = nms(bbox_pred, scores)
    return bbox_pred[to_keep], preds[to_keep], scores[to_keep]

def compute_ap(precision, recall):
    "Compute the average precision for `precision` and `recall` curve."
    recall = np.concatenate(([0.], list(recall), [1.]))
    precision = np.concatenate(([0.], list(precision), [0.]))
    # Make the precision envelope monotonically decreasing (interpolated AP).
    for i in range(len(precision) - 1, 0, -1):
        precision[i - 1] = np.maximum(precision[i - 1], precision[i])
    idx = np.where(recall[1:] != recall[:-1])[0]
    ap = np.sum((recall[idx + 1] - recall[idx]) * precision[idx + 1])
    return ap

def compute_class_AP(model, dl, n_classes, iou_thresh=0.5, detect_thresh=0.35, num_keep=100):
    "Average precision per class over the data in `dl`."
    tps, clas, p_scores = [], [], []
    classes, n_gts = LongTensor(range(n_classes)),torch.zeros(n_classes).long()
    with torch.no_grad():
        for input,target in progress_bar(dl):
            output = model(input)
            for i in range(target[0].size(0)):
                bbox_pred, preds, scores = get_predictions(output, i, detect_thresh)
                tgt_bbox, tgt_clas = unpad(target[0][i], target[1][i])
                if len(bbox_pred) != 0 and len(tgt_bbox) != 0:
                    ious = IoU_values(bbox_pred, tgt_bbox)
                    max_iou, matches = ious.max(1)
                    detected = []
                    # A prediction is a true positive only for the first match of
                    # a given ground-truth object with the right class and IoU.
                    for i in range_of(preds):
                        if max_iou[i] >= iou_thresh and matches[i] not in detected and tgt_clas[matches[i]] == preds[i]:
                            detected.append(matches[i])
                            tps.append(1)
                        else: tps.append(0)
                    clas.append(preds.cpu())
                    p_scores.append(scores.cpu())
                # Count ground-truth objects of each class for the recall denominator.
                n_gts += (tgt_clas.cpu()[:,None] == classes[None,:]).sum(0)
    tps, p_scores, clas = torch.tensor(tps), torch.cat(p_scores,0), torch.cat(clas,0)
    fps = 1-tps
    # Sort all detections by confidence before accumulating precision/recall.
    idx = p_scores.argsort(descending=True)
    tps, fps, clas = tps[idx], fps[idx], clas[idx]
    aps = []
    #return tps, clas
    for cls in range(n_classes):
        tps_cls, fps_cls = tps[clas==cls].float().cumsum(0), fps[clas==cls].float().cumsum(0)
        if tps_cls.numel() != 0 and tps_cls[-1] != 0:
            precision = tps_cls / (tps_cls + fps_cls + 1e-8)
            recall = tps_cls / (n_gts[cls] + 1e-8)
            aps.append(compute_ap(precision, recall))
        else: aps.append(0.)
    return aps

L = compute_class_AP(learn.model, data.valid_dl, data.c-1)
# The same per-class AP display was re-run several times in the notebook.
for ap,cl in zip(L, data.classes[1:]): print(f'{cl}: {ap:.6f}')
for ap,cl in zip(L, data.classes[1:]): print(f'{cl}: {ap:.6f}')
for ap,cl in zip(L, data.classes[1:]): print(f'{cl}: {ap:.6f}')
for ap,cl in zip(L, data.classes[1:]): print(f'{cl}: {ap:.6f}')
for ap,cl in zip(L, data.classes[1:]): print(f'{cl}: {ap:.6f}')
nbs/dl2/pascal.ipynb
fastai/course-v3
apache-2.0
Basic Poisson Model I won't spend too long on this model, as it was the subject of the previous post. Essentially, you treat the number of goals scored by each team as two independent Poisson distributions (henceforth called the Basic Poisson (BP) model). The shape of each distribution is determined by the average number of goals scored by that team. A little reminder on the mathematical definition of the Poisson distribution: $$ P\left( x \right) = \frac{e^{-\lambda} \lambda ^x }{x!}, \lambda>0 $$ In our case, $\lambda$ represents the team's average or expected goal scoring rate. The Poisson distribution is a decent approximation of a team's scoring frequency. All of the model's discussed here agree on this point; the disagreement centres on how to calculate $\lambda_{home}$ and $\lambda_{away}$.
# construct Poisson for each mean goals value
poisson_pred = np.column_stack([[poisson.pmf(i, epl_1718.mean()[j]) for i in range(8)] for j in range(2)])

fig, ax = plt.subplots(figsize=(9,4))

# plot histogram of actual goals
# Fix: `normed=True` was removed in matplotlib 3.1; `density=True` is the
# identical-behavior replacement.
plt.hist(epl_1718[['HomeGoals', 'AwayGoals']].values, range(9), alpha=0.7, label=['Home', 'Away'],
         density=True, color=["#FFA07A", "#20B2AA"])

# add lines for the Poisson distributions
pois1, = plt.plot([i-0.5 for i in range(1,9)], poisson_pred[:,0],
                  linestyle='-', marker='o',label="Home", color = '#CD5C5C')
pois2, = plt.plot([i-0.5 for i in range(1,9)], poisson_pred[:,1],
                  linestyle='-', marker='o',label="Away", color = '#006400')

leg=plt.legend(loc='upper right', fontsize=13, ncol=2)
leg.set_title("Poisson Actual ", prop = {'size':'14', 'weight':'bold'})
plt.xticks([i-0.5 for i in range(1,9)],[i for i in range(9)])
plt.xlabel("Goals per Match",size=13)
plt.ylabel("Proportion of Matches",size=13)
plt.title("Number of Goals per Match (EPL 2017/18 Season)",size=14,fontweight='bold')
plt.ylim([-0.004, 0.4])
plt.tight_layout()
plt.show()
Jupyter/2018-09-13-predicting-football-results-with-statistical-modelling-dixon-coles-and-time-weighting.ipynb
dashee87/blogScripts
mit
We can formulate the model in mathematical terms: $$ P\left(X_{i,j} = x, Y_{j,i} = y \right) = \frac{e^{-\lambda} \lambda^x }{x!} \frac{e^{-\mu} \mu^y }{y!} \ \text{where } \quad \lambda = \alpha_i \beta_j \gamma \quad \mu = \alpha_j \beta_i $$ In this equation, $i$ and $j$ refer to the home and away teams, respectively; $\alpha$ and $\beta$ denote each team's attack and defensive strength, respectively, while $\gamma$ represents the home advantage factor. So, we need to calculate $\alpha$ and $\beta$ for each team, as well as $\gamma$ (the home field advantage term- it's the same value for every team). As this was explained in the previous post, I'll just show the model output.
# importing the tools required for the Poisson regression model import statsmodels.api as sm import statsmodels.formula.api as smf goal_model_data = pd.concat([epl_1718[['HomeTeam','AwayTeam','HomeGoals']].assign(home=1).rename( columns={'HomeTeam':'team', 'AwayTeam':'opponent','HomeGoals':'goals'}), epl_1718[['AwayTeam','HomeTeam','AwayGoals']].assign(home=0).rename( columns={'AwayTeam':'team', 'HomeTeam':'opponent','AwayGoals':'goals'})]) poisson_model = smf.glm(formula="goals ~ home + team + opponent", data=goal_model_data, family=sm.families.Poisson()).fit() print(poisson_model.summary()) poisson_model.predict(pd.DataFrame(data={'team': 'Arsenal', 'opponent': 'Southampton', 'home':1},index=[1])) poisson_model.predict(pd.DataFrame(data={'team': 'Southampton', 'opponent': 'Arsenal', 'home':0},index=[1]))
Jupyter/2018-09-13-predicting-football-results-with-statistical-modelling-dixon-coles-and-time-weighting.ipynb
dashee87/blogScripts
mit
As an example, Arsenal (playing at home) would be expected to score 2.43 goals against Southampton, while their opponents would get about 0.86 goals on average (I'm using the terms average and expected interchangeably). As each team is treated independently, we can construct a match score probability matrix.
def simulate_match(foot_model, homeTeam, awayTeam, max_goals=10):
    "Score probability matrix for `homeTeam` vs `awayTeam` under `foot_model`."
    home_goals_avg = foot_model.predict(pd.DataFrame(data={'team': homeTeam,
                                                           'opponent': awayTeam,'home':1},
                                                     index=[1])).values[0]
    away_goals_avg = foot_model.predict(pd.DataFrame(data={'team': awayTeam,
                                                           'opponent': homeTeam,'home':0},
                                                     index=[1])).values[0]
    team_pred = [[poisson.pmf(i, team_avg) for i in range(0, max_goals+1)] for team_avg in [home_goals_avg, away_goals_avg]]
    # Outer product: independent home/away Poissons give the joint score probabilities.
    return(np.outer(np.array(team_pred[0]), np.array(team_pred[1])))

ars_sou = simulate_match(poisson_model, 'Arsenal', 'Southampton', max_goals=10)
print(ars_sou[0:5, 0:5])

from matplotlib.colors import ListedColormap

def matrix_gif(matrix, colour_matrix, colour_map, subtitle="", heatmap=False, alpha=0.8):
    "Plot `matrix` as an annotated grid, coloured by `colour_matrix`/`colour_map` (or a heatmap)."
    fig, ax1 = plt.subplots(1, figsize=(5,5))
    if heatmap:
        ax1.matshow(matrix, alpha=alpha)
    else:
        ax1.matshow(colour_matrix, cmap=colour_map, alpha=alpha)
    ax1.tick_params(axis=u'both', which=u'both',length=0)
    ax1.grid(which='major', axis='both', linestyle='')
    ax1.set_xlabel('Away Team Goals', fontsize=12)
    ax1.set_ylabel('Home Team Goals', fontsize=12)
    ax1.xaxis.set_label_position('top')
    nrows, ncols = matrix.shape
    # Annotate every cell with its probability.
    for i in range(nrows):
        for j in range(ncols):
            c = matrix[i][j]
            ax1.text(j, i, str(round(c,4)), va='center', ha='center', size=13)
    plt.figtext(0.5, 0.05, subtitle, horizontalalignment='center',
                fontsize=14, multialignment='left', fontweight='bold')
    return fig

cmap = ListedColormap(['w', '#04f5ff', '#00ff85', '#e90052'])
matrix = simulate_match(poisson_model, 'Arsenal', 'Southampton', max_goals=5)
matn = len(matrix)
# Frame 0: the raw probability matrix as a heatmap.
matrix_gif(matrix, matrix, ListedColormap(['w']), heatmap=True, alpha=0.6,
           subtitle="Match Score Probability Matrix").savefig("match_matrix_0.png")
plt.close()
# Frames 1-5: highlight the home-win / away-win / draw / over-2.5-goals regions.
for t,(mat,colour,subtitle) in enumerate(zip([np.zeros((matn, matn)), np.tril(np.ones((matn,matn)),-1),
                                              np.triu(np.ones((matn,matn))*2,1), np.diag([3]*matn),
                                              np.array([0 if i+j<3 else 1 for i in range(matn) for j in range(matn)]).reshape(matn,matn)],
                                             ['w', '#04f5ff', '#00ff85', '#e90052','#EAF205'],
                                             ['Match Score Probability Matrix', 'Home Win', 'Away Win', 'Draw', 'Over 2.5 goals'])):
    matrix_gif(matrix, mat, ListedColormap(['w'] + [colour]), heatmap=False, alpha=0.6,
               subtitle=subtitle).savefig("match_matrix_{}.png".format(t+1))
    plt.close()
Jupyter/2018-09-13-predicting-football-results-with-statistical-modelling-dixon-coles-and-time-weighting.ipynb
dashee87/blogScripts
mit
First published by Maher in 1982, the BP model still serves a good starting point from which you can add features that more closely reflect the reality. That brings us onto the Dixon Coles (DC) model. Dixon-Coles Model In their 1997 paper, Mark Dixon and Stuart Coles proposed two specific improvements to the BP model: * Introduce an interaction term to correct underestimated frequency of low scoring matches * Apply time decay component so that recent matches are weighted more strongly The authors claim that low score results (0-0, 1-0, 0-1 and 1-1) are inherently under-reported by the BP model. In the paper, they provide some analysis that supports their case- though I wouldn't call their approach particularly rigorous. The matrix below shows the average difference between actual and model predicted scorelines for the 2005/06 season all the way up to the 2017/18 season. Green cells imply the model underestimated those scorelines, while red cells suggest overestimation- the colour strength indicates the level of disagreement.
def poiss_actual_diff(football_url, max_goals):
    """Difference between the basic Poisson model's score probabilities and the
    observed score frequencies for one season downloaded from `football_url`.

    Returns a (max_goals, max_goals) array: positive cells mean the model
    assigns more probability than was observed.
    """
    epl_1718 = pd.read_csv(football_url)
    epl_1718 = epl_1718[['HomeTeam','AwayTeam','FTHG','FTAG']]
    epl_1718 = epl_1718.rename(columns={'FTHG': 'HomeGoals', 'FTAG': 'AwayGoals'})
    team_pred = [[poisson.pmf(i, team_avg) for i in range(0, max_goals)] \
                 for team_avg in [epl_1718['HomeGoals'].mean(), epl_1718['AwayGoals'].mean()]]
    # Bug fix: the reshape was hard-coded to (6,6); honour the `max_goals` parameter.
    return np.outer(np.array(team_pred[0]), np.array(team_pred[1])) - \
           np.array([sum((epl_1718['HomeGoals']==i) & (epl_1718['AwayGoals']==j))
                     for i in range(max_goals) for j in range(max_goals)]).reshape((max_goals, max_goals))/len(epl_1718)

# Average the model-vs-actual difference over the 2005/06 - 2017/18 seasons.
year_arrays = []
for year in range(2005,2018):
    year_arrays.append(poiss_actual_diff("http://www.football-data.co.uk/mmz4281/{}{}/E0.csv".format(
        str(year)[-2:], str(year+1)[-2:]),6))

cmap = sns.diverging_palette(10, 133, as_cmap=True)
fig, ax = plt.subplots(figsize=(5,5))
with sns.axes_style("white"):
    ax = sns.heatmap(np.mean(year_arrays, axis=0), annot=True, fmt='.4f', cmap=cmap,
                     vmin=-0.013, vmax=.013, center=0.00, square=True, linewidths=.5,
                     annot_kws={"size": 11}, cbar_kws={"shrink": .8})
ax.tick_params(axis=u'both', which=u'both',length=0)
ax.grid(which='major', axis='both', linestyle='')
ax.set_xlabel('Away Team Goals', fontsize=13)
ax.set_ylabel('Home Team Goals', fontsize=13)
ax.xaxis.set_label_position('top')
ax.xaxis.set_ticks_position('top')
plt.figtext(0.45, 0.1, 'Actual Proportion - Model Probability', horizontalalignment='center',
            fontsize=14, multialignment='left', fontweight='bold')
plt.tight_layout()
plt.show()
Jupyter/2018-09-13-predicting-football-results-with-statistical-modelling-dixon-coles-and-time-weighting.ipynb
dashee87/blogScripts
mit
There does seem to be an issue around low scoring draws, though it is less apparent with 1-0 and 0-1 results. The Dixon-Coles (DC) model applies a correction to the BP model. It can be written in these mathematical terms: $$ P\left(X_{i,j} = x, Y_{j,i} = y \right) = \tau_{\lambda, \mu}(x, y) \frac{e^{-\lambda} \lambda^x }{x!} \frac{e^{-\mu} \mu^y }{y!} \ \text{where } \quad \lambda = \alpha_i \beta_j \gamma \quad \mu = \alpha_j \beta_i \ \tau_{\lambda, \mu}(x, y) = \begin{cases} 1 - \lambda \mu \rho & \text{if $x = y = 0$} \ 1 + \lambda \rho & \text{if $x=0$, $y=1$} \ 1 + \mu \rho & \text{if $x=1$, $y=0$} \ 1 - \rho & \text{if $x = y = 1$} \ 1 & \text{otherwise} \ \end{cases} $$ The key difference over the BP model is the addition of the $\tau$ (tau) function. It is highly dependent on the $\rho$ (rho) parameter, which controls the strength of the correction (note: setting $\rho$=0 equates to the standard BP model). We can easily convert $\tau_{\lambda, \mu}(x, y)$ to Python code.
def rho_correction(x, y, lambda_x, mu_y, rho):
    """Dixon-Coles tau adjustment for the four low-scoring results.

    Returns the multiplicative correction applied to the independent-Poisson
    probability of a (x, y) scoreline; 1.0 for any score outside the
    0-0 / 0-1 / 1-0 / 1-1 cases.
    """
    corrections = {
        (0, 0): 1 - (lambda_x * mu_y * rho),
        (0, 1): 1 + (lambda_x * rho),
        (1, 0): 1 + (mu_y * rho),
        (1, 1): 1 - rho,
    }
    return corrections.get((x, y), 1.0)
Jupyter/2018-09-13-predicting-football-results-with-statistical-modelling-dixon-coles-and-time-weighting.ipynb
dashee87/blogScripts
mit
Unfortunately, you can't just update your match score matrix with this function; you need to recalculate the various coefficients that go into the model. And unfortunately again, you can't just implement an off the shelf generalised linear model, as we did before. We have to construct the likelihood function and find the coefficients that maximise it- a technique known as Maximum Likelihood Estimation. With matches indexed $k=1,\dots,N$ and corresponding scores ($x_k$, $y_k$), this is the likelihood function that we seek to maximise: $$ L(\alpha_i, \beta_i, \rho, \gamma, i=1,\dots,n) = \prod_{k=1}^{N} \tau_{\lambda_k,\mu_k}(x_k, y_k) \ \frac{e^{-\lambda} \lambda^{x_k} }{x_k!} \frac{e^{-\mu} \mu^{y_k} }{y_k!} \ \text{where } \quad \lambda_k = \alpha_{i(k)} \beta_{j(k)} \gamma \quad \mu_k = \alpha_{j(k)} \beta_{i(k)} $$ In this equation, $i(k)$ and $j(k)$ respectively denote the indices of the home and away teams in match $k$. For a few different reasons (numerical precision, practicality, etc.), we'll actually maximise the log-likelihood function. As the logarithm is a strictly increasing function (i.e. $\log(b) > \log(a) \ \forall \ b > a$), both likelihood functions are maximised at the same point. Also, recall that $\log(a \ b) = \log(a) + \log(b)$. We can thus write the log-likelihood function in Python code.
def dc_log_like(x, y, alpha_x, beta_x, alpha_y, beta_y, rho, gamma):
    """Log-likelihood of a single (x, y) scoreline under the Dixon-Coles model.

    alpha/beta are attack/defence strengths, gamma is home advantage and rho
    drives the low-score correction; rates are exponentiated so parameters are
    unconstrained (log link).
    """
    home_rate = np.exp(alpha_x + beta_y + gamma)
    away_rate = np.exp(alpha_y + beta_x)
    tau = rho_correction(x, y, home_rate, away_rate, rho)
    return (np.log(tau)
            + np.log(poisson.pmf(x, home_rate))
            + np.log(poisson.pmf(y, away_rate)))
Jupyter/2018-09-13-predicting-football-results-with-statistical-modelling-dixon-coles-and-time-weighting.ipynb
dashee87/blogScripts
mit
You may have noticed that dc_log_like included a transformation of $\lambda$ and $\mu$, where $\lambda = \exp(\alpha_i + \beta_j + \gamma)$ and $\mu = \exp(\alpha_j + \beta_i)$, so that we're essentially trying to calculate expected log goals. This is equivalent to the log link function in the previous BP glm implementation. It shouldn't really affect model accuracy, it just means that convergence of the maximisation algorithm should be easier as $\lambda, \mu > 0 \ \forall \ \alpha, \beta, \gamma$. Non-positive lambdas are not compatible with a Poisson distribution, so this would return warnings and/or errors during implementation. We're now ready to find the parameters that maximise the log likelihood function. Basically, you design a function that takes a set of model parameters as an argument. You set some initial values and potentially include some constraints and select the appropriate optimisation algorithm. I've opted for scipy's minimise function (a possible alternative is fmin- note: the functions seek to minimise the negative log-likelihood). It employs a process analogous to gradient descent, so that the algorithm iteratively converges to the optimal parameter set. The computation can be quite slow as it's forced to approximate the derivatives. If you're not as lazy as me, you could potentially speed it up by manually constructing the partial derivatives. In line with the original Dixon Coles paper and the opisthokonta blog, I've added the constraint that $\frac{1}{n}\sum_{i} \alpha_{i}=1$ (i.e. the average attack strength value is 1). This step isn't strictly necessary, but it means that it should return a unique solution (otherwise, the model would suffer from overparamterisation and each execution would return different coefficients). Okay, we're ready to find the coefficients that maximise the log-likelihood function for the 2017/18 EPL season.
def solve_parameters(dataset, debug = False, init_vals=None, options={'disp': True, 'maxiter':100}, constraints = [{'type':'eq', 'fun': lambda x: sum(x[:20])-20}] , **kwargs): teams = np.sort(dataset['HomeTeam'].unique()) # check for no weirdness in dataset away_teams = np.sort(dataset['AwayTeam'].unique()) if not np.array_equal(teams, away_teams): raise ValueError("Something's not right") n_teams = len(teams) if init_vals is None: # random initialisation of model parameters init_vals = np.concatenate((np.random.uniform(0,1,(n_teams)), # attack strength np.random.uniform(0,-1,(n_teams)), # defence strength np.array([0, 1.0]) # rho (score correction), gamma (home advantage) )) def dc_log_like(x, y, alpha_x, beta_x, alpha_y, beta_y, rho, gamma): lambda_x, mu_y = np.exp(alpha_x + beta_y + gamma), np.exp(alpha_y + beta_x) return (np.log(rho_correction(x, y, lambda_x, mu_y, rho)) + np.log(poisson.pmf(x, lambda_x)) + np.log(poisson.pmf(y, mu_y))) def estimate_paramters(params): score_coefs = dict(zip(teams, params[:n_teams])) defend_coefs = dict(zip(teams, params[n_teams:(2*n_teams)])) rho, gamma = params[-2:] log_like = [dc_log_like(row.HomeGoals, row.AwayGoals, score_coefs[row.HomeTeam], defend_coefs[row.HomeTeam], score_coefs[row.AwayTeam], defend_coefs[row.AwayTeam], rho, gamma) for row in dataset.itertuples()] return -sum(log_like) opt_output = minimize(estimate_paramters, init_vals, options=options, constraints = constraints, **kwargs) if debug: # sort of hacky way to investigate the output of the optimisation process return opt_output else: return dict(zip(["attack_"+team for team in teams] + ["defence_"+team for team in teams] + ['rho', 'home_adv'], opt_output.x)) params = solve_parameters(epl_1718) params
Jupyter/2018-09-13-predicting-football-results-with-statistical-modelling-dixon-coles-and-time-weighting.ipynb
dashee87/blogScripts
mit
The optimal rho value (-0.1285) returned by the model fits quite nicely with the value (-0.13) given in the equivalent opisthokonta blog post. We can now start making some predictions by constructing match score matrices based on these model parameters. This part is quite similar to BP model, except for the correction applied to the 0-0, 1-0, 0-1 and 1-1 matrix elements.
def calc_means(param_dict, homeTeam, awayTeam):
    """Expected goals [home, away] for a fixture from fitted DC parameters."""
    return [np.exp(param_dict['attack_'+homeTeam] + param_dict['defence_'+awayTeam] + param_dict['home_adv']),
            np.exp(param_dict['defence_'+homeTeam] + param_dict['attack_'+awayTeam])]


def dixon_coles_simulate_match(params_dict, homeTeam, awayTeam, max_goals=10):
    """Scoreline probability matrix (home goals x away goals) under Dixon-Coles.

    Builds the independent-Poisson outer product, then applies the rho
    correction to the four low-scoring cells (0-0, 0-1, 1-0, 1-1).
    """
    team_avgs = calc_means(params_dict, homeTeam, awayTeam)
    team_pred = [[poisson.pmf(i, team_avg) for i in range(0, max_goals+1)]
                 for team_avg in team_avgs]
    output_matrix = np.outer(np.array(team_pred[0]), np.array(team_pred[1]))
    # BUG FIX: rho was previously read from the *global* `params` dict instead
    # of the params_dict argument, so any other parameter set passed in had its
    # rho silently ignored.
    correction_matrix = np.array([[rho_correction(home_goals, away_goals, team_avgs[0],
                                                  team_avgs[1], params_dict['rho'])
                                   for away_goals in range(2)]
                                  for home_goals in range(2)])
    output_matrix[:2, :2] = output_matrix[:2, :2] * correction_matrix
    return output_matrix


ars_sou_dc = dixon_coles_simulate_match(params, 'Arsenal', 'Southampton', max_goals=10)
# Compare outcome probabilities: [Basic Poisson, Dixon-Coles]
print("Arsenal Win")
print('; '.join("{0}: {1:.5f}".format(model, prob) for model, prob in
                zip(["Basic Poisson", "Dixon-Coles"],
                    list(map(lambda x: np.sum(np.tril(x, -1)), [ars_sou, ars_sou_dc])))))
print("Southampton Win")
print('; '.join("{0}: {1:.5f}".format(model, prob) for model, prob in
                zip(["Basic Poisson", "Dixon-Coles"],
                    list(map(lambda x: np.sum(np.triu(x, 1)), [ars_sou, ars_sou_dc])))))
print("Draw")
print('; '.join("{0}: {1:.5f}".format(model, prob) for model, prob in
                zip(["Basic Poisson", "Dixon-Coles"],
                    list(map(lambda x: np.sum(np.diag(x)), [ars_sou, ars_sou_dc])))))
Jupyter/2018-09-13-predicting-football-results-with-statistical-modelling-dixon-coles-and-time-weighting.ipynb
dashee87/blogScripts
mit
As you can see, the DC model reports a higher probability of a draw compared to the BP model. In fact, you can plot the difference in the match score probability matrices between the two models.
# Heatmap of the cell-wise difference between the Basic Poisson and
# Dixon-Coles scoreline probability matrices for Arsenal v Southampton.
# Only the four low-scoring cells (0-0, 0-1, 1-0, 1-1) should be non-zero,
# since that is all the rho correction touches.
cmap = sns.diverging_palette(10, 133, as_cmap=True)
fig, ax = plt.subplots(figsize=(5,5))
with sns.axes_style("white"):
    ax = sns.heatmap(simulate_match(poisson_model, 'Arsenal', 'Southampton', max_goals=5) - \
                     dixon_coles_simulate_match(params, 'Arsenal', 'Southampton', max_goals=5),
                     annot=True, fmt='.4f', cmap=cmap,
                     vmin=-0.013, vmax=.013, center=0.00, square=True, linewidths=.5,
                     annot_kws={"size": 11}, cbar_kws={"shrink": .8})
# cosmetic tweaks: no tick marks, axis labels on top to read like a matrix
ax.tick_params(axis=u'both', which=u'both',length=0)
ax.grid(which='major', axis='both', linestyle='')
ax.set_xlabel('Away Team Goals', fontsize=13)
ax.set_ylabel('Home Team Goals', fontsize=13)
ax.xaxis.set_label_position('top')
ax.xaxis.set_ticks_position('top')
plt.figtext(0.45, 0.07, ' BP Probs - DC Probs \nArsenal v Southampton',
            horizontalalignment='center', fontsize=14,
            multialignment='left', fontweight='bold')
plt.tight_layout()
plt.show()
Jupyter/2018-09-13-predicting-football-results-with-statistical-modelling-dixon-coles-and-time-weighting.ipynb
dashee87/blogScripts
mit
In one way, this is a good plot. The correction was only intended to have an effect on 4 specific match results (0-0, 1-0, 0-1 and 1-1) and that's what has happened. On the other hand, that was a lot of hard work to essentially tweak the existing model. And that's without even considering whether it was a beneficial adjustment. Without exploring that point any further, I'm going to discuss the second advancement introduced by the DC model. Dixon-Coles Time Decay Model Crystal Palace famously (!) lost their opening seven fixtures of the 2017/18 EPL season, conceding 17 times and scoring zero goals. During his short reign, Frank de Boer had disastrously tried to transform Palace into a more attractive, possession-based side. Under their new manager, Roy Hodgson, they returned to their traditional counter attacking style. They recovered well from their poor start to end the season in a respectable 11th place. Intuitively, if you were trying to predict a Crystal Palace match in January 2018, you would want the model to somewhat discount those losses in August and September 2017. That's the rationale behind adding a time component to the adjusted Poisson model outlined above. How exactly to down-weight those earlier games is the tricky part. Two weighting options offered in the Dixon-Coles paper are illustrated below.
# Illustrate the two time-decay weighting functions from the Dixon-Coles
# paper: a hard cut-off (top panel) and negative exponentials exp(-xi*t)
# for three values of xi (bottom panel).
# NOTE(review): every plot call uses label='Component 1'; the labels are
# never shown (no legend is drawn), so they appear to be vestigial.
fig,(ax1,ax2) = plt.subplots(2, 1, figsize=(10,5))
ax1.plot(range(1000), [0 if y >600 else 1 for y in range(1000)],
         label='Component 1', color='#38003c', marker='')
ax2.plot(range(1000), np.exp([y*-0.005 for y in range(1000)]),
         label='Component 1', color='#07F2F2', marker='')
ax2.plot(range(1000), np.exp([y*-0.003 for y in range(1000)]),
         label='Component 1', color='#05F26C', marker='')
ax2.plot(range(1000), np.exp([y*-0.001 for y in range(1000)]),
         label='Component 1', color='#e90052', marker='')
ax1.set_ylim([-0.05,1.05])
ax2.set_ylim([-0.05,1.05])
ax1.set_xlim([-0.5,1000])
ax2.set_xlim([-0.5,1000])
ax1.set_xticklabels([])
ax2.xaxis.set_tick_params(labelsize=12)
ax1.yaxis.set_tick_params(labelsize=12)
ax2.yaxis.set_tick_params(labelsize=12)
ax1.set_title("Time Decay Weighting Functions",size=14,fontweight='bold')
ax2.set_xlabel("Number of Days Ago",size=13)
ax1.set_ylabel("ϕ(t)",size=13)
ax2.set_ylabel("ϕ(t)",size=13)
# hand-placed annotations spelling out each phi(t) formula
ax1.text(830, 0.5, '1 $t \leq \mathregular{t_0}$\n0 $t > \mathregular{t_0}$',
         verticalalignment='bottom', horizontalalignment='left', color='black', fontsize=15)
ax1.text(800, 0.5, '{', verticalalignment='bottom', horizontalalignment='left',
         color='black', fontsize=44)
ax1.text(730, 0.62, 'ϕ(t) = ', verticalalignment='bottom', horizontalalignment='left',
         color='black', fontsize=15)
ax2.text(730, 0.62, 'ϕ(t) = exp(−ξt)', verticalalignment='bottom', horizontalalignment='left',
         color='black', fontsize=15)
ax2.text(250, 0.8, 'ξ = 0.001', verticalalignment='bottom', horizontalalignment='left',
         color='#e90052', fontsize=15)
ax2.text(250, 0.5, 'ξ = 0.003', verticalalignment='bottom', horizontalalignment='left',
         color='#05F26C', fontsize=15)
ax2.text(250, 0.0, 'ξ = 0.005', verticalalignment='bottom', horizontalalignment='left',
         color='#07F2F2', fontsize=15)
plt.tight_layout()
plt.show()
Jupyter/2018-09-13-predicting-football-results-with-statistical-modelling-dixon-coles-and-time-weighting.ipynb
dashee87/blogScripts
mit
The first option forces the model to only consider matches within some predefined period (e.g. since the start of the season), while the negative exponential downweights match results more strongly going further into the past. The refined model can be written in these mathematical terms: $$ L(\alpha_i, \beta_i, \rho, \gamma, i=1,\dots,n) = \prod_{k \in A_t} \left[ \tau_{\lambda_k,\mu_k}(x_k, y_k) \, \frac{e^{-\lambda_k} \lambda_k^{x_k} }{x_k!} \frac{e^{-\mu_k} \mu_k^{y_k} }{y_k!} \right]^{\phi(t-t_k)} $$ where $t_k$ represents the time that match $k$ was played, $A_t = \{k: t_k < t\}$ (i.e. the set of matches played before time $t$), $\alpha$, $\beta$, $\gamma$ and $\tau$ are defined as before. $\phi$ represents the non-increasing weighting function. Copying the original Dixon Coles paper, we'll set $\phi(t)$ to be a negative exponential with rate $\xi$ (called xi). As before, we need to determine the parameters that maximise this likelihood function. We can't just feed this equation into a minimisation algorithm for various reasons (e.g. we can trivially maximise this function by increasing $\xi$). Instead, we'll fix $\xi$ and determine the remaining parameters the same way as before. We can thus write the corresponding log-likelihood function in the following Python code (recall $\log(a^b) = b \log(a)$). Note how $\xi$=0 equates to the standard non-time weighted log-likelihood function.
def dc_log_like_decay(x, y, alpha_x, beta_x, alpha_y, beta_y, rho, gamma, t, xi=0):
    """Time-weighted Dixon-Coles log-likelihood of one match.

    The plain DC log-likelihood is multiplied by the weight exp(-xi*t),
    where t is how long ago the match was played; xi=0 reduces to the
    unweighted model.
    """
    home_rate = np.exp(alpha_x + beta_y + gamma)
    away_rate = np.exp(alpha_y + beta_x)
    weight = np.exp(-xi * t)
    match_log_like = (np.log(rho_correction(x, y, home_rate, away_rate, rho))
                      + np.log(poisson.pmf(x, home_rate))
                      + np.log(poisson.pmf(y, away_rate)))
    return weight * match_log_like
Jupyter/2018-09-13-predicting-football-results-with-statistical-modelling-dixon-coles-and-time-weighting.ipynb
dashee87/blogScripts
mit
To determine the optimal value of $\xi$, we'll select the model that makes the best predictions. Repeating the process in the Dixon-Coles paper, rather than working on match score predictions, the models will be assessed on match result predictions. Essentially, the model that predicted the actual match results with the highest probability will be deemed the winner. An obvious flaw here is that only one match result is considered. For example, if the result was a home win, then the draw and away win probabilities are ignored. Alternative approaches could utilise Ranked Probability Scores or betting probabilities. But we'll keep things simple and replicate the Dixon Coles paper. We can redefine the objective in mathematical terms; we wish to find $\xi$ that maximises $S(\xi)$: $$ S(\xi) = \sum_{k=1}^{N} \left( \delta^{H}_{k} \log p^{H}_{k} + \delta^{A}_{k} \log p^{A}_{k} + \delta^{D}_{k} \log p^{D}_{k} \right) $$ This looks more complicated than it really is. The $\delta$ terms just capture the match result e.g. $\delta^{H}_{k}$ = 1 if match $k$ ended in a home win, while the $p$ terms are simply the match result probabilities. For example, we can rewrite $p^{H}_{k}$ (probability of home win): $$ p^{H}_{k} = \sum_{(l,m) \in B_H} P(X_k = l, Y_k = m), \text{ where } B_H = \{(l,m): l>m\} $$ Each of these $p$ terms translates to the matrix operations outlined previously. One part that will change is the log likelihood function. To assess the predictive accuracy of the model, we'll utilise an approach analogous to the validation set in machine learning. Let's say we're trying to predict the fixtures occurring on the 13th January 2018. With $\xi$ fixed to a specific value, we use all of the previous results in that season to build a model. We determine how that model predicted the actual results of those matches with the above equations. 
We move onto the next set of fixtures (say 20th January) and build the model again- this time including the 13th January games- and assess how well it predicted the results of those matches. We repeat this process for the rest of the 2017/18 season. When we sum up all of these predictions, you have calculated what is called the predicted profile log-likelihood for that value of $\xi$. However, a new model must be built for each set of fixtures, so this can be quite slow. I have taken a few steps to speed up the computations: Predicting the fixtures for the last 100 days of the 2017/18 EPL season. This is probably preferable anyway, as early season predictions would be quite unreliable. Forming match days consisting of three consecutive days (i.e. on Saturday we'll try to predict matches taking place on Saturday, Sunday and Monday). This should be okay, as teams tend not to play more than once in three days (except at Christmas, which isn't included in the validation period). We need to make some slight adjustments to the epl_1718 dataframe to include columns that represent the number of days since the completion of that fixture as well as the match result (home, away or draw).
# Reload the 2017/18 season, this time keeping the full-time result (FTR)
# and adding the number of days between each fixture and the season's
# final match day (time_diff) for the decay weighting.
epl_1718 = pd.read_csv("http://www.football-data.co.uk/mmz4281/1718/E0.csv")
epl_1718['Date'] = pd.to_datetime(epl_1718['Date'], format='%d/%m/%y')
epl_1718['time_diff'] = (max(epl_1718['Date']) - epl_1718['Date']).dt.days
wanted_cols = ['HomeTeam', 'AwayTeam', 'FTHG', 'FTAG', 'FTR', 'time_diff']
epl_1718 = epl_1718[wanted_cols].rename(columns={'FTHG': 'HomeGoals',
                                                 'FTAG': 'AwayGoals'})
epl_1718.head()
Jupyter/2018-09-13-predicting-football-results-with-statistical-modelling-dixon-coles-and-time-weighting.ipynb
dashee87/blogScripts
mit
With this dataframe, we're now ready to compare different values of $\xi$. To speed up this process even further, I made the code parallelisable and ran it across my computer's multiple (4) cores (see Python file). Here's an example of how you would build a model for a given value of $\xi$.
def solve_parameters_decay(dataset, xi=0.001, debug = False, init_vals=None,
                           options={'disp': True, 'maxiter':100},
                           constraints = [{'type':'eq', 'fun': lambda x: sum(x[:20])-20}] , **kwargs):
    """Fit the time-weighted Dixon-Coles model, down-weighting old matches by exp(-xi*t).

    dataset needs HomeTeam/AwayTeam/HomeGoals/AwayGoals/time_diff columns;
    xi is the (fixed) decay rate; remaining arguments mirror solve_parameters.
    NOTE(review): the default constraint assumes a 20-team league.
    Returns a dict of 'attack_<team>', 'defence_<team>', 'rho', 'home_adv'.
    """
    teams = np.sort(dataset['HomeTeam'].unique())
    # check for no weirdness in dataset: every team must appear home and away
    away_teams = np.sort(dataset['AwayTeam'].unique())
    if not np.array_equal(teams, away_teams):
        raise ValueError("something not right")
    n_teams = len(teams)
    if init_vals is None:
        # random initialisation of model parameters
        init_vals = np.concatenate((np.random.uniform(0,1,(n_teams)), # attack strength
                                    np.random.uniform(0,-1,(n_teams)), # defence strength
                                    np.array([0,1.0]) # rho (score correction), gamma (home advantage)
                                   ))

    def dc_log_like_decay(x, y, alpha_x, beta_x, alpha_y, beta_y, rho, gamma, t, xi=xi):
        # exp(-xi*t)-weighted log-likelihood of a single match
        lambda_x, mu_y = np.exp(alpha_x + beta_y + gamma), np.exp(alpha_y + beta_x)
        return np.exp(-xi*t) * (np.log(rho_correction(x, y, lambda_x, mu_y, rho)) +
                                np.log(poisson.pmf(x, lambda_x)) + np.log(poisson.pmf(y, mu_y)))

    def estimate_paramters(params):
        # negative weighted log-likelihood over all matches (scipy minimises)
        score_coefs = dict(zip(teams, params[:n_teams]))
        defend_coefs = dict(zip(teams, params[n_teams:(2*n_teams)]))
        rho, gamma = params[-2:]
        log_like = [dc_log_like_decay(row.HomeGoals, row.AwayGoals, score_coefs[row.HomeTeam],
                                      defend_coefs[row.HomeTeam], score_coefs[row.AwayTeam],
                                      defend_coefs[row.AwayTeam], rho, gamma, row.time_diff, xi=xi)
                    for row in dataset.itertuples()]
        return -sum(log_like)

    # BUG FIX: **kwargs were accepted by the signature but never forwarded to
    # the optimiser (unlike solve_parameters); pass them through for consistency.
    opt_output = minimize(estimate_paramters, init_vals, options=options,
                          constraints=constraints, **kwargs)
    if debug:
        # sort of hacky way to investigate the output of the optimisation process
        return opt_output
    else:
        return dict(zip(["attack_"+team for team in teams] +
                        ["defence_"+team for team in teams] +
                        ['rho', 'home_adv'], opt_output.x))

params_xi = solve_parameters_decay(epl_1718, xi=0.0018)
params_xi

# NOTE(review): 0.0035 appears twice in xi_vals (and its score twice in
# xi_scores) -- looks like an accidental duplicate in the original scan.
xi_vals = [0.0, 0.0002, 0.0004, 0.0006, 0.0008, 0.001, 0.0012, 0.0014, 0.0016, 0.0018,
           0.002, 0.0025, 0.003, 0.0035, 0.0035, 0.004, 0.0045, 0.005]
# I pulled the scores from files on my computer that had been generated separately:
# xi_scores = []
# for xi in xi_vals:
#     with open('find_xi__{}.txt'.format(str(xi)[2:]), 'rb') as fp:
#         xi_scores.append(sum(pickle.load(fp)))
xi_scores = [-125.38424297397718, -125.3994150871104, -125.41582329299528, -125.43330024318175,
             -125.45167361727589, -125.47148572476918, -125.49165987944551, -125.51283291929082,
             -125.53570389317336, -125.5588181265923, -125.58171066742123, -125.64545123148538,
             -125.71506317675832, -125.78763678848986, -125.78763678848986, -125.8651515986525,
             -125.94721517841089, -126.03247674382676]

fig, ax1 = plt.subplots(1, 1, figsize=(10,4))
ax1.plot(xi_vals, xi_scores, label='Component 1', color='#F2055C', marker='o')
ax1.set_ylim([-126.20, -125.20])
ax1.set_xlim([-0.0001,0.0051])
#ax1.set_xticklabels([])
ax1.set_ylabel('S(ξ)', fontsize=13)
ax1.set_xlabel('ξ', fontsize=13)
ax1.xaxis.set_tick_params(labelsize=12)
ax1.yaxis.set_tick_params(labelsize=12)
ax1.set_title("Predictive Profile Log-Likelihood (EPL 2017/18 Season)",size=14,fontweight='bold')
plt.show()
Jupyter/2018-09-13-predicting-football-results-with-statistical-modelling-dixon-coles-and-time-weighting.ipynb
dashee87/blogScripts
mit
It seems that $S(\xi)$ is maximised at $\xi$=0 (remember that $\xi \geq 0$), which is simply the standard non-weighted DC model. I suppose this makes sense: If you only have data for the season in question, then you don't have the luxury of down-weighting older results. In the Dixon-Coles paper, they actually compiled data from 4 consecutive seasons (1992/93 to 95/96). You'd expect time weighting to become more effective as the timeframe of your data expands. In other words, the first game of the same season might well be valuable, but the first game of the season five years ago is presumably less valuable. To investigate this hypothesis, we'll pull data for the previous 5 completed EPL seasons (i.e. 2013/14 to 17/18).
# Compile five consecutive EPL seasons (2013/14 - 2017/18) into one frame,
# so that time-decay weighting has genuinely old matches to down-weight.
epl_1318 = pd.DataFrame()
for year in range(13,18):
    epl_1318 = pd.concat((epl_1318, pd.read_csv("http://www.football-data.co.uk/mmz4281/{}{}/E0.csv".format(year, year+1))))
# NOTE(review): assumes every season file uses the d/m/yy date format --
# confirm, as football-data.co.uk has changed date formats between seasons.
epl_1318['Date'] = pd.to_datetime(epl_1318['Date'], format='%d/%m/%y')
# days between each match and the most recent match in the combined data
epl_1318['time_diff'] = (max(epl_1318['Date']) - epl_1318['Date']).dt.days
epl_1318 = epl_1318[['HomeTeam','AwayTeam','FTHG','FTAG', 'FTR', 'time_diff']]
epl_1318 = epl_1318.rename(columns={'FTHG': 'HomeGoals', 'FTAG': 'AwayGoals'})
epl_1318 = epl_1318.dropna(how='all')  # drop fully-empty rows introduced by the concat
epl_1318.head()
Jupyter/2018-09-13-predicting-football-results-with-statistical-modelling-dixon-coles-and-time-weighting.ipynb
dashee87/blogScripts
mit
Same procedure as before: with varying values of $\xi$, we'll quantify how well the model predicted the match results of the second half of the 17/18 EPL season. Again, I ran the program across multiple cores (see Python file).
xi_vals = [0.0, 0.0005, 0.001, 0.0015, 0.002, 0.0025, 0.00275, 0.003, 0.00325, 0.0035, 0.00375, 0.004, 0.00425, 0.0045, 0.005, 0.0055, 0.006] # I pulled the scores from files on my computer that had been generated seperately #xi_scores = [] #for xi in xi_vals: # with open ('find_xi_5season_{}.txt'.format(str(xi)[2:]), 'rb') as fp: # xi_scores.append(sum(pickle.load(fp))) xi_scores = [-127.64548699733858, -126.88558052909376, -126.24253680407995, -125.75657140537645, -125.43198691100818, -125.24473381373896, -125.1929173322124, -125.16314084998176, -125.15259048041912, -125.15741294807299, -125.17611832471187, -125.20427802084305, -125.24143128833828, -125.2863163741079, -125.39161839279092, -125.51241118364625, -125.64269122223465] fig, ax1 = plt.subplots(1, 1, figsize=(10,4)) ax1.plot(xi_vals, xi_scores, label='Component 1', color='#F2055C', marker='o') #ax1.set_ylim([-0.05,1.05]) ax1.set_xlim([-0.0001, 0.0061]) #ax1.set_xticklabels([]) ax1.set_ylabel('S(ξ)', fontsize=13) ax1.set_xlabel('ξ', fontsize=13) ax1.xaxis.set_tick_params(labelsize=12) ax1.yaxis.set_tick_params(labelsize=12) ax1.set_title("Predictive Profile Log-Likelihood (EPL 13/14 - 17/18 Seasons)",size=14,fontweight='bold') plt.show()
Jupyter/2018-09-13-predicting-football-results-with-statistical-modelling-dixon-coles-and-time-weighting.ipynb
dashee87/blogScripts
mit
Open the Dataset
# Open the RASM example netCDF file of monthly means with xarray;
# decode_coords=False leaves coordinate variables as plain data variables.
monthly_mean_file = 'RASM_example_data.nc'
ds = xr.open_dataset(monthly_mean_file, decode_coords=False)
print(ds)
notebooks/norman/xarray-ex-3.ipynb
CCI-Tools/sandbox
gpl-3.0